mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: v0.1.0-alp...v0.1.2 (290 commits)
Commit SHA1s (author and date columns were not preserved in this mirror):

b2a09c888a, af101480b3, b8f7f603cf, 8fb97ea1d8, 21ce9c1163, 0a22375ac1, 0596d20a3b, e19c8fa2b6, ad886f5b3e, f6669a8201,
ad5c47185d, 64441616db, 09491d6aee, 7cfa30b2ab, a7676d8860, 62e2a60b7b, 128c5cabe1, 9a001d3392, facdda4d9f, 17eb99bc52,
cd8be77968, b530ac9e60, 76f1a79f1b, 4705245d60, f712f978cf, cbf64e65b9, 242ce5c2aa, e8d2e82335, 0086cc2d3d, cdc111b607,
81ca1d8399, 8d3999df5f, a60788e92e, 296c6dfcbf, 604c20a83d, c7f114c8fa, 8a83de4ea5, 3377930a50, 85dd7e4f24, f790fa05c1,
dfd91a1bf8, ded31fb069, 6a574fc52b, 58bdf27068, 610a895b66, a9ccc06449, 38fe1a2f01, 3414ac46b0, 757b4a87a0, ba1517fceb,
5b5d953d56, 3f6cbc378d, 9619940569, ed8252157a, 3e0fb7e75b, ba3ce436df, b31a6cb506, 95090592f0, 3a527c0fd5, 819b60ca13,
7169fe2989, b70672be77, a4c01f4a3a, bd98a26cca, 1b4236d698, e8cc9b4b29, 379f581780, ff6cfe8e70, 5a397917c0, 559880cb84,
b76b27f3bf, d4e0dc3685, b022556b79, bd065ea6e8, 9a87f5edf8, e851b6d019, e7b92f24e8, 4b8db408cf, 98659899c0, b1311801da,
f1b65d9b77, d5a2a26916, 8e7e68708f, 9c1118b06d, 3fb93efbd0, 3fd9c2f144, 75e48c5f20, d402f83442, c5c6494e0b, dc50095af3,
8cd69f441e, f52fc9b7d4, 50d2685365, 11d45e2918, 30287e7e41, 0b3f955ca7, 4b58a8a18d, bd377ef329, df751c38b4, f6e871708a,
819c990a89, a8b4e8d933, 710e2ed133, 81eab74b90, 8f67d8ca93, 4cc3ac37d5, b48c851b96, fdd17c6eeb, 51641db39e, 98ef74bff4,
f42acc90c2, 2df8143ad5, fb2e0c7cf3, 390e9095f6, bcd44b90c1, c6f2db8ae0, e17d5a1c41, 23092a5208, 4bbad6ab1e, 6833b405d9,
aaaf24143d, 9161796dfa, 68b231987c, 6e9964ac97, 6afd79cab8, 4e88a01638, af1f8d6101, a9c8584c98, 7787cfdd42, 2f39a77137,
16f86a9d77, 5ec1a7027b, ddbc97befb, a8c2b35ec6, 04afee216e, 5533040be7, 34fdba77df, cd0d58cb24, 8b869642b8, a33d1e9863,
dfe7bfb07f, 5d1f231004, 40eec85cf7, e17d564bf0, 301656d568, a19dee1dc0, 75b8afe043, e2904b99ac, de0b8aa0a0, 63e396e9e9,
4d8276790b, 374acc8830, 8491f65093, 5e6f340dd9, 7b98718cd9, 0f7e5a2fb2, 9ad6c45913, 7fe417e740, c1a9f84c7f, be897efd01,
c06e04afbb, e77a7f253c, 7d6f4cd88b, 83ac6598b6, 4c925e0079, c6128ec0a4, 7c34b009ec, 70edd4d55b, 6beea73590, c0d3533d10,
9989a8c192, 19dd8b1246, 1e9918ddf9, 4ce62f850b, 83d57f9111, 803b7f0633, 37ca5ba380, c1d32bdf2b, 83509f31f4, 926022e14c,
2f2609d8c6, ecadbc1435, afac885c10, 5d62e193bd, 7d77913e88, 3f45a0d337, a1e97c990f, 4ae63b7089, b0925d94ed, fc9276c79d,
184ca78a4d, ebbf1e43b5, 54fe81dad9, af935671b2, 74adb077bc, 54c7a8be02, ea5146762a, 788b5362a1, 028a69e349, 9a30ba00c4,
8149932bad, 89e4084af4, 39df25a8f6, b2ad0e972b, 18e6740ac9, a7dc86ffe5, 71482b38d7, dc9b5339bf, 5e05c8f884, aafc26c788,
64243e3a7d, 36a13dafb7, 637837ae44, ae8afd3711, 3db8f95169, 43aefc5d74, b33937f48e, 9bc4c0d9c7, 302d7ec41b, cc46194f29,
5dfc24e4f6, 4987136850, 6960739b3d, 49d83abc0c, ecb71f81be, 6f5639fccd, 1e9d09099e, daad38360f, bae0243959, d162fbb598,
0959c1d16b, e428a84446, 58c37f588d, d195a22f40, 6775c5be87, 5e89f1ba4e, 2664436194, b91c77b862, 4015dd8075, b39dbcbda9,
0e8411c2ff, a9b42b436d, 9428e70971, 32d51947a4, 5fb417ec7c, 90fcaa8487, c609b193a1, 1305924423, ea753b9ac0, 72f05a3137,
9e58311ecd, 2679faf911, fa54870197, 3988770266, 777a3182c5, 5b675f54a8, 627d444723, d1730a9577, ca7ed67dc5, 072e5f78b4,
8f5ecefc90, afd9866709, 89d5306740, 50cc0e9b51, 7762873842, 4aa24f0639, f1b95e25a1, 041cd422a1, f907a93b97, a6eb213adf,
5fcad7a175, 0566f812d3, 334fd26bc5, 8ffc078f88, 179ff728df, 4d56d896ca, 6fe205f3b5, d13de0aeba, 11194f37d4, de6803d253,
d0ef3aa9eb, 04df80e640, 76236646ef, 26848f9f5c, 90990584b7, a14ec94653, 26a3e93ca7, 3978931b8e, d589de63ef, 7829e4a219
@@ -1,2 +1,5 @@
 [target.aarch64-unknown-linux-gnu]
 linker = "aarch64-linux-gnu-gcc"
+
+[alias]
+sqlness = "run --bin sqlness-runner --"
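The `[alias]` entry makes the sqlness runner available as a cargo subcommand; the two invocations below are equivalent (extra arguments after the alias are forwarded to the runner):

```sh
cargo sqlness
# ...expands to:
cargo run --bin sqlness-runner --
```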
@@ -2,3 +2,9 @@
 GT_S3_BUCKET=S3 bucket
 GT_S3_ACCESS_KEY_ID=S3 access key id
 GT_S3_ACCESS_KEY=S3 secret access key
+GT_S3_ENDPOINT_URL=S3 endpoint url
+# Settings for oss test
+GT_OSS_BUCKET=OSS bucket
+GT_OSS_ACCESS_KEY_ID=OSS access key id
+GT_OSS_ACCESS_KEY=OSS access key
+GT_OSS_ENDPOINT=OSS endpoint
.github/pr-title-breaking-change-label-config.json (vendored, new file; 13 lines)

@@ -0,0 +1,13 @@
+{
+  "LABEL": {
+    "name": "breaking change",
+    "color": "D93F0B"
+  },
+  "CHECKS": {
+    "regexp": "^(?:(?!!:).)*$",
+    "ignoreLabels": [
+      "ignore-title"
+    ],
+    "alwaysPassCI": true
+  }
+}
.github/pr-title-checker-config.json (vendored; 18 lines changed)

@@ -1,10 +1,12 @@
 {
-  "LABEL": {
-    "name": "Invalid PR Title",
-    "color": "B60205"
-  },
-  "CHECKS": {
-    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
-    "ignoreLabels" : ["ignore-title"]
-  }
+  "LABEL": {
+    "name": "Invalid PR Title",
+    "color": "B60205"
+  },
+  "CHECKS": {
+    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
+    "ignoreLabels": [
+      "ignore-title"
+    ]
+  }
 }
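Together, the two configs accept conventional-commit titles and attach the `breaking change` label when the title carries a `!` before the colon. A quick local sanity check (a sketch using GNU grep; `-P` is needed for the lookahead and is not available in BSD grep):

```sh
# New checker regexp: the optional "!" marker is now a valid title:
echo 'feat(storage)!: drop legacy WAL format' \
  | grep -E '^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*'

# The breaking-change regexp matches titles WITHOUT "!:"; a non-match here is
# what causes the "breaking change" label to be applied:
echo 'feat(storage): add WAL purge' | grep -P '^(?:(?!!:).)*$'
```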
.github/workflows/apidoc.yml (vendored, new file; 42 lines)

@@ -0,0 +1,42 @@
+on:
+  push:
+    branches:
+      - develop
+    paths-ignore:
+      - 'docs/**'
+      - 'config/**'
+      - '**.md'
+      - '.dockerignore'
+      - 'docker/**'
+      - '.gitignore'
+
+name: Build API docs
+
+env:
+  RUST_TOOLCHAIN: nightly-2023-02-26
+
+jobs:
+  apidoc:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - run: cargo doc --workspace --no-deps --document-private-items
+      - run: |
+          cat <<EOF > target/doc/index.html
+          <!DOCTYPE html>
+          <html>
+          <head>
+          <meta http-equiv="refresh" content="0; url='greptime/'" />
+          </head>
+          <body></body></html>
+          EOF
+      - name: Publish dist directory
+        uses: JamesIves/github-pages-deploy-action@v4
+        with:
+          folder: target/doc
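A local approximation of the job above (a sketch; assumes the repository's toolchain and protoc are installed):

```sh
cargo doc --workspace --no-deps --document-private-items
# The generated index.html redirects to the greptime crate docs:
xdg-open target/doc/greptime/index.html   # `open` on macOS
```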
.github/workflows/coverage.yml (vendored, deleted; 70 lines)

@@ -1,70 +0,0 @@
-on:
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-    paths-ignore:
-      - 'docs/**'
-      - 'config/**'
-      - '**.md'
-      - '.dockerignore'
-      - 'docker/**'
-      - '.gitignore'
-  push:
-    branches:
-      - "main"
-      - "develop"
-    paths-ignore:
-      - 'docs/**'
-      - 'config/**'
-      - '**.md'
-      - '.dockerignore'
-      - 'docker/**'
-      - '.gitignore'
-  workflow_dispatch:
-
-name: Code coverage
-
-env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
-
-jobs:
-  coverage:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest-8-cores
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v3
-      - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: KyleMayes/install-llvm-action@v1
-        with:
-          version: "14.0"
-      - name: Install toolchain
-        uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{ env.RUST_TOOLCHAIN }}
-          components: llvm-tools-preview
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-      - name: Install latest nextest release
-        uses: taiki-e/install-action@nextest
-      - name: Install cargo-llvm-cov
-        uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Collect coverage data
-        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
-        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
-          RUST_BACKTRACE: 1
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
-          UNITTEST_LOG_DIR: "__unittest_logs"
-      - name: Codecov upload
-        uses: codecov/codecov-action@v2
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ./lcov.info
-          flags: rust
-          fail_ci_if_error: true
-          verbose: true
.github/workflows/develop.yml (vendored; 92 lines changed)

@@ -7,6 +7,7 @@ on:
       - '**.md'
       - '.dockerignore'
      - 'docker/**'
+      - '.gitignore'
   push:
     branches:
       - develop

@@ -23,15 +24,15 @@ on:
 name: CI

 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-26

 jobs:
   typos:
     name: Spell Check with Typos
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: crate-ci/typos@v1.0.4
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10

   check:
     name: Check

@@ -110,6 +111,42 @@ jobs:
 #          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
 #          UNITTEST_LOG_DIR: "__unittest_logs"

+  sqlness:
+    name: Sqlness Test
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest-8-cores
+    timeout-minutes: 60
+    needs: [clippy]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+      - name: Run etcd
+        run: |
+          ETCD_VER=v3.5.7
+          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
+          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+          mkdir -p /tmp/etcd-download
+          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
+          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+
+          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
+          nohup etcd >/tmp/etcd.log 2>&1 &
+      - name: Run sqlness
+        run: cargo sqlness && ls /tmp
+      - name: Upload sqlness logs
+        uses: actions/upload-artifact@v3
+        with:
+          name: sqlness-logs
+          path: /tmp/greptime-*.log
+          retention-days: 3
+
   fmt:
     name: Rustfmt
     if: github.event.pull_request.draft == false

@@ -147,3 +184,52 @@ jobs:
         uses: Swatinem/rust-cache@v2
       - name: Run cargo clippy
         run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
+
+  coverage:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest-8-cores
+    timeout-minutes: 60
+    needs: [clippy]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: KyleMayes/install-llvm-action@v1
+        with:
+          version: "14.0"
+      - name: Install toolchain
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+          components: llvm-tools-preview
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+      - name: Install latest nextest release
+        uses: taiki-e/install-action@nextest
+      - name: Install Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+      - name: Install PyArrow Package
+        run: pip install pyarrow
+      - name: Install cargo-llvm-cov
+        uses: taiki-e/install-action@cargo-llvm-cov
+      - name: Collect coverage data
+        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
+        env:
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+          RUST_BACKTRACE: 1
+          CARGO_INCREMENTAL: 0
+          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+          UNITTEST_LOG_DIR: "__unittest_logs"
+      - name: Codecov upload
+        uses: codecov/codecov-action@v2
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./lcov.info
+          flags: rust
+          fail_ci_if_error: false
+          verbose: true
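To reproduce the sqlness environment locally, the same etcd bootstrap works in a plain shell; a sketch with a health check added (assumes `etcdctl` from the same tarball ends up on PATH):

```sh
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz \
  -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &

# Confirm etcd is up before running `cargo sqlness`:
etcdctl endpoint health
```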
.github/workflows/doc-issue.yml (vendored; 16 lines changed)

@@ -1,4 +1,4 @@
-name: Create Issue in docs repo on doc related changes
+name: Create Issue in downstream repos

 on:
   issues:

@@ -23,3 +23,17 @@ jobs:
           body: |
             A document change request is generated from
             ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
+  cloud_issue:
+    if: github.event.label.name == 'cloud followup required'
+    runs-on: ubuntu-latest
+    steps:
+      - name: create an issue in cloud repo
+        uses: dacbd/create-issue-action@main
+        with:
+          owner: GreptimeTeam
+          repo: greptimedb-cloud
+          token: ${{ secrets.DOCS_REPO_TOKEN }}
+          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
+          body: |
+            A followup request is generated from
+            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
.github/workflows/docs.yml (vendored, new file; 55 lines)

@@ -0,0 +1,55 @@
+on:
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+    paths:
+      - 'docs/**'
+      - 'config/**'
+      - '**.md'
+      - '.dockerignore'
+      - 'docker/**'
+      - '.gitignore'
+  push:
+    branches:
+      - develop
+      - main
+    paths:
+      - 'docs/**'
+      - 'config/**'
+      - '**.md'
+      - '.dockerignore'
+      - 'docker/**'
+      - '.gitignore'
+  workflow_dispatch:
+
+name: CI
+
+# To pass the required status check, see:
+# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks
+
+jobs:
+  check:
+    name: Check
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
+
+  fmt:
+    name: Rustfmt
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
+
+  clippy:
+    name: Clippy
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
+
+  coverage:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
.github/workflows/license.yaml (vendored; 2 lines changed)

@@ -13,4 +13,4 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Check License Header
-        uses: apache/skywalking-eyes/header@main
+        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
.github/workflows/pr-title-checker.yml (vendored; 9 lines changed)

@@ -18,3 +18,12 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           pass_on_octokit_error: false
           configuration_path: ".github/pr-title-checker-config.json"
+  breaking:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: thehanimo/pr-title-checker@v1.3.4
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          pass_on_octokit_error: false
+          configuration_path: ".github/pr-title-breaking-change-label-config.json"
.github/workflows/release.yml (vendored; 334 lines changed)

@@ -10,13 +10,16 @@ on:
 name: Release

 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-26

   # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
-  SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
+  SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0

   # In the future, we can change SCHEDULED_PERIOD to nightly.
-  SCHEDULED_PERIOD: weekly
+  SCHEDULED_PERIOD: nightly
+
+  CARGO_PROFILE: nightly

   ## FIXME(zyy17): Enable it after the tests are stabled.
   DISABLE_RUN_TESTS: true

 jobs:
   build:

@@ -26,18 +29,44 @@ jobs:
       # The file format is greptime-<os>-<arch>
       include:
         - arch: x86_64-unknown-linux-gnu
-          os: ubuntu-latest-16-cores
+          os: ubuntu-2004-16-cores
           file: greptime-linux-amd64
           continue-on-error: false
         - arch: aarch64-unknown-linux-gnu
-          os: ubuntu-latest-16-cores
+          os: ubuntu-2004-16-cores
           file: greptime-linux-arm64
           continue-on-error: false
         - arch: aarch64-apple-darwin
           os: macos-latest
           file: greptime-darwin-arm64
           continue-on-error: false
         - arch: x86_64-apple-darwin
           os: macos-latest
           file: greptime-darwin-amd64
           continue-on-error: false
+        - arch: x86_64-unknown-linux-gnu
+          os: ubuntu-2004-16-cores
+          file: greptime-linux-amd64-pyo3
+          continue-on-error: false
+          opts: "-F pyo3_backend"
+        - arch: aarch64-unknown-linux-gnu
+          os: ubuntu-2004-16-cores
+          file: greptime-linux-arm64-pyo3
+          continue-on-error: false
+          opts: "-F pyo3_backend"
+        - arch: aarch64-apple-darwin
+          os: macos-latest
+          file: greptime-darwin-arm64-pyo3
+          continue-on-error: false
+          opts: "-F pyo3_backend"
+        - arch: x86_64-apple-darwin
+          os: macos-latest
+          file: greptime-darwin-amd64-pyo3
+          continue-on-error: false
+          opts: "-F pyo3_backend"
     runs-on: ${{ matrix.os }}
     continue-on-error: ${{ matrix.continue-on-error }}
+    if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3

@@ -67,11 +96,37 @@ jobs:
         run: |
           brew install protobuf

+      - name: Install etcd for linux
+        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
+        run: |
+          ETCD_VER=v3.5.7
+          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
+          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+          mkdir -p /tmp/etcd-download
+          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
+          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+
+          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
+          nohup etcd >/tmp/etcd.log 2>&1 &
+
+      - name: Install etcd for macos
+        if: contains(matrix.arch, 'darwin')
+        run: |
+          brew install etcd
+          brew services start etcd
+
       - name: Install dependencies for linux
         if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
         run: |
           sudo apt-get -y update
-          sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
+          sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
+
+      # FIXME(zyy17): Should we specify the version of python when building binary for darwin?
+      - name: Compile Python 3.10.10 from source for linux
+        if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
+        run: |
+          sudo chmod +x ./docker/aarch64/compile-python.sh
+          sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}

       - name: Install rust toolchain
         uses: dtolnay/rust-toolchain@master

@@ -82,13 +137,61 @@ jobs:
       - name: Output package versions
         run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

       - name: Run tests
         if: env.DISABLE_RUN_TESTS == 'false'
         run: make unit-test integration-test sqlness-test

+      - name: Run cargo build with pyo3 for aarch64-linux
+        if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
+        run: |
+          # TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
+          export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
+          export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
+          export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
+          export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
+
+          export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
+          echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
+          alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
+          alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
+
+          cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
+
+      - name: Run cargo build with pyo3 for amd64-linux
+        if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
+        run: |
+          export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
+          export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
+          export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
+          export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
+
+          echo "implementation=CPython" >> pyo3.config
+          echo "version=3.10" >> pyo3.config
+          echo "implementation=CPython" >> pyo3.config
+          echo "shared=true" >> pyo3.config
+          echo "abi3=true" >> pyo3.config
+          echo "lib_name=python3.10" >> pyo3.config
+          echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
+          echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
+          echo "pointer_width=64" >> pyo3.config
+          echo "build_flags=" >> pyo3.config
+          echo "suppress_build_script_link_lines=false" >> pyo3.config
+
+          cat pyo3.config
+          export PYO3_CONFIG_FILE=${PWD}/pyo3.config
+          alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
+          alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
+
+          cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
+
       - name: Run cargo build
-        run: cargo build ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
+        if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
+        run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

       - name: Calculate checksum and rename binary
         shell: bash
         run: |
-          cd target/${{ matrix.arch }}/release
+          cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
           chmod +x greptime
           tar -zcvf ${{ matrix.file }}.tgz greptime
           echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum

@@ -97,94 +200,23 @@ jobs:
         uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.file }}
-          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.tgz
+          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz

       - name: Upload checksum of artifacts
         uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.file }}.sha256sum
-          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.sha256sum
-
-  release:
-    name: Release artifacts
-    needs: [build]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: Download artifacts
-        uses: actions/download-artifact@v3
-
-      - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
-        shell: bash
-        if: github.event_name == 'schedule'
-        run: |
-          buildTime=`date "+%Y%m%d"`
-          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
-          echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
-
-      - name: Create scheduled build git tag
-        if: github.event_name == 'schedule'
-        run: |
-          git tag ${{ env.SCHEDULED_BUILD_VERSION }}
-
-      - name: Publish scheduled release # configure the different release title and tags.
-        uses: softprops/action-gh-release@v1
-        if: github.event_name == 'schedule'
-        with:
-          name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
-          tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
-          generate_release_notes: true
-          files: |
-            **/greptime-*
-
-      - name: Publish release
-        uses: softprops/action-gh-release@v1
-        if: github.event_name != 'schedule'
-        with:
-          name: "Release ${{ github.ref_name }}"
-          files: |
-            **/greptime-*
+          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

   docker:
     name: Build docker image
     needs: [build]
     runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3

-      - name: Download amd64 binary
-        uses: actions/download-artifact@v3
-        with:
-          name: greptime-linux-amd64
-          path: amd64
-
-      - name: Unzip the amd64 artifacts
-        run: |
-          cd amd64
-          tar xvf greptime-linux-amd64.tgz
-          rm greptime-linux-amd64.tgz
-
-      - name: Download arm64 binary
-        uses: actions/download-artifact@v3
-        with:
-          name: greptime-linux-arm64
-          path: arm64
-
-      - name: Unzip the arm64 artifacts
-        run: |
-          cd arm64
-          tar xvf greptime-linux-arm64.tgz
-          rm greptime-linux-arm64.tgz
-
-      - name: Login to UCloud Container Registry
-        uses: docker/login-action@v2
-        with:
-          registry: uhub.service.ucloud.cn
-          username: ${{ secrets.UCLOUD_USERNAME }}
-          password: ${{ secrets.UCLOUD_PASSWORD }}
-
       - name: Login to Dockerhub
         uses: docker/login-action@v2
         with:

@@ -212,8 +244,36 @@ jobs:
       - name: Set up buildx
         uses: docker/setup-buildx-action@v2

-      - name: Build and push
+      - name: Download amd64 binary
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-amd64-pyo3
+          path: amd64
+
+      - name: Unzip the amd64 artifacts
+        run: |
+          cd amd64
+          tar xvf greptime-linux-amd64-pyo3.tgz
+          rm greptime-linux-amd64-pyo3.tgz
+
+      - name: Download arm64 binary
+        id: download-arm64
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-arm64-pyo3
+          path: arm64
+
+      - name: Unzip the arm64 artifacts
+        id: unzip-arm64
+        if: success() || steps.download-arm64.conclusion == 'success'
+        run: |
+          cd arm64
+          tar xvf greptime-linux-arm64-pyo3.tgz
+          rm greptime-linux-arm64-pyo3.tgz
+
+      - name: Build and push all
         uses: docker/build-push-action@v3
+        if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
         with:
           context: .
           file: ./docker/ci/Dockerfile

@@ -222,5 +282,105 @@ jobs:
           tags: |
             greptime/greptimedb:latest
             greptime/greptimedb:${{ env.IMAGE_TAG }}
-            uhub.service.ucloud.cn/greptime/greptimedb:latest
-            uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
+
+      - name: Build and push amd64 only
+        uses: docker/build-push-action@v3
+        if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
+        with:
+          context: .
+          file: ./docker/ci/Dockerfile
+          push: true
+          platforms: linux/amd64
+          tags: |
+            greptime/greptimedb:latest
+            greptime/greptimedb:${{ env.IMAGE_TAG }}
+
+  release:
+    name: Release artifacts
+    # Release artifacts only when all the artifacts are built successfully.
+    needs: [build,docker]
+    runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+
+      - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
+        shell: bash
+        if: github.event_name == 'schedule'
+        run: |
+          buildTime=`date "+%Y%m%d"`
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
+          echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
+
+      - name: Create scheduled build git tag
+        if: github.event_name == 'schedule'
+        run: |
+          git tag ${{ env.SCHEDULED_BUILD_VERSION }}
+
+      - name: Publish scheduled release # configure the different release title and tags.
+        uses: softprops/action-gh-release@v1
+        if: github.event_name == 'schedule'
+        with:
+          name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
+          tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
+          generate_release_notes: true
+          files: |
+            **/greptime-*
+
+      - name: Publish release
+        uses: softprops/action-gh-release@v1
+        if: github.event_name != 'schedule'
+        with:
+          name: "Release ${{ github.ref_name }}"
+          files: |
+            **/greptime-*
+
+  docker-push-uhub:
+    name: Push docker image to UCloud Container Registry
+    needs: [docker]
+    runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
+    # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
+    continue-on-error: true
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to UCloud Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: uhub.service.ucloud.cn
+          username: ${{ secrets.UCLOUD_USERNAME }}
+          password: ${{ secrets.UCLOUD_PASSWORD }}
+
+      - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
+        shell: bash
+        if: github.event_name == 'schedule'
+        run: |
+          buildTime=`date "+%Y%m%d"`
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
+          echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
+
+      - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
+        shell: bash
+        if: github.event_name != 'schedule'
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
+        run: |
+          docker buildx imagetools create \
+            --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
+            --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
+            greptime/greptimedb:${{ env.IMAGE_TAG }}
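The `.sha256sum` artifacts above contain just the bare digest, so local verification has to re-join digest and file name; a sketch (file names follow the build matrix):

```sh
FILE=greptime-linux-amd64
echo "$(cat ${FILE}.sha256sum)  ${FILE}.tgz" | shasum -a 256 -c
tar xvf ${FILE}.tgz    # unpacks the `greptime` binary
```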
@@ -5,11 +5,11 @@ repos:
       - id: conventional-pre-commit
         stages: [commit-msg]

-  - repo: https://github.com/DevinR528/cargo-sort
-    rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
-    hooks:
-      - id: cargo-sort
-        args: ["--workspace"]
+  # - repo: https://github.com/DevinR528/cargo-sort
+  #   rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
+  #   hooks:
+  #     - id: cargo-sort
+  #       args: ["--workspace"]

   - repo: https://github.com/doublify/pre-commit-rust
     rev: v1.0
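With the cargo-sort hook commented out above, the remaining hooks can still be run on demand; a minimal sketch (the single-hook id shown is an assumption — use whatever ids the config actually declares):

```sh
# Run every configured hook against the whole tree:
pre-commit run --all-files

# Run a single hook by its id (assumed id; check .pre-commit-config.yaml):
pre-commit run fmt --all-files
```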
@@ -1,4 +1,4 @@
-# Welcome!
+# Welcome 👋

 Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

@@ -50,34 +50,33 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
-- Make sure all unit tests are passed.
+- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).

 #### `pre-commit` Hooks

 You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.

 1. Install `pre-commit`
-   ```
-   $ pip install pre-commit
-   ```
-   or
-   ```
-   $ brew install pre-commit
-   ```
+
+       pip install pre-commit
+
+   or
+
+       brew install pre-commit
+
 2. Install the `pre-commit` hooks
-   ```
-   $ pre-commit install
-   pre-commit installed at .git/hooks/pre-commit
-
-   $ pre-commit install --hook-type commit-msg
-   pre-commit installed at .git/hooks/commit-msg
-
-   $ pre-commit install --hook-type pre-push
-   pre-commit installed at .git/hooks/pre-pus
-   ```
+
+       $ pre-commit install
+       pre-commit installed at .git/hooks/pre-commit
+
+       $ pre-commit install --hook-type commit-msg
+       pre-commit installed at .git/hooks/commit-msg
+
+       $ pre-commit install --hook-type pre-push
+       pre-commit installed at .git/hooks/pre-push

-now `pre-commit` will run automatically on `git commit`.
+Now, `pre-commit` will run automatically on `git commit`.

 ### Title

@@ -102,10 +101,12 @@ of what you were trying to do and what went wrong. You can also reach for help i
 ## Community

 The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

 - [GreptimeDB Community Slack](https://greptime.com/slack)
 - [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)

 Also, see some extra GreptimeDB content:

 - [GreptimeDB Docs](https://greptime.com/docs)
+- [Learn GreptimeDB](https://greptime.com/products/db)
 - [Greptime Inc. Website](https://greptime.com)
Cargo.lock (generated; 2861 lines changed) — file diff suppressed because it is too large.
Cargo.toml (54 lines changed)

@@ -7,16 +7,20 @@ members = [
     "src/cmd",
     "src/common/base",
     "src/common/catalog",
+    "src/common/datasource",
     "src/common/error",
     "src/common/function",
+    "src/common/function-macro",
     "src/common/grpc",
     "src/common/grpc-expr",
+    "src/common/mem-prof",
+    "src/common/procedure",
     "src/common/query",
     "src/common/recordbatch",
     "src/common/runtime",
     "src/common/substrait",
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
     "src/datanode",
     "src/datatypes",

@@ -26,6 +30,7 @@ members = [
     "src/meta-srv",
     "src/mito",
     "src/object-store",
+    "src/partition",
     "src/promql",
     "src/query",
     "src/script",

@@ -35,27 +40,52 @@ members = [
     "src/storage",
     "src/store-api",
     "src/table",
+    "src/table-procedure",
     "tests-integration",
     "tests/runner",
 ]

 [workspace.package]
-version = "0.1.0"
+version = "0.1.1"
 edition = "2021"
 license = "Apache-2.0"

 [workspace.dependencies]
-arrow = "29.0"
-arrow-schema = { version = "29.0", features = ["serde"] }
-# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-parquet = "29.0"
-sqlparser = "0.28"
+arrow = { version = "34.0" }
+arrow-array = "34.0"
+arrow-flight = "34.0"
+arrow-schema = { version = "34.0", features = ["serde"] }
+async-stream = "0.3"
+async-trait = "0.1"
+chrono = { version = "0.4", features = ["serde"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
+futures = "0.3"
+futures-util = "0.3"
+parquet = "34.0"
+paste = "1.0"
+prost = "0.11"
+rand = "0.8"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+snafu = { version = "0.7", features = ["backtraces"] }
+sqlparser = "0.32"
+tempfile = "3"
+tokio = { version = "1.24.2", features = ["full"] }
+tokio-util = "0.7"
+tonic = { version = "0.8", features = ["tls"] }
+uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

 [profile.release]
 debug = true
+
+[profile.nightly]
+inherits = "release"
+strip = true
+lto = "thin"
+debug = false
+incremental = false
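The new `[profile.nightly]` is what `CARGO_PROFILE: nightly` in release.yml selects; a sketch of using it directly — custom profiles write to a directory named after the profile, which is why the workflow paths changed from `target/<arch>/release` to `target/<arch>/nightly`:

```sh
cargo build --profile nightly --locked --target x86_64-unknown-linux-gnu
ls target/x86_64-unknown-linux-gnu/nightly/greptime
```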
Makefile (6 lines changed)

@@ -19,6 +19,10 @@ clean: ## Clean the project.
 fmt: ## Format all the Rust code.
	cargo fmt --all

+.PHONY: fmt-toml
+fmt-toml: ## Format all TOML files.
+	taplo format --check --option "indent_string= "
+
 .PHONY: docker-image
 docker-image: ## Build docker image.
	docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .

@@ -35,7 +39,7 @@ integration-test: ## Run integation test.
 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo run --bin sqlness-runner
+	cargo sqlness

 .PHONY: check
 check: ## Cargo check all the targets.
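The new and changed targets in use (a sketch):

```sh
make fmt-toml       # check TOML formatting via taplo
make sqlness-test   # now delegates to the `cargo sqlness` alias
```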
README.md (17 lines changed)

@@ -61,6 +61,12 @@ To compile GreptimeDB from source, you'll need:
   find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
   **Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
   keyword. You can check it with `protoc --version`.
+- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
+  in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
+  scripting engine(In CPython Mode). This is available as `python3-dev` on
+  ubuntu, you can install it with `sudo apt install python3-dev`, or
+  `python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
+  `Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.

 #### Build with Docker

@@ -141,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
 ### Installation

 - [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
-  downloadable pre-built binaries for Linux and MacOS
-- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
-  Docker images
+  For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
+- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
+  Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
 - [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
   Kubernetes deployment

@@ -153,6 +159,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
 - GreptimeDB [Developer
   Guide](https://docs.greptime.com/developer-guide/overview.html)

+### Dashboard
+- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
+
 ### SDK

 - [GreptimeDB Java

@@ -169,7 +178,7 @@ For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam
 ## Community

-Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
+Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
 ask for help by filling an issue with a detailed description of what you were trying to do
 and what went wrong. If you have any questions or if you would like to get involved in our
 community, please check out:
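A sketch of the CPython-enabled build the README describes; the `-F pyo3_backend` flag is quoted from the text above, while the `standalone start` subcommand is an assumption based on the project's usual CLI:

```sh
# Debian/Ubuntu; use python3-devel on RPM-based distros:
sudo apt install python3-dev

# Build and run with the CPython backend enabled (subcommand assumed):
cargo run -F pyo3_backend -- standalone start
```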
@@ -11,4 +11,4 @@ client = { path = "../src/client" }
 indicatif = "0.17.1"
 itertools = "0.10.5"
 parquet.workspace = true
-tokio = { version = "1.21", features = ["full"] }
+tokio.workspace = true
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -21,19 +21,17 @@ use std::collections::HashMap;
 use std::path::{Path, PathBuf};
 use std::time::Instant;

-use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
+use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
 use arrow::datatypes::{DataType, Float64Type, Int64Type};
 use arrow::record_batch::RecordBatch;
 use clap::Parser;
-use client::admin::Admin;
 use client::api::v1::column::Values;
-use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertExpr, TableId};
-use client::{Client, Database, Select};
+use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
+use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
 use tokio::task::JoinSet;

-const DATABASE_NAME: &str = "greptime";
 const CATALOG_NAME: &str = "greptime";
 const SCHEMA_NAME: &str = "public";
 const TABLE_NAME: &str = "nyc_taxi";

@@ -63,7 +61,7 @@ struct Args {
     #[arg(long = "skip-read")]
     skip_read: bool,

-    #[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
+    #[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
     endpoint: String,
 }

@@ -99,16 +97,18 @@ async fn write_data(
     for record_batch in record_batch_reader {
         let record_batch = record_batch.unwrap();
+        if !is_record_batch_full(&record_batch) {
+            continue;
+        }
         let (columns, row_count) = convert_record_batch(record_batch);
-        let insert_expr = InsertExpr {
-            schema_name: "public".to_string(),
+        let request = InsertRequest {
             table_name: TABLE_NAME.to_string(),
+            region_number: 0,
             columns,
             row_count,
         };
         let now = Instant::now();
-        db.insert(insert_expr).await.unwrap();
+        db.insert(request).await.unwrap();
         let elapsed = now.elapsed();
         total_rpc_elapsed_ms += elapsed.as_millis();
         progress_bar.inc(row_count as _);

@@ -125,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
     let mut columns = vec![];

     for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
-        let values = build_values(array);
+        let (values, datatype) = build_values(array);
         let column = Column {
             column_name: field.name().to_owned(),
             values: Some(values),
-            null_mask: vec![],
+            null_mask: array
+                .data()
+                .null_bitmap()
+                .map(|bitmap| bitmap.buffer().as_slice().to_vec())
+                .unwrap_or_default(),
+            datatype: datatype.into(),
             // datatype and semantic_type are set to default
             ..Default::default()
         };

@@ -139,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
     (columns, row_count as _)
 }

-fn build_values(column: &ArrayRef) -> Values {
+fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
     match column.data_type() {
         DataType::Int64 => {
             let array = column

@@ -147,10 +152,13 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
                 .downcast_ref::<PrimitiveArray<Int64Type>>()
                 .unwrap();
             let values = array.values();
-            Values {
-                i64_values: values.to_vec(),
-                ..Default::default()
-            }
+            (
+                Values {
+                    i64_values: values.to_vec(),
+                    ..Default::default()
+                },
+                ColumnDataType::Int64,
+            )
         }
         DataType::Float64 => {
             let array = column

@@ -158,29 +166,38 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
                 .downcast_ref::<PrimitiveArray<Float64Type>>()
                 .unwrap();
             let values = array.values();
-            Values {
-                f64_values: values.to_vec(),
-                ..Default::default()
-            }
+            (
+                Values {
+                    f64_values: values.to_vec(),
+                    ..Default::default()
+                },
+                ColumnDataType::Float64,
+            )
         }
         DataType::Timestamp(_, _) => {
             let array = column
                 .as_any()
-                .downcast_ref::<TimestampNanosecondArray>()
+                .downcast_ref::<TimestampMicrosecondArray>()
                 .unwrap();
             let values = array.values();
-            Values {
-                i64_values: values.to_vec(),
-                ..Default::default()
-            }
+            (
+                Values {
+                    i64_values: values.to_vec(),
+                    ..Default::default()
+                },
+                ColumnDataType::Int64,
+            )
         }
         DataType::Utf8 => {
             let array = column.as_any().downcast_ref::<StringArray>().unwrap();
             let values = array.iter().filter_map(|s| s.map(String::from)).collect();
-            Values {
-                string_values: values,
-                ..Default::default()
-            }
+            (
+                Values {
+                    string_values: values,
+                    ..Default::default()
+                },
+                ColumnDataType::String,
+            )
         }
         DataType::Null
         | DataType::Boolean

@@ -211,10 +228,15 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
         | DataType::Dictionary(_, _)
         | DataType::Decimal128(_, _)
         | DataType::Decimal256(_, _)
+        | DataType::RunEndEncoded(_, _)
         | DataType::Map(_, _) => todo!(),
     }
 }

+fn is_record_batch_full(batch: &RecordBatch) -> bool {
+    batch.columns().iter().all(|col| col.null_count() == 0)
+}
+
 fn create_table_expr() -> CreateTableExpr {
     CreateTableExpr {
         catalog_name: CATALOG_NAME.to_string(),

@@ -342,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
         create_if_not_exists: false,
         table_options: Default::default(),
         region_ids: vec![0],
-        table_id: Some(TableId { id: 0 }),
+        table_id: None,
     }
 }

@@ -362,13 +384,11 @@ fn query_set() -> HashMap<String, String> {
     ret
 }

-async fn do_write(args: &Args, client: &Client) {
-    let admin = Admin::new("admin", client.clone());
-
+async fn do_write(args: &Args, db: &Database) {
     let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
     let mut write_jobs = JoinSet::new();

-    let create_table_result = admin.create(create_table_expr()).await;
+    let create_table_result = db.create(create_table_expr()).await;
     println!("Create table result: {create_table_result:?}");

     let progress_bar_style = ProgressStyle::with_template(

@@ -383,7 +403,7 @@ async fn do_write(args: &Args, db: &Database) {
     let batch_size = args.batch_size;
     for _ in 0..args.thread_num {
         if let Some(path) = file_list.pop() {
-            let db = Database::new(DATABASE_NAME, client.clone());
+            let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
             write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });

@@ -392,7 +412,7 @@ async fn do_write(args: &Args, db: &Database) {
     while write_jobs.join_next().await.is_some() {
         file_progress.inc(1);
         if let Some(path) = file_list.pop() {
-            let db = Database::new(DATABASE_NAME, client.clone());
+            let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
             write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });

@@ -405,7 +425,7 @@ async fn do_query(num_iter: usize, db: &Database) {
         println!("Running query: {query}");
         for i in 0..num_iter {
             let now = Instant::now();
-            let _res = db.select(Select::Sql(query.clone())).await.unwrap();
+            let _res = db.sql(&query).await.unwrap();
             let elapsed = now.elapsed();
             println!(
                 "query {}, iteration {}: {}ms",

@@ -427,13 +447,13 @@ fn main() {
         .unwrap()
         .block_on(async {
             let client = Client::with_urls(vec![&args.endpoint]);
+            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

             if !args.skip_write {
-                do_write(&args, &client).await;
+                do_write(&args, &db).await;
             }

             if !args.skip_read {
-                let db = Database::new(DATABASE_NAME, client.clone());
                 do_query(args.iter_num, &db).await;
             }
         })
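A sketch of driving this benchmark against a local instance; the binary name `nyc-taxi` and the `--path` flag are inferred from the `Args` struct above, so treat them as assumptions. Note the default endpoint moved from 3001 (datanode) to 4001 (frontend gRPC):

```sh
cargo run --release --bin nyc-taxi -- \
  --path /path/to/nyc-taxi-parquet \
  --endpoint 127.0.0.1:4001
```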
@@ -8,3 +8,5 @@ coverage:
 ignore:
   - "**/error*.rs" # ignore all error.rs files
   - "tests/runner/*.rs" # ignore integration test runner
+comment: # this is a top-level key
+  layout: "diff"
@@ -1,18 +1,50 @@
-node_id = 42
-mode = 'distributed'
-rpc_addr = '127.0.0.1:3001'
-wal_dir = '/tmp/greptimedb/wal'
-rpc_runtime_size = 8
-mysql_addr = '127.0.0.1:4406'
-mysql_runtime_size = 4
+# Node running mode, see `standalone.example.toml`.
+mode = "distributed"
+# Whether to use in-memory catalog, see `standalone.example.toml`.
+enable_memory_catalog = false
+# The datanode identifier, should be unique.
+node_id = 42
+# gRPC server address, "127.0.0.1:3001" by default.
+rpc_addr = "127.0.0.1:3001"
+# Hostname of this node.
+rpc_hostname = "127.0.0.1"
+# The number of gRPC server worker threads, 8 by default.
+rpc_runtime_size = 8

-[storage]
-type = 'File'
-data_dir = '/tmp/greptimedb/data/'
-
-[meta_client_opts]
-metasrv_addrs = ['127.0.0.1:3002']
+# Metasrv client options.
+[meta_client_options]
+# Metasrv address list.
+metasrv_addrs = ["127.0.0.1:3002"]
+# Operation timeout in milliseconds, 3000 by default.
 timeout_millis = 3000
+# Connect server timeout in milliseconds, 5000 by default.
 connect_timeout_millis = 5000
-tcp_nodelay = false
+# `TCP_NODELAY` option for accepted connections, true by default.
+tcp_nodelay = true
+
+# WAL options, see `standalone.example.toml`.
+[wal]
+dir = "/tmp/greptimedb/wal"
+file_size = "1GB"
+purge_threshold = "50GB"
+purge_interval = "10m"
+read_batch_size = 128
+sync_write = false
+
+# Storage options, see `standalone.example.toml`.
+[storage]
+type = "File"
+data_dir = "/tmp/greptimedb/data/"
+
+# Compaction options, see `standalone.example.toml`.
+[compaction]
+max_inflight_tasks = 4
+max_files_in_level0 = 8
+max_purge_tasks = 32
+
+# Procedure storage options, see `standalone.example.toml`.
+# [procedure.store]
+# type = "File"
+# data_dir = "/tmp/greptimedb/procedure/"
+# max_retry_times = 3
+# retry_delay = "500ms"
@@ -1,12 +1,58 @@
-mode = 'distributed'
-datanode_rpc_addr = '127.0.0.1:3001'
+# Node running mode, see `standalone.example.toml`.
+mode = "distributed"

+# HTTP server options, see `standalone.example.toml`.
[http_options]
-addr = '127.0.0.1:4000'
+addr = "127.0.0.1:4000"
+timeout = "30s"

-[meta_client_opts]
-metasrv_addrs = ['127.0.0.1:3002']
+# gRPC server options, see `standalone.example.toml`.
+[grpc_options]
+addr = "127.0.0.1:4001"
+runtime_size = 8

+# MySQL server options, see `standalone.example.toml`.
+[mysql_options]
+addr = "127.0.0.1:4002"
+runtime_size = 2

+# MySQL server TLS options, see `standalone.example.toml`.
+[mysql_options.tls]
+mode = "disable"
+cert_path = ""
+key_path = ""

+# PostgreSQL server options, see `standalone.example.toml`.
+[postgres_options]
+addr = "127.0.0.1:4003"
+runtime_size = 2

+# PostgreSQL server TLS options, see `standalone.example.toml`.
+[postgres_options.tls]
+mode = "disable"
+cert_path = ""
+key_path = ""

+# OpenTSDB protocol options, see `standalone.example.toml`.
+[opentsdb_options]
+addr = "127.0.0.1:4242"
+runtime_size = 2

+# InfluxDB protocol options, see `standalone.example.toml`.
+[influxdb_options]
+enable = true

+# Prometheus protocol options, see `standalone.example.toml`.
+[prometheus_options]
+enable = true

+# Prom protocol options, see `standalone.example.toml`.
+[prom_options]
+addr = "127.0.0.1:4004"

+# Metasrv client options, see `datanode.example.toml`.
+[meta_client_options]
+metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
-tcp_nodelay = false
+tcp_nodelay = true
@@ -1,4 +1,15 @@
-bind_addr = '127.0.0.1:3002'
-server_addr = '127.0.0.1:3002'
-store_addr = '127.0.0.1:2379'
+# The bind address of metasrv, "127.0.0.1:3002" by default.
+bind_addr = "127.0.0.1:3002"
+# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
+server_addr = "127.0.0.1:3002"
+# Etcd server address, "127.0.0.1:2379" by default.
+store_addr = "127.0.0.1:2379"
+# Datanode lease in seconds, 15 seconds by default.
+datanode_lease_secs = 15
+# Datanode selector type.
+# - "LeaseBased" (default value).
+# - "LoadBased"
+# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
+selector = "LeaseBased"
+# Store data in memory, false by default.
+use_memory_store = false
@@ -1,36 +1,120 @@
-node_id = 0
-mode = 'standalone'
-wal_dir = '/tmp/greptimedb/wal/'
+# Node running mode, "standalone" or "distributed".
+mode = "standalone"
+# Whether to use in-memory catalog, `false` by default.
+enable_memory_catalog = false

+# HTTP server options.
[http_options]
-addr = '127.0.0.1:4000'
+# Server address, "127.0.0.1:4000" by default.
+addr = "127.0.0.1:4000"
+# HTTP request timeout, 30s by default.
+timeout = "30s"

-[storage]
-type = 'File'
-data_dir = '/tmp/greptimedb/data/'

+# gRPC server options.
[grpc_options]
-addr = '127.0.0.1:4001'
+# Server address, "127.0.0.1:4001" by default.
+addr = "127.0.0.1:4001"
+# The number of server worker threads, 8 by default.
runtime_size = 8

+# MySQL server options.
[mysql_options]
-addr = '127.0.0.1:4002'
+# Server address, "127.0.0.1:4002" by default.
+addr = "127.0.0.1:4002"
+# The number of server worker threads, 2 by default.
runtime_size = 2

-[influxdb_options]
-enable = true

-[opentsdb_options]
-addr = '127.0.0.1:4242'
-enable = true
-runtime_size = 2

-[prometheus_options]
-enable = true
+# MySQL server TLS options.
+[mysql_options.tls]
+# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
+# - "disable" (default value)
+# - "prefer"
+# - "require"
+# - "verify-ca"
+# - "verify-full"
+mode = "disable"
+# Certificate file path.
+cert_path = ""
+# Private key file path.
+key_path = ""

+# PostgreSQL server options.
[postgres_options]
-addr = '127.0.0.1:4003'
+# Server address, "127.0.0.1:4003" by default.
+addr = "127.0.0.1:4003"
+# The number of server worker threads, 2 by default.
runtime_size = 2
-check_pwd = false

+# PostgreSQL server TLS options, see `[mysql_options.tls]` section.
+[postgres_options.tls]
+# TLS mode.
+mode = "disable"
+# Certificate file path.
+cert_path = ""
+# Private key file path.
+key_path = ""

+# OpenTSDB protocol options.
+[opentsdb_options]
+# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
+addr = "127.0.0.1:4242"
+# The number of server worker threads, 2 by default.
+runtime_size = 2

+# InfluxDB protocol options.
+[influxdb_options]
+# Whether to enable InfluxDB protocol in HTTP API, true by default.
+enable = true

+# Prometheus protocol options.
+[prometheus_options]
+# Whether to enable Prometheus remote write and read in HTTP API, true by default.
+enable = true

+# Prom protocol options.
+[prom_options]
+# Prometheus API server address, "127.0.0.1:4004" by default.
+addr = "127.0.0.1:4004"

+# WAL options.
+[wal]
+# WAL data directory.
+dir = "/tmp/greptimedb/wal"
+# WAL file size in bytes.
+file_size = "1GB"
+# WAL purge threshold in bytes.
+purge_threshold = "50GB"
+# WAL purge interval in seconds.
+purge_interval = "10m"
+# WAL read batch size.
+read_batch_size = 128
+# Whether to sync log file after every write.
+sync_write = false

+# Storage options.
+[storage]
+# Storage type.
+type = "File"
+# Data directory, "/tmp/greptimedb/data" by default.
+data_dir = "/tmp/greptimedb/data/"

+# Compaction options.
+[compaction]
+# Max task number that can concurrently run.
+max_inflight_tasks = 4
+# Max files in level 0 to trigger compaction.
+max_files_in_level0 = 8
+# Max task number for SST purge task after compaction.
+max_purge_tasks = 32

+# Procedure storage options.
+# Uncomment to enable.
+# [procedure.store]
+# # Storage type.
+# type = "File"
+# # Procedure data path.
+# data_dir = "/tmp/greptimedb/procedure/"
+# # Procedure max retry time.
+# max_retry_times = 3
+# # Initial retry delay of procedures, increases exponentially
+# retry_delay = "500ms"
@@ -9,7 +9,10 @@ RUN apt-get update && apt-get install -y \
    protobuf-compiler \
    curl \
    build-essential \
-   pkg-config
+   pkg-config \
+   python3 \
+   python3-dev \
+   && pip install pyarrow

# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -24,6 +27,8 @@ RUN cargo build --release
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base

+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
+
WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
docker/aarch64/Dockerfile (new file, 57 lines)
@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    build-essential \
    pkg-config \
    wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
    apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
    apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
    ./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependencies, using a separate `RUN` to separate the cache layers.
RUN cargo fetch

# These three env vars are set in the compile script, so set them manually in the Dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variables for cross compiling, then compile.
# The cross-compiled Python is `python3` in PATH, but pyo3 needs `python` in PATH, so alias it.
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
    alias python=python3 && \
    cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Export the binary to the clean image.
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
docker/aarch64/compile-python.sh (new file, 87 lines)
@@ -0,0 +1,87 @@
#!/usr/bin/env bash

set -e

# This script downloads the Python source code, compiles and installs it,
# then uses that Python to build a cross-compiled Python for aarch64.
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
    wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
    tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
    mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

    echo "Compiling for amd64 platform..."

    ./configure \
        --prefix="$PYTHON_INSTALL_PATH_AMD64" \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make install
}

# Explain the Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no); we need it for calling from Rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on; host: the machine you will run the compiled program on
# --with-system-ffi: build the _ctypes module using an installed ffi library, see Doc/library/ctypes.rst; not used here, TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
#   allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: the target platform supports the long long type
# --disable-ipv6: disable ipv6 support, we don't need it here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it here
function compile_for_aarch64_platform() {
    export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
    export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
    export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

    mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

    echo "Compiling for aarch64 platform..."
    echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
    echo "LIBRARY_PATH: $LIBRARY_PATH"
    echo "PATH: $PATH"

    ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
        --prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
        CC=aarch64-linux-gnu-gcc \
        CXX=aarch64-linux-gnu-g++ \
        AR=aarch64-linux-gnu-ar \
        LD=aarch64-linux-gnu-ld \
        RANLIB=aarch64-linux-gnu-ranlib \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
    compile_for_aarch64_platform
fi
@@ -1,5 +1,13 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
-   ca-certificates
+   ca-certificates \
+   python3.10 \
+   python3.10-dev \
+   python3-pip
+
+RUN python3 -m pip install pyarrow

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/
@@ -149,10 +149,10 @@ inputs:
  - title: 'Series Normalize: \noffset = 0'
    operator: prom
    inputs:
-      - title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+      - title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
        operator: filter
        inputs:
-          - title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+          - title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
            operator: scan -->

![]()
docs/rfcs/2023-01-03-procedure-framework.md (new file, 151 lines)
@@ -0,0 +1,151 @@
---
Feature Name: "procedure-framework"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/286
Date: 2023-01-03
Author: "Yingwen <realevenyag@gmail.com>"
---

Procedure Framework
----------------------

# Summary
A framework for executing operations in a fault-tolerant manner.

# Motivation
Some operations in GreptimeDB require multiple steps to implement. For example, creating a table needs to:
1. Check whether the table exists
2. Create the table in the table engine
    1. Create a region for the table in the storage engine
    2. Persist the metadata of the table to the table manifest
3. Add the table to the catalog manager

If the node dies or restarts in the middle of creating a table, it could leave the system in an inconsistent state. The procedure framework, inspired by [Apache HBase's ProcedureV2 framework](https://github.com/apache/hbase/blob/bfc9fc9605de638785435e404430a9408b99a8d0/src/main/asciidoc/_chapters/pv2.adoc) and [Apache Accumulo's FATE framework](https://accumulo.apache.org/docs/2.x/administration/fate), aims to provide a unified way to implement multi-step operations that is tolerant to failure.

# Details
## Overview
The procedure framework consists of the following primary components:
- A `Procedure` represents an operation or a set of operations to be performed step-by-step
- `ProcedureManager`, the runtime that runs `Procedure`s. It executes the submitted procedures, stores their states to the `ProcedureStore`, and restores procedures from the `ProcedureStore` when the database restarts.
- `ProcedureStore` is a storage layer for persisting the procedure state


## Procedures
The `ProcedureManager` keeps calling `Procedure::execute()` until the procedure is done, so the operation of the `Procedure` should be [idempotent](https://developer.mozilla.org/en-US/docs/Glossary/Idempotent): it needs to be able to undo or replay a partial execution of itself.

```rust
trait Procedure {
    fn execute(&mut self, ctx: &Context) -> Result<Status>;

    fn dump(&self) -> Result<String>;

    fn rollback(&self) -> Result<()>;

    // other methods...
}
```

The `Status` is an enum that has the following variants:
```rust
enum Status {
    Executing {
        persist: bool,
    },
    Suspended {
        subprocedures: Vec<ProcedureWithId>,
        persist: bool,
    },
    Done,
}
```

A call to `execute()` can result in the following possibilities:
- `Ok(Status::Done)`: we are done
- `Ok(Status::Executing { .. })`: there are remaining steps to do
- `Ok(Status::Suspended { subprocedures, .. })`: execution is suspended and can be resumed later, after the sub-procedures are done.
- `Err(e)`: an error occurs during execution and the procedure is unable to proceed anymore.
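To make the idempotency requirement concrete, below is a minimal, self-contained sketch of a procedure implementing this contract. The types are pared down to what the example needs, and the `CreateTableProcedure` steps are hypothetical, not GreptimeDB's actual implementation.

```rust
// A pared-down model of the framework (not the real GreptimeDB types),
// plus a hypothetical three-step CreateTableProcedure.
type Result<T> = std::result::Result<T, String>;

enum Status {
    Executing { persist: bool },
    Done,
}

struct Context {
    id: u64, // stands in for ProcedureId
}

trait Procedure {
    fn execute(&mut self, ctx: &Context) -> Result<Status>;
    fn dump(&self) -> Result<String>;
}

enum Step {
    Prepare,
    CreateInEngine,
    RegisterCatalog,
}

struct CreateTableProcedure {
    step: Step,
    table_name: String,
}

impl Procedure for CreateTableProcedure {
    fn execute(&mut self, _ctx: &Context) -> Result<Status> {
        // Every arm must tolerate replay after a crash: it either checks
        // that its work is already done or performs an idempotent action.
        match self.step {
            Step::Prepare => {
                // e.g. fail here if the table exists and
                // create_if_not_exists is false.
                self.step = Step::CreateInEngine;
                Ok(Status::Executing { persist: true })
            }
            Step::CreateInEngine => {
                // e.g. creating a region that already exists is a no-op.
                self.step = Step::RegisterCatalog;
                Ok(Status::Executing { persist: true })
            }
            // Registering an already-registered table is also a no-op.
            Step::RegisterCatalog => Ok(Status::Done),
        }
    }

    fn dump(&self) -> Result<String> {
        // Persist just enough to resume from the current step.
        let step = match self.step {
            Step::Prepare => "prepare",
            Step::CreateInEngine => "create_in_engine",
            Step::RegisterCatalog => "register_catalog",
        };
        Ok(format!("{{\"step\":\"{step}\",\"table\":\"{}\"}}", self.table_name))
    }
}

fn main() -> Result<()> {
    // The manager's driving loop: call execute() until Done, persisting
    // the dumped state whenever the procedure asks for it.
    let ctx = Context { id: 1 };
    let mut procedure = CreateTableProcedure {
        step: Step::Prepare,
        table_name: "metrics".to_string(),
    };
    loop {
        match procedure.execute(&ctx)? {
            Status::Executing { persist } => {
                if persist {
                    println!("persisting: {}", procedure.dump()?);
                }
            }
            Status::Done => break,
        }
    }
    Ok(())
}
```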
Users need to assign a unique `ProcedureId` to the procedure, and the procedure can get this id via the `Context`. The `ProcedureId` is typically a UUID.

```rust
struct Context {
    id: ProcedureId,
    // other fields ...
}
```

The `ProcedureManager` calls `Procedure::dump()` to serialize the internal state of the procedure and writes it to the `ProcedureStore`. The `Status` has a field `persist` to tell the `ProcedureManager` whether it needs persistence.

## Sub-procedures
A procedure may need to create some sub-procedures to process its subtasks. For example, creating a distributed table with multiple regions (partitions) needs to set up the regions in each node, thus the parent procedure should instantiate a sub-procedure for each region. The `ProcedureManager` makes sure that the parent procedure does not proceed till all sub-procedures are successfully finished.

The procedure can submit sub-procedures to the `ProcedureManager` by returning `Status::Suspended`. It needs to assign a procedure id to each sub-procedure manually so it can track their status.
```rust
struct ProcedureWithId {
    id: ProcedureId,
    procedure: BoxedProcedure,
}
```

## ProcedureStore
We might need to provide two different `ProcedureStore` implementations:
- In standalone mode, it stores data on the local disk.
- In distributed mode, it stores data on the meta server or the object store service.

These implementations should share the same storage structure. They store each procedure's state in a unique path based on the procedure id:

```
Sample paths:

/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.step
/procedures/{PROCEDURE_ID}/000003.commit
```

`ProcedureStore` behaves like a WAL. Before performing each step, the `ProcedureManager` can write the procedure's current state to the `ProcedureStore`, which stores the state in a `.step` file. The `000001` in the path is a monotonically increasing step sequence number. After the procedure is done, the `ProcedureManager` puts a `.commit` file to indicate the procedure is finished (committed).

The `ProcedureManager` can remove the procedure's files once the procedure is done, but it needs to leave the `.commit` file as the last one to remove, in case of failure during removal.
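A minimal sketch of this file layout, assuming a local-filesystem `ProcedureStore`; the `FileProcedureStore` type and its method names are illustrative, not the real API.

```rust
use std::fs;
use std::path::PathBuf;

struct FileProcedureStore {
    root: PathBuf, // e.g. "/procedures"
}

impl FileProcedureStore {
    // Write the dumped state before executing the step with this sequence:
    // {root}/{procedure_id}/000001.step, 000002.step, ...
    fn write_step(&self, procedure_id: &str, seq: u32, state: &str) -> std::io::Result<()> {
        let dir = self.root.join(procedure_id);
        fs::create_dir_all(&dir)?;
        fs::write(dir.join(format!("{seq:06}.step")), state)
    }

    // Mark the procedure finished; the `.commit` file is the commit point.
    fn commit(&self, procedure_id: &str, seq: u32) -> std::io::Result<()> {
        fs::write(
            self.root.join(procedure_id).join(format!("{seq:06}.commit")),
            b"",
        )
    }

    // Cleanup keeps the `.commit` file for last, so a crash in the middle
    // of removal never leaves a committed procedure looking unfinished.
    fn remove(&self, procedure_id: &str, commit_seq: u32) -> std::io::Result<()> {
        let dir = self.root.join(procedure_id);
        let commit = format!("{commit_seq:06}.commit");
        for entry in fs::read_dir(&dir)? {
            let entry = entry?;
            if entry.file_name().to_string_lossy() != commit.as_str() {
                fs::remove_file(entry.path())?;
            }
        }
        fs::remove_file(dir.join(&commit))?;
        fs::remove_dir(dir)
    }
}
```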
## ProcedureManager
`ProcedureManager` executes procedures submitted to it.

```rust
trait ProcedureManager {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
}
```

It supports the following operations:
- Register a `ProcedureLoader` by the type name of the `Procedure`.
- Submit a `Procedure` to the manager and execute it.

When `ProcedureManager` starts, it loads procedures from the `ProcedureStore` and restores them via the `ProcedureLoader`. The manager stores the type name from `Procedure::type_name()` together with the data from `Procedure::dump()` in the `.step` file, and uses the type name to find a `ProcedureLoader` to recover the procedure from its data.

```rust
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
```
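A sketch of how recovery could use such loaders; the `LoaderRegistry` type here is hypothetical, and the trait is reduced to the two methods involved.

```rust
use std::collections::HashMap;

type Result<T> = std::result::Result<T, String>;

// Reduced to the two methods recovery needs.
trait Procedure {
    fn type_name(&self) -> &str;
    fn dump(&self) -> Result<String>;
}

type BoxedProcedure = Box<dyn Procedure>;
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;

#[derive(Default)]
struct LoaderRegistry {
    loaders: HashMap<String, BoxedProcedureLoader>,
}

impl LoaderRegistry {
    fn register_loader(&mut self, name: &str, loader: BoxedProcedureLoader) {
        self.loaders.insert(name.to_string(), loader);
    }

    // On restart: a `.step` file stores (type_name, dumped data); look up
    // the loader by type name and rebuild the procedure from the data.
    fn recover(&self, type_name: &str, data: &str) -> Result<BoxedProcedure> {
        let loader = self
            .loaders
            .get(type_name)
            .ok_or_else(|| format!("no loader registered for {type_name}"))?;
        loader(data)
    }
}
```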
## Rollback
The rollback step is supposed to clean up the resources created during the `execute()` step. When a procedure fails, the `ProcedureManager` puts a `.rollback` file and calls the `Procedure::rollback()` method.

```text
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.rollback
```

Rollback is complicated to implement, so some procedures might not support rollback or only provide a best-effort approach.

## Locking
The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object such as a table, so other procedures are unable to modify the same table while the current one is executing.

# Drawbacks
The `Procedure` framework introduces additional complexity and overhead to our database.
- To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server
- We need to rewrite the logic of creating/dropping/altering a table using the procedure framework

# Alternatives
Another approach is to tolerate failure during execution and allow users to retry the operation until it succeeds. But we still need to:
- Make each step idempotent
- Record the status somewhere to check whether we are done
docs/rfcs/2023-02-01-table-compaction.md (new file, 92 lines)
@@ -0,0 +1,92 @@
---
Feature Name: "table-compaction"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/930
Date: 2023-02-01
Author: "Lei, HUANG <mrsatangel@gmail.com>"
---

# Table Compaction

---

## Background

GreptimeDB uses an LSM-tree based storage engine that flushes memtables to SSTs for persistence.
But currently it only supports level 0, and SST files in level 0 are not guaranteed to contain only rows with disjoint time ranges.
That is to say, different SST files in level 0 may contain overlapping timestamps.
The consequence is that, to retrieve rows in some time range, all files need to be scanned, which brings significant IO overhead.

Also, just like in other LSMT engines, deletes/updates to existing primary keys are converted to new rows with a delete/update mark and appended to SSTs on flushing.
We need to merge the operations to the same primary keys so that we don't have to go through all SST files to find the final state of these primary keys.

## Goal

Implement a compaction framework to:
- maintain SSTs in timestamp order to accelerate queries with timestamp conditions;
- merge rows with the same primary key;
- purge expired SSTs;
- accommodate other tasks like data rollup/indexing.


## Overview

Table compaction involves the following components:
- Compaction scheduler: runs compaction tasks and limits the consumed resources;
- Compaction strategy: finds the SSTs to compact and determines the output files of compaction;
- Compaction task: reads the rows from input SSTs and writes them to the output files.

## Implementation

### Compaction scheduler

`CompactionScheduler` is an executor that continuously polls and executes compaction requests from a task queue.

```rust
#[async_trait]
pub trait CompactionScheduler {
    /// Schedules a compaction task.
    async fn schedule(&self, task: CompactionRequest) -> Result<()>;

    /// Stops compaction scheduler.
    async fn stop(&self) -> Result<()>;
}
```


### Compaction triggering

Currently, we can check whether to compact a table when its memtable is flushed to SST.

https://github.com/GreptimeTeam/greptimedb/blob/4015dd80752e1e6aaa3d7cacc3203cb67ed9be6d/src/storage/src/flush.rs#L245


### Compaction strategy

`CompactionStrategy` defines how to pick SSTs in all levels for compaction.

```rust
pub trait CompactionStrategy {
    fn pick(
        &self,
        ctx: CompactionContext,
        levels: &LevelMetas,
    ) -> Result<CompactionTask>;
}
```

The most suitable compaction strategy for the time-series scenario would be a hybrid strategy that combines time-window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.

We can first group SSTs in level n into buckets according to some predefined time window. Within that window, SSTs are compacted in a size-tiered manner (find SSTs with similar sizes and compact them to level n+1); SSTs from different time windows are never compacted together.
This strategy guarantees that SSTs in each level are mainly sorted in timestamp order, which boosts queries with explicit timestamp conditions, while size-tiered compaction minimizes the impact on foreground writes.
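A sketch of the bucketing just described, under the assumption of a fixed time window and a simple "largest at most 2x the smallest" size tier; `FileMeta`, `pick_candidates`, and the thresholds are illustrative, not the engine's real types.

```rust
use std::collections::BTreeMap;

// Minimal stand-in for SST metadata.
#[derive(Debug, Clone)]
struct FileMeta {
    start_ts: i64, // first timestamp in the file, in milliseconds
    size: u64,     // file size in bytes
}

// Group level-n SSTs into fixed time windows, then within each window
// collect runs of similarly sized files as candidates for level n+1.
fn pick_candidates(files: &[FileMeta], window_millis: i64, min_run: usize) -> Vec<Vec<FileMeta>> {
    // 1. Bucket by time window; files from different windows are never
    //    compacted together.
    let mut buckets: BTreeMap<i64, Vec<FileMeta>> = BTreeMap::new();
    for f in files {
        buckets.entry(f.start_ts / window_millis).or_default().push(f.clone());
    }

    // 2. Size-tiered within each window: sort by size and take runs where
    //    the largest file is at most 2x the smallest.
    let mut candidates = Vec::new();
    for (_window, mut bucket) in buckets {
        bucket.sort_by_key(|f| f.size);
        let mut run: Vec<FileMeta> = Vec::new();
        for f in bucket {
            if run.is_empty() || f.size <= run[0].size * 2 {
                run.push(f);
            } else {
                if run.len() >= min_run {
                    candidates.push(run);
                }
                run = vec![f];
            }
        }
        if run.len() >= min_run {
            candidates.push(run);
        }
    }
    candidates
}
```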
### Alternatives

Currently, GreptimeDB's storage engine [only supports two levels](https://github.com/GreptimeTeam/greptimedb/blob/43aefc5d74dfa73b7819cae77b7eb546d8534a41/src/storage/src/sst.rs#L32).
For level 0, we can start with a simple time-window based leveled compaction, which reads all SSTs in level 0, aligns them to time windows with a fixed duration, and merges them with the SSTs in level 1 within the same time window, to ensure there is only one sorted run in level 1.
docs/rfcs/2023-03-08-region-fault-tolerance.md (new file, 196 lines)
@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---

Fault Tolerance for Region
----------------------

# Summary

This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, put another way, achieving region high availability ("HA") for the GreptimeDB cluster.

In this RFC, we mainly describe two aspects of region HA: how region failures are detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.

When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, though some requests may fail during region failover. Optimizations to reduce the MTTR (Mean Time To Recovery) are not a concern of this RFC and are left for future work.

# Motivation

Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.

# Details

## Background

Some background about regions in distributed mode:

- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed in Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped in regions. To be more specific, when a request that needs to scan multiple regions arrives in Frontend, Frontend splits the request into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.

In conclusion, as long as regions remain available, and regions can regain availability when failures do occur, overall region HA can be achieved. With this in mind, let's see how region failures are detected first.

## Failure Detection

We detect region failures in Metasrv, and do it both passively and actively. Passively means that Metasrv does not fire "are you healthy" requests at regions. Instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.

Datanode already carries its region stats in the heartbeat request (the non-relevant fields are omitted):

```protobuf
message HeartbeatRequest {
  ...
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  ...
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  ...
}
```

For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.

> The heartbeat interval is now hardcoded to five seconds.

Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If a region's heartbeats are not received for a period of time, Metasrv suspects that the region might be unavailable. To decide whether a region has failed or not, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" to represent the likelihood of a region's unavailability, based on the arrival rate of historical heartbeats. Once the "phi" rises above some pre-defined threshold, Metasrv knows the region has failed.

> This algorithm has been widely adopted in some well-known products, like Akka and Cassandra.
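For intuition, here is a back-of-the-envelope sketch of the phi computation, using the logistic approximation of the normal tail that Akka's failure detector uses. The fixed mean/stddev inputs are a simplification: real detectors estimate them from a sliding window of heartbeat inter-arrival times.

```rust
// phi = -log10(P(a heartbeat is still this late, given the node is alive)).
fn phi(elapsed_ms: f64, mean_ms: f64, stddev_ms: f64) -> f64 {
    let y = (elapsed_ms - mean_ms) / stddev_ms;
    // Logistic approximation of the normal CDF tail (as in Akka).
    let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
    let p_later = if elapsed_ms > mean_ms {
        e / (1.0 + e)
    } else {
        1.0 - 1.0 / (1.0 + e)
    };
    -p_later.log10()
}

fn main() {
    // Heartbeats nominally every 5s with ~500ms jitter.
    println!("{:.1}", phi(5_000.0, 5_000.0, 500.0)); // ~0.3: healthy
    println!("{:.1}", phi(8_000.0, 5_000.0, 500.0)); // ~10.8: above a typical threshold of 8
}
```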
When Metasrv concludes from heartbeats that some region has failed, that is not the final decision. Here comes the "active" detection: before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start the failover upon the region.

To conclude, the failure detection pseudo-code is like this:

```rust
// in Metasrv:
fn failure_detection() {
    loop {
        // passive detection
        let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();

        // find the datanodes that contain the failed regions
        let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

        // active detection
        for (datanode, regions) in datanodes_and_regions {
            if !datanode.is_healthy(regions) {
                do_failover(datanode, regions);
            }
        }

        sleep(config.detect_interval);
    }
}
```

Some design considerations:

- Why detect actively when we have passive detection? Because the network can sometimes be connectable in one direction only (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does the detection work on regions instead of Datanodes? Because only part of the regions in a Datanode may be unavailable, not ALL regions, especially when Datanodes are shared by multiple tenants. If that is the case, it's better to do failover upon the affected regions instead of all the regions that reside on the Datanode. All in all, we want finer-grained control over region failover.

So we have detected that some regions are unavailable. How do we regain their availability?

## Region Failover

Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.

In general, region failover is fairly simple. Once Metasrv decides to do failover upon some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that can all help Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".

> The strategy to choose the most suitable candidates requires careful design, but that's another RFC.

Then, Metasrv sets the states of these failed regions as "passive". We should add a field to `Region`:

```protobuf
message Region {
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  message State {
    Active,
    Passive,
  }
  State state = 4;

  map<string, string> attrs = 100;
}
```

Here `Region` is used in message `RegionRoute`, which indicates how the write request is split among regions. When a region is set as "passive", Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however).

> Making a region "passive" here effectively blocks writes to it. That's OK in the failover situation; the region has failed anyway. However, when dealing with active maintenance operations, the region state requires a more refined design. But that's another story.

Third, Metasrv fires "close region" requests to the failed Datanodes, and fires "open region" requests to the candidates. "Close region" requests might fail due to the unavailability of the Datanodes, but that's fine; they are just a best-effort attempt to reduce the chance that any in-flight writes get handled unintentionally after the region is set as "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.

> Currently, "close region" is undefined in Datanode. It could be a local cache cleanup of region data, or other resource tidy-up.

Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating it is ready to handle the region. The "call back" here is carried by its heartbeat to Metasrv. Metasrv updates the region's state to "active", letting Frontend lift the restriction on writes to the region (again, reads from the region are untouched).

All the above steps should be managed by the remote procedure framework. That is another implementation challenge in the region failover feature. (One is the remote WAL, of course.)

A picture is worth a thousand words:

```text
                      +-------------------------+
                      | Metasrv detects region  |
                      | failure                 |
                      +-------------------------+
                                  |
                                  v
                     +----------------------------+
                     | Metasrv chooses candidates |
                     | to hold failed regions     |
                     +----------------------------+
                                  |
                                  v
                      +-------------------------+       +-------------------------+
                      | Metasrv "passive" the   |------>| Frontend rejects writes |
                      | failed regions          |       | to "passive" regions    |
                      +-------------------------+       +-------------------------+
                                  |
                                  v
                     +--------------------------+       +---------------------------+
                     | Candidate Datanodes open |<------| Metasrv fires "close" and |
                     | regions from remote WAL  |       | "open" region requests    |
                     +--------------------------+       +---------------------------+
                                  |
                                  |
                                  |   +-------------------------+       +-------------------------+
                                  +-->| Metasrv "active" the    |------>| Frontend lifts write    |
                                      | failed regions          |       | restriction to regions  |
                                      +-------------------------+       +-------------------------+
                                                  |
                                                  v
                                      +-------------------------+
                                      | Region failover done,   |
                                      | HA regain               |
                                      +-------------------------+
```

# Alternatives

## The "Neon" Way

Remote WAL raises a problem that could harm the write throughput of the GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, making the remote WAL sit between the Frontend and the Datanode; couldn't that improve our write throughput? It could, though there are some consistency issues like "read-your-writes" to solve.

However, the main reasons we don't adopt this method are two-fold:

1. Remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reliability).

## No WAL, Replication instead

This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Were the main region to fail, a standby region in the replica group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It requires a major redesign, but gains no significant advantage over the remote WAL method.

However, replication does have its own advantages that we can learn from to optimize this failover procedure.

# Future Work

Some optimizations we could make:

- To reduce the MTTR, we could have Metasrv choose the candidates for each region during normal operation. The candidates do some preparation work to reduce the region open time, effectively accelerating the failover procedure.
- We can adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover does not need to load or exchange much data, greatly reducing the region failover time.
@@ -1 +0,0 @@
-nightly-2022-12-20
rust-toolchain.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly-2023-02-26"
@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
    wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
  fi

-  tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
+  tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi
@@ -5,13 +5,15 @@ edition.workspace = true
license.workspace = true

[dependencies]
+arrow-flight.workspace = true
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-prost = "0.11"
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eb760d219206c77dd3a105ecb6a3ba97d9d650ec" }
+prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
-tonic = "0.8"
+tonic.workspace = true

-[build-dependencies]
-tonic-build = "0.8"
@@ -1,34 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;

fn main() {
    let default_out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
    tonic_build::configure()
        .file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
        .compile(
            &[
                "greptime/v1/select.proto",
                "greptime/v1/greptime.proto",
                "greptime/v1/meta/common.proto",
                "greptime/v1/meta/heartbeat.proto",
                "greptime/v1/meta/route.proto",
                "greptime/v1/meta/store.proto",
                "prometheus/remote/remote.proto",
            ],
            &["."],
        )
        .expect("compile proto");
}
@@ -1,88 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

message AdminRequest {
  string name = 1;
  repeated AdminExpr exprs = 2;
}

message AdminResponse {
  repeated AdminResult results = 1;
}

message AdminExpr {
  ExprHeader header = 1;
  oneof expr {
    CreateTableExpr create_table = 2;
    AlterExpr alter = 3;
    CreateDatabaseExpr create_database = 4;
    DropTableExpr drop_table = 5;
  }
}

message AdminResult {
  ResultHeader header = 1;
  oneof result {
    MutateResult mutate = 2;
  }
}

message CreateTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  string desc = 4;
  repeated ColumnDef column_defs = 5;
  string time_index = 6;
  repeated string primary_keys = 7;
  bool create_if_not_exists = 8;
  map<string, string> table_options = 9;
  TableId table_id = 10;
  repeated uint32 region_ids = 11;
}

message AlterExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  oneof kind {
    AddColumns add_columns = 4;
    DropColumns drop_columns = 5;
  }
}

message DropTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message CreateDatabaseExpr {
  //TODO(hl): maybe rename to schema_name?
  string database_name = 1;
}

message AddColumns {
  repeated AddColumn add_columns = 1;
}

message DropColumns {
  repeated DropColumn drop_columns = 1;
}

message AddColumn {
  ColumnDef column_def = 1;
  bool is_key = 2;
}

message DropColumn {
  string name = 1;
}

message TableId {
  uint32 id = 1;
}
@@ -1,85 +0,0 @@
syntax = "proto3";

package greptime.v1;

message Column {
  string column_name = 1;

  enum SemanticType {
    TAG = 0;
    FIELD = 1;
    TIMESTAMP = 2;
  }
  SemanticType semantic_type = 2;

  message Values {
    repeated int32 i8_values = 1;
    repeated int32 i16_values = 2;
    repeated int32 i32_values = 3;
    repeated int64 i64_values = 4;

    repeated uint32 u8_values = 5;
    repeated uint32 u16_values = 6;
    repeated uint32 u32_values = 7;
    repeated uint64 u64_values = 8;

    repeated float f32_values = 9;
    repeated double f64_values = 10;

    repeated bool bool_values = 11;
    repeated bytes binary_values = 12;
    repeated string string_values = 13;

    repeated int32 date_values = 14;
    repeated int64 datetime_values = 15;
    repeated int64 ts_second_values = 16;
    repeated int64 ts_millisecond_values = 17;
    repeated int64 ts_microsecond_values = 18;
    repeated int64 ts_nanosecond_values = 19;
  }
  // The array of non-null values in this column.
  //
  // For example: suppose there is a column "foo" that contains some int32 values (1, 2, 3, 4, 5, null, 7, 8, 9, null);
  // column:
  //   column_name: foo
  //   semantic_type: Tag
  //   values: 1, 2, 3, 4, 5, 7, 8, 9
  //   null_masks: 00100000 00000010
  Values values = 3;

  // Mask maps the positions of null values.
  // If a bit in null_mask is 1, it indicates that the column value at that position is null.
  bytes null_mask = 4;

  // Helpful in creating vector from column.
  ColumnDataType datatype = 5;
}

message ColumnDef {
  string name = 1;
  ColumnDataType datatype = 2;
  bool is_nullable = 3;
  bytes default_constraint = 4;
}

enum ColumnDataType {
  BOOLEAN = 0;
  INT8 = 1;
  INT16 = 2;
  INT32 = 3;
  INT64 = 4;
  UINT8 = 5;
  UINT16 = 6;
  UINT32 = 7;
  UINT64 = 8;
  FLOAT32 = 9;
  FLOAT64 = 10;
  BINARY = 11;
  STRING = 12;
  DATE = 13;
  DATETIME = 14;
  TIMESTAMP_SECOND = 15;
  TIMESTAMP_MILLISECOND = 16;
  TIMESTAMP_MICROSECOND = 17;
  TIMESTAMP_NANOSECOND = 18;
}
@@ -1,22 +0,0 @@
syntax = "proto3";

package greptime.v1;

message RequestHeader {
  string tenant = 1;
}

message ExprHeader {
  uint32 version = 1;
}

message ResultHeader {
  uint32 version = 1;
  uint32 code = 2;
  string err_msg = 3;
}

message MutateResult {
  uint32 success = 1;
  uint32 failure = 2;
}
@@ -1,66 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

message DatabaseRequest {
  string name = 1;
  repeated ObjectExpr exprs = 2;
}

message DatabaseResponse {
  repeated ObjectResult results = 1;
}

message ObjectExpr {
  ExprHeader header = 1;
  oneof expr {
    InsertExpr insert = 2;
    SelectExpr select = 3;
    UpdateExpr update = 4;
    DeleteExpr delete = 5;
  }
}

// TODO(fys): Only support sql now, and will support promql etc in the future
message SelectExpr {
  oneof expr {
    string sql = 1;
    bytes logical_plan = 2;
  }
}

message InsertExpr {
  string schema_name = 1;
  string table_name = 2;

  // Data is represented here.
  repeated Column columns = 3;

  // The row_count of all columns, which include null and non-null values.
  //
  // Note: the row_count of all columns in a InsertExpr must be same.
  uint32 row_count = 4;

  // The region number of current insert request.
  uint32 region_number = 5;
}

// TODO(jiachun)
message UpdateExpr {}
// TODO(jiachun)
message DeleteExpr {}

message ObjectResult {
  ResultHeader header = 1;
  oneof result {
    SelectResult select = 2;
    MutateResult mutate = 3;
  }
}

message SelectResult {
  bytes raw_data = 1;
}
@@ -1,22 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/admin.proto";
import "greptime/v1/common.proto";
import "greptime/v1/database.proto";

service Greptime {
  rpc Batch(BatchRequest) returns (BatchResponse) {}
}

message BatchRequest {
  RequestHeader header = 1;
  repeated AdminRequest admins = 2;
  repeated DatabaseRequest databases = 3;
}

message BatchResponse {
  repeated AdminResponse admins = 1;
  repeated DatabaseResponse databases = 2;
}
@@ -1,48 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

message RequestHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which be sent to.
  uint64 cluster_id = 2;
  // member_id is the ID of the sender server.
  uint64 member_id = 3;
}

message ResponseHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which sent the response.
  uint64 cluster_id = 2;
  Error error = 3;
}

message Error {
  int32 code = 1;
  string err_msg = 2;
}

message Peer {
  uint64 id = 1;
  string addr = 2;
}

message TableName {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message TimeInterval {
  // The unix timestamp in millis of the start of this period.
  uint64 start_timestamp_millis = 1;
  // The unix timestamp in millis of the end of this period.
  uint64 end_timestamp_millis = 2;
}

message KeyValue {
  // key is the key in bytes. An empty key is not allowed.
  bytes key = 1;
  // value is the value held by the key, in bytes.
  bytes value = 2;
}
@@ -1,92 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Heartbeat {
  // Heartbeat, there may be many contents of the heartbeat, such as:
  // 1. Metadata to be registered to meta server and discoverable by other nodes.
  // 2. Some performance metrics, such as Load, CPU usage, etc.
  // 3. The number of computing tasks being executed.
  rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}

  // Ask leader's endpoint.
  rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
}

message HeartbeatRequest {
  RequestHeader header = 1;

  // Self peer
  Peer peer = 2;
  // Leader node
  bool is_leader = 3;
  // Actually reported time interval
  TimeInterval report_interval = 4;
  // Node stat
  NodeStat node_stat = 5;
  // Region stats in this node
  repeated RegionStat region_stats = 6;
  // Follower nodes and stats, empty on follower nodes
  repeated ReplicaStat replica_stats = 7;
}

message NodeStat {
  // The read capacity units during this period
  uint64 rcus = 1;
  // The write capacity units during this period
  uint64 wcus = 2;
  // Table number in this node
  uint64 table_num = 3;
  // Region number in this node
  uint64 region_num = 4;

  double cpu_usage = 5;
  double load = 6;
  // Read disk I/O in the node
  double read_io_rate = 7;
  // Write disk I/O in the node
  double write_io_rate = 8;

  // Others
  map<string, string> attrs = 100;
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  // The read capacity units during this period
  uint64 rcus = 3;
  // The write capacity units during this period
  uint64 wcus = 4;
  // Approximate region size
  uint64 approximate_size = 5;
  // Approximate number of rows
  uint64 approximate_rows = 6;

  // Others
  map<string, string> attrs = 100;
}

message ReplicaStat {
  Peer peer = 1;
  bool in_sync = 2;
  bool is_learner = 3;
}

message HeartbeatResponse {
  ResponseHeader header = 1;

  repeated bytes payload = 2;
}

message AskLeaderRequest {
  RequestHeader header = 1;
}

message AskLeaderResponse {
  ResponseHeader header = 1;

  Peer leader = 2;
}
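
For orientation, a minimal sketch of how a datanode might populate this request, assuming the prost-generated Rust types for these messages (`RequestHeader::new` is defined in the meta module shown later in this diff); every value below is an illustrative placeholder:

```rust
// Minimal sketch, assuming prost-generated types for greptime.v1.meta.
// All values are illustrative placeholders, not from a real deployment.
fn build_heartbeat(cluster_id: u64, member_id: u64) -> HeartbeatRequest {
    HeartbeatRequest {
        header: Some(RequestHeader::new((cluster_id, member_id))),
        peer: Some(Peer {
            id: member_id,
            addr: "127.0.0.1:3001".to_string(),
        }),
        is_leader: false,
        report_interval: Some(TimeInterval {
            start_timestamp_millis: 1_672_531_200_000,
            end_timestamp_millis: 1_672_531_205_000,
        }),
        node_stat: Some(NodeStat {
            rcus: 10,
            wcus: 5,
            table_num: 1,
            region_num: 2,
            ..Default::default()
        }),
        region_stats: vec![],
        // Per the field comment above, only a leader node fills this in.
        replica_stats: vec![],
    }
}
```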

@@ -1,98 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Router {
  rpc Create(CreateRequest) returns (RouteResponse) {}

  // Fetch routing information for tables. The smallest unit is the complete
  // routing information (all regions) of a table.
  //
  // ```text
  // table_1
  //    table_name
  //    table_schema
  //    regions
  //      region_1
  //        leader_peer
  //        follower_peer_1, follower_peer_2
  //      region_2
  //        leader_peer
  //        follower_peer_1, follower_peer_2, follower_peer_3
  //      region_xxx
  // table_2
  // ...
  // ```
  //
  rpc Route(RouteRequest) returns (RouteResponse) {}

  rpc Delete(DeleteRequest) returns (RouteResponse) {}
}

message CreateRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
  repeated Partition partitions = 3;
}

message RouteRequest {
  RequestHeader header = 1;

  repeated TableName table_names = 2;
}

message DeleteRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
}

message RouteResponse {
  ResponseHeader header = 1;

  repeated Peer peers = 2;
  repeated TableRoute table_routes = 3;
}

message TableRoute {
  Table table = 1;
  repeated RegionRoute region_routes = 2;
}

message RegionRoute {
  Region region = 1;
  // single leader node for write task
  uint64 leader_peer_index = 2;
  // multiple follower nodes for read task
  repeated uint64 follower_peer_indexes = 3;
}
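
The two index fields above are offsets into `RouteResponse.peers`, which lets the server send each peer once even when it leads or follows many regions. A hedged sketch of resolving leaders on the client side, assuming prost-generated types:

```rust
// Minimal sketch, assuming prost-generated types for greptime.v1.meta.
// Collects the address of the leader peer of every routed region.
fn leader_addrs(resp: &RouteResponse) -> Vec<String> {
    let mut addrs = Vec::new();
    for table_route in &resp.table_routes {
        for region_route in &table_route.region_routes {
            // leader_peer_index points into the shared, deduplicated
            // `peers` array (built with PeerDict on the server side,
            // shown later in this diff).
            if let Some(peer) = resp.peers.get(region_route.leader_peer_index as usize) {
                addrs.push(peer.addr.clone());
            }
        }
    }
    addrs
}
```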

message Table {
  uint64 id = 1;
  TableName table_name = 2;
  bytes table_schema = 3;
}

message Region {
  // TODO(LFC): Maybe use message RegionNumber?
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  map<string, string> attrs = 100;
}

// PARTITION `region_name` VALUES LESS THAN (value_list)
message Partition {
  repeated bytes column_list = 1;
  repeated bytes value_list = 2;
}

// This message is only for saving into store.
message TableRouteValue {
  repeated Peer peers = 1;
  TableRoute table_route = 2;
}

@@ -1,159 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Store {
  // Range gets the keys in the range from the key-value store.
  rpc Range(RangeRequest) returns (RangeResponse);

  // Put puts the given key into the key-value store.
  rpc Put(PutRequest) returns (PutResponse);

  // BatchPut atomically puts the given keys into the key-value store.
  rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);

  // CompareAndPut atomically puts the value to the given updated
  // value if the current value == the expected value.
  rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);

  // DeleteRange deletes the given range from the key-value store.
  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);

  // MoveValue atomically renames the key to the given updated key.
  rpc MoveValue(MoveValueRequest) returns (MoveValueResponse);
}

message RangeRequest {
  RequestHeader header = 1;

  // key is the first key for the range. If range_end is not given, the
  // request only looks up key.
  bytes key = 2;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all
  // keys.
  bytes range_end = 3;
  // limit is a limit on the number of keys returned for the request. When
  // limit is set to 0, it is treated as no limit.
  int64 limit = 4;
  // keys_only when set returns only the keys and not the values.
  bool keys_only = 5;
}
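
The `range_end` conventions above follow the etcd-style KV API: `'\0'` opens the range upward, and key-plus-one turns a point lookup into a prefix scan. A small self-contained sketch of computing that bound from the rules in the comment:

```rust
// Minimal sketch of the range_end rules described above.
// Returns the exclusive upper bound that makes [key, range_end) a prefix
// scan: "aa" -> "ab", "a\xff" -> "b"; if every byte is 0xff, fall back to
// "\0", which means "all keys >= key".
fn prefix_range_end(key: &[u8]) -> Vec<u8> {
    let mut end = key.to_vec();
    while let Some(last) = end.pop() {
        if last < 0xff {
            end.push(last + 1);
            return end;
        }
        // A 0xff byte carries into the previous byte, e.g. "a\xff" + 1 == "b".
    }
    vec![0]
}
```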

message RangeResponse {
  ResponseHeader header = 1;

  // kvs is the list of key-value pairs matched by the range request.
  repeated KeyValue kvs = 2;
  // more indicates if there are more keys to return in the requested range.
  bool more = 3;
}

message PutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 3;
  // If prev_kv is set, gets the previous key-value pair before changing it.
  // The previous key-value pair will be returned in the put response.
  bool prev_kv = 4;
}

message PutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pair will be
  // returned.
  KeyValue prev_kv = 2;
}

message BatchPutRequest {
  RequestHeader header = 1;

  repeated KeyValue kvs = 2;
  // If prev_kv is set, gets the previous key-value pairs before changing it.
  // The previous key-value pairs will be returned in the batch put response.
  bool prev_kv = 3;
}

message BatchPutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 2;
}

message CompareAndPutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // expect is the previous value, in bytes
  bytes expect = 3;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 4;
}

message CompareAndPutResponse {
  ResponseHeader header = 1;

  bool success = 2;
  KeyValue prev_kv = 3;
}
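
A short sketch of using this primitive as an optimistic update, assuming prost-generated types; the transport that actually carries the request to the meta server is deliberately left out, and the key name is a made-up illustration:

```rust
// Minimal sketch: build a CompareAndPut request and interpret the response.
fn build_cas(key: &[u8], expect: &[u8], new_value: &[u8]) -> CompareAndPutRequest {
    CompareAndPutRequest {
        header: None, // set via set_header(...) before sending (see meta module later)
        key: key.to_vec(),
        expect: expect.to_vec(), // the value we believe is currently stored
        value: new_value.to_vec(),
    }
}

fn cas_succeeded(resp: &CompareAndPutResponse) -> bool {
    // On failure, prev_kv carries the value that won the race, so the
    // caller can re-read and retry with a fresh `expect`.
    resp.success
}
```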

message DeleteRangeRequest {
  RequestHeader header = 1;

  // key is the first key to delete in the range.
  bytes key = 2;
  // range_end is the key following the last key to delete for the range
  // [key, range_end).
  // If range_end is not given, the range is defined to contain only the key
  // argument.
  // If range_end is one bit larger than the given key, then the range is all
  // the keys with the prefix (the given key).
  // If range_end is '\0', the range is all keys greater than or equal to the
  // key argument.
  bytes range_end = 3;
  // If prev_kv is set, gets the previous key-value pairs before deleting it.
  // The previous key-value pairs will be returned in the delete response.
  bool prev_kv = 4;
}

message DeleteRangeResponse {
  ResponseHeader header = 1;

  // deleted is the number of keys deleted by the delete range request.
  int64 deleted = 2;
  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 3;
}

message MoveValueRequest {
  RequestHeader header = 1;

  // If from_key does not exist, return the value of to_key (if it exists).
  // If from_key exists, move the value of from_key to to_key (i.e. rename),
  // and return the value.
  bytes from_key = 2;
  bytes to_key = 3;
}

message MoveValueResponse {
  ResponseHeader header = 1;

  // If from_key does not exist, return the value of to_key (if it exists).
  // If from_key exists, return the value of from_key.
  KeyValue kv = 2;
}

@@ -1,10 +0,0 @@
syntax = "proto3";

package greptime.v1.codec;

import "greptime/v1/column.proto";

message SelectResult {
  repeated Column columns = 1;
  uint32 row_count = 2;
}

@@ -1,85 +0,0 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "prometheus/remote/types.proto";

message WriteRequest {
  repeated prometheus.TimeSeries timeseries = 1;
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  repeated prometheus.MetricMetadata metadata = 3;
}

// ReadRequest represents a remote read request.
message ReadRequest {
  repeated Query queries = 1;

  enum ResponseType {
    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
    // It's recommended to use streamed response types instead.
    //
    // Response headers:
    // Content-Type: "application/x-protobuf"
    // Content-Encoding: "snappy"
    SAMPLES = 0;
    // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
    // Each message is preceded by a varint size and a fixed-size big-endian uint32 CRC32 Castagnoli checksum.
    //
    // Response headers:
    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
    // Content-Encoding: ""
    STREAMED_XOR_CHUNKS = 1;
  }

  // accepted_response_types allows negotiating the content type of the response.
  //
  // Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
  // implemented by the server, an error is returned.
  // For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
  repeated ResponseType accepted_response_types = 2;
}
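
A hedged sketch of building such a request in Rust, assuming prost's usual code generation for nested enums (`read_request::ResponseType`, `r#type` for the reserved field name); the query values are illustrative:

```rust
// Minimal sketch, assuming prost-generated types for these messages.
// Ask for the streamed chunked encoding first, but stay compatible with
// servers that only implement SAMPLES.
fn chunked_read_request() -> ReadRequest {
    ReadRequest {
        queries: vec![Query {
            start_timestamp_ms: 1_672_531_200_000,
            end_timestamp_ms: 1_672_534_800_000,
            matchers: vec![LabelMatcher {
                r#type: label_matcher::Type::Eq as i32,
                name: "__name__".to_string(),
                value: "up".to_string(),
            }],
            hints: None,
        }],
        // FIFO: the server answers with the first type it implements.
        accepted_response_types: vec![
            read_request::ResponseType::StreamedXorChunks as i32,
            read_request::ResponseType::Samples as i32,
        ],
    }
}
```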

// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
  // In same order as the request's queries.
  repeated QueryResult results = 1;
}

message Query {
  int64 start_timestamp_ms = 1;
  int64 end_timestamp_ms = 2;
  repeated prometheus.LabelMatcher matchers = 3;
  prometheus.ReadHints hints = 4;
}

message QueryResult {
  // Samples within a time series must be ordered by time.
  repeated prometheus.TimeSeries timeseries = 1;
}

// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
  repeated prometheus.ChunkedSeries chunked_series = 1;

  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
  int64 query_index = 2;
}
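
The `STREAMED_XOR_CHUNKS` comment earlier pins down the frame layout only loosely; the Prometheus reference implementation writes a uvarint length, then a big-endian CRC32-Castagnoli of the payload, then the payload. Treating that order as an assumption, a dependency-free sketch of splitting one frame off a buffer:

```rust
// Minimal sketch of the delimited framing described above: uvarint length,
// 4-byte big-endian CRC32-Castagnoli checksum, then the message bytes.
// The field order follows the Prometheus reference implementation and is
// an assumption here. Returns (payload, remaining buffer).
fn read_frame(buf: &[u8]) -> Option<(&[u8], &[u8])> {
    // Decode the uvarint length prefix.
    let (mut len, mut shift, mut i) = (0u64, 0u32, 0usize);
    loop {
        let b = *buf.get(i)?;
        i += 1;
        len |= u64::from(b & 0x7f) << shift;
        if b & 0x80 == 0 {
            break;
        }
        shift += 7;
    }
    let crc = u32::from_be_bytes(buf.get(i..i + 4)?.try_into().ok()?);
    let payload = buf.get(i + 4..i + 4 + len as usize)?;
    // A real reader would verify `crc` against the payload (e.g. with the
    // crc32c crate) before protobuf-decoding a ChunkedReadResponse.
    let _ = crc;
    Some((payload, &buf[i + 4 + len as usize..]))
}
```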

@@ -1,117 +0,0 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type, these match the set from Prometheus.
  // Refer to model/textparse/interface.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1;
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1;
  repeated Sample samples = 2;
  repeated Exemplar exemplars = 3;
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1;
}

// Matcher specifies a rule, which can match a set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}

message ReadHints {
  int64 step_ms = 1;            // Query step size in milliseconds.
  string func = 2;              // String representation of surrounding function or aggregation.
  int64 start_ms = 3;           // Start time in milliseconds.
  int64 end_ms = 4;             // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6;                  // Indicate whether it is without or by.
  int64 range_ms = 7;           // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1;
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2;
}

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -97,7 +97,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
                TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
            },
            ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
            ConcreteDataType::Null(_)
            | ConcreteDataType::List(_)
            | ConcreteDataType::Dictionary(_) => {
                return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
            }
        });

@@ -105,125 +107,121 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
    }
}

impl Values {
    pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
        match datatype {
            ColumnDataType::Boolean => Values {
                bool_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int8 => Values {
                i8_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int16 => Values {
                i16_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int32 => Values {
                i32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int64 => Values {
                i64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint8 => Values {
                u8_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint16 => Values {
                u16_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint32 => Values {
                u32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint64 => Values {
                u64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Float32 => Values {
                f32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Float64 => Values {
                f64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Binary => Values {
                binary_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::String => Values {
                string_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Date => Values {
                date_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Datetime => Values {
                datetime_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampSecond => Values {
                ts_second_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampMillisecond => Values {
                ts_millisecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampMicrosecond => Values {
                ts_microsecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampNanosecond => Values {
                ts_nanosecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
        }
pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values {
    match datatype {
        ColumnDataType::Boolean => Values {
            bool_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int8 => Values {
            i8_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int16 => Values {
            i16_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int32 => Values {
            i32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int64 => Values {
            i64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint8 => Values {
            u8_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint16 => Values {
            u16_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint32 => Values {
            u32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint64 => Values {
            u64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Float32 => Values {
            f32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Float64 => Values {
            f64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Binary => Values {
            binary_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::String => Values {
            string_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Date => Values {
            date_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Datetime => Values {
            datetime_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampSecond => Values {
            ts_second_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampMillisecond => Values {
            ts_millisecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampMicrosecond => Values {
            ts_microsecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampNanosecond => Values {
            ts_nanosecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
    }
}

impl Column {
    // The type of vals must be same.
    pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
        let values = self.values.get_or_insert_with(Values::default);
        let mut null_mask = BitVec::from_slice(&self.null_mask);
        let len = vector.len();
        null_mask.reserve_exact(origin_count + len);
        null_mask.extend(BitVec::repeat(false, len));
// The type of vals must be same.
pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
    let values = column.values.get_or_insert_with(Values::default);
    let mut null_mask = BitVec::from_slice(&column.null_mask);
    let len = vector.len();
    null_mask.reserve_exact(origin_count + len);
    null_mask.extend(BitVec::repeat(false, len));

        (0..len).into_iter().for_each(|idx| match vector.get(idx) {
            Value::Null => null_mask.set(idx + origin_count, true),
            Value::Boolean(val) => values.bool_values.push(val),
            Value::UInt8(val) => values.u8_values.push(val.into()),
            Value::UInt16(val) => values.u16_values.push(val.into()),
            Value::UInt32(val) => values.u32_values.push(val),
            Value::UInt64(val) => values.u64_values.push(val),
            Value::Int8(val) => values.i8_values.push(val.into()),
            Value::Int16(val) => values.i16_values.push(val.into()),
            Value::Int32(val) => values.i32_values.push(val),
            Value::Int64(val) => values.i64_values.push(val),
            Value::Float32(val) => values.f32_values.push(*val),
            Value::Float64(val) => values.f64_values.push(*val),
            Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
            Value::Binary(val) => values.binary_values.push(val.to_vec()),
            Value::Date(val) => values.date_values.push(val.val()),
            Value::DateTime(val) => values.datetime_values.push(val.val()),
            Value::Timestamp(val) => match val.unit() {
                TimeUnit::Second => values.ts_second_values.push(val.value()),
                TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
                TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
                TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
            },
            Value::List(_) => unreachable!(),
        });
        self.null_mask = null_mask.into_vec();
    }
    (0..len).for_each(|idx| match vector.get(idx) {
        Value::Null => null_mask.set(idx + origin_count, true),
        Value::Boolean(val) => values.bool_values.push(val),
        Value::UInt8(val) => values.u8_values.push(val.into()),
        Value::UInt16(val) => values.u16_values.push(val.into()),
        Value::UInt32(val) => values.u32_values.push(val),
        Value::UInt64(val) => values.u64_values.push(val),
        Value::Int8(val) => values.i8_values.push(val.into()),
        Value::Int16(val) => values.i16_values.push(val.into()),
        Value::Int32(val) => values.i32_values.push(val),
        Value::Int64(val) => values.i64_values.push(val),
        Value::Float32(val) => values.f32_values.push(*val),
        Value::Float64(val) => values.f64_values.push(*val),
        Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
        Value::Binary(val) => values.binary_values.push(val.to_vec()),
        Value::Date(val) => values.date_values.push(val.val()),
        Value::DateTime(val) => values.datetime_values.push(val.val()),
        Value::Timestamp(val) => match val.unit() {
            TimeUnit::Second => values.ts_second_values.push(val.value()),
            TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
            TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
            TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
        },
        Value::List(_) => unreachable!(),
    });
    column.null_mask = null_mask.into_vec();
}

#[cfg(test)]

@@ -239,59 +237,59 @@ mod tests {

    #[test]
    fn test_values_with_capacity() {
        let values = Values::with_capacity(ColumnDataType::Int8, 2);
        let values = values_with_capacity(ColumnDataType::Int8, 2);
        let values = values.i8_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Int32, 2);
        let values = values_with_capacity(ColumnDataType::Int32, 2);
        let values = values.i32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Int64, 2);
        let values = values_with_capacity(ColumnDataType::Int64, 2);
        let values = values.i64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint8, 2);
        let values = values_with_capacity(ColumnDataType::Uint8, 2);
        let values = values.u8_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint32, 2);
        let values = values_with_capacity(ColumnDataType::Uint32, 2);
        let values = values.u32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint64, 2);
        let values = values_with_capacity(ColumnDataType::Uint64, 2);
        let values = values.u64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Float32, 2);
        let values = values_with_capacity(ColumnDataType::Float32, 2);
        let values = values.f32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Float64, 2);
        let values = values_with_capacity(ColumnDataType::Float64, 2);
        let values = values.f64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Binary, 2);
        let values = values_with_capacity(ColumnDataType::Binary, 2);
        let values = values.binary_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Boolean, 2);
        let values = values_with_capacity(ColumnDataType::Boolean, 2);
        let values = values.bool_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::String, 2);
        let values = values_with_capacity(ColumnDataType::String, 2);
        let values = values.string_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Date, 2);
        let values = values_with_capacity(ColumnDataType::Date, 2);
        let values = values.date_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Datetime, 2);
        let values = values_with_capacity(ColumnDataType::Datetime, 2);
        let values = values.datetime_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::TimestampMillisecond, 2);
        let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
        let values = values.ts_millisecond_values;
        assert_eq!(2, values.capacity());
    }

@@ -462,28 +460,28 @@ mod tests {
        };

        let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![1, 2, 3],
            column.values.as_ref().unwrap().ts_nanosecond_values
        );

        let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![4, 5, 6],
            column.values.as_ref().unwrap().ts_millisecond_values
        );

        let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![7, 8, 9],
            column.values.as_ref().unwrap().ts_microsecond_values
        );

        let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![10, 11, 12],
            column.values.as_ref().unwrap().ts_second_values

@@ -507,7 +505,7 @@ mod tests {
        let row_count = 4;

        let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
        column.push_vals(row_count, vector);
        push_vals(&mut column, row_count, vector);
        // Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
        let bool_values = column.values.unwrap().bool_values;
        assert_eq!(vec![false, true, true, true, false], bool_values);

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -14,9 +14,13 @@

pub mod error;
pub mod helper;
pub mod prometheus;
pub mod result;
pub mod serde;

pub mod prometheus {
    pub mod remote {
        pub use greptime_proto::prometheus::remote::*;
    }
}

pub mod v1;

pub use prost::DecodeError;
@@ -1,203 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::prelude::ErrorExt;

use crate::v1::codec::SelectResult;
use crate::v1::{
    admin_result, object_result, AdminResult, MutateResult, ObjectResult, ResultHeader,
    SelectResult as SelectResultRaw,
};

pub const PROTOCOL_VERSION: u32 = 1;

pub type Success = u32;
pub type Failure = u32;

#[derive(Default)]
pub struct ObjectResultBuilder {
    version: u32,
    code: u32,
    err_msg: Option<String>,
    result: Option<Body>,
}

pub enum Body {
    Mutate((Success, Failure)),
    Select(SelectResult),
}

impl ObjectResultBuilder {
    pub fn new() -> Self {
        Self {
            version: PROTOCOL_VERSION,
            ..Default::default()
        }
    }

    #[allow(dead_code)]
    pub fn version(mut self, version: u32) -> Self {
        self.version = version;
        self
    }

    pub fn status_code(mut self, code: u32) -> Self {
        self.code = code;
        self
    }

    pub fn err_msg(mut self, err_msg: String) -> Self {
        self.err_msg = Some(err_msg);
        self
    }

    pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
        self.result = Some(Body::Mutate((success, failure)));
        self
    }

    pub fn select_result(mut self, select_result: SelectResult) -> Self {
        self.result = Some(Body::Select(select_result));
        self
    }

    pub fn build(self) -> ObjectResult {
        let header = Some(ResultHeader {
            version: self.version,
            code: self.code,
            err_msg: self.err_msg.unwrap_or_default(),
        });

        let result = match self.result {
            Some(Body::Mutate((success, failure))) => {
                Some(object_result::Result::Mutate(MutateResult {
                    success,
                    failure,
                }))
            }
            Some(Body::Select(select)) => Some(object_result::Result::Select(SelectResultRaw {
                raw_data: select.into(),
            })),
            None => None,
        };

        ObjectResult { header, result }
    }
}

pub fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
    ObjectResultBuilder::new()
        .status_code(err.status_code() as u32)
        .err_msg(err.to_string())
        .build()
}

#[derive(Debug)]
pub struct AdminResultBuilder {
    version: u32,
    code: u32,
    err_msg: Option<String>,
    mutate: Option<(Success, Failure)>,
}

impl AdminResultBuilder {
    pub fn status_code(mut self, code: u32) -> Self {
        self.code = code;
        self
    }

    pub fn err_msg(mut self, err_msg: String) -> Self {
        self.err_msg = Some(err_msg);
        self
    }

    pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
        self.mutate = Some((success, failure));
        self
    }

    pub fn build(self) -> AdminResult {
        let header = Some(ResultHeader {
            version: self.version,
            code: self.code,
            err_msg: self.err_msg.unwrap_or_default(),
        });

        let result = if let Some((success, failure)) = self.mutate {
            Some(admin_result::Result::Mutate(MutateResult {
                success,
                failure,
            }))
        } else {
            None
        };

        AdminResult { header, result }
    }
}

impl Default for AdminResultBuilder {
    fn default() -> Self {
        Self {
            version: PROTOCOL_VERSION,
            code: 0,
            err_msg: None,
            mutate: None,
        }
    }
}

#[cfg(test)]
mod tests {
    use common_error::status_code::StatusCode;

    use super::*;
    use crate::error::UnknownColumnDataTypeSnafu;
    use crate::v1::{object_result, MutateResult};

    #[test]
    fn test_object_result_builder() {
        let obj_result = ObjectResultBuilder::new()
            .version(101)
            .status_code(500)
            .err_msg("Failed to read this file!".to_string())
            .mutate_result(100, 20)
            .build();
        let header = obj_result.header.unwrap();
        assert_eq!(101, header.version);
        assert_eq!(500, header.code);
        assert_eq!("Failed to read this file!", header.err_msg);

        let result = obj_result.result.unwrap();
        assert_eq!(
            object_result::Result::Mutate(MutateResult {
                success: 100,
                failure: 20,
            }),
            result
        );
    }

    #[test]
    fn test_build_err_result() {
        let err = UnknownColumnDataTypeSnafu { datatype: 1 }.build();
        let err_result = build_err_result(&err);
        let header = err_result.header.unwrap();
        let result = err_result.result;

        assert_eq!(PROTOCOL_VERSION, header.version);
        assert_eq!(StatusCode::InvalidArguments as u32, header.code);
        assert!(result.is_none());
    }
}
@@ -1,115 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub use prost::DecodeError;
use prost::Message;

use crate::v1::codec::SelectResult;
use crate::v1::meta::TableRouteValue;

macro_rules! impl_convert_with_bytes {
    ($data_type: ty) => {
        impl From<$data_type> for Vec<u8> {
            fn from(entity: $data_type) -> Self {
                entity.encode_to_vec()
            }
        }

        impl TryFrom<&[u8]> for $data_type {
            type Error = DecodeError;

            fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
                <$data_type>::decode(value.as_ref())
            }
        }
    };
}

impl_convert_with_bytes!(SelectResult);
impl_convert_with_bytes!(TableRouteValue);

#[cfg(test)]
mod tests {
    use std::ops::Deref;

    use crate::v1::codec::*;
    use crate::v1::{column, Column};

    const SEMANTIC_TAG: i32 = 0;

    #[test]
    fn test_convert_select_result() {
        let select_result = mock_select_result();

        let bytes: Vec<u8> = select_result.into();
        let result: SelectResult = bytes.deref().try_into().unwrap();

        assert_eq!(8, result.row_count);
        assert_eq!(1, result.columns.len());

        let column = &result.columns[0];
        assert_eq!("foo", column.column_name);
        assert_eq!(SEMANTIC_TAG, column.semantic_type);
        assert_eq!(vec![1], column.null_mask);
        assert_eq!(
            vec![2, 3, 4, 5, 6, 7, 8],
            column.values.as_ref().unwrap().i32_values
        );
    }

    #[should_panic]
    #[test]
    fn test_convert_select_result_wrong() {
        let select_result = mock_select_result();

        let mut bytes: Vec<u8> = select_result.into();

        // modify some bytes
        bytes[0] = 0b1;
        bytes[1] = 0b1;

        let result: SelectResult = bytes.deref().try_into().unwrap();

        assert_eq!(8, result.row_count);
        assert_eq!(1, result.columns.len());

        let column = &result.columns[0];
        assert_eq!("foo", column.column_name);
        assert_eq!(SEMANTIC_TAG, column.semantic_type);
        assert_eq!(vec![1], column.null_mask);
        assert_eq!(
            vec![2, 3, 4, 5, 6, 7, 8],
            column.values.as_ref().unwrap().i32_values
        );
    }

    fn mock_select_result() -> SelectResult {
        let values = column::Values {
            i32_values: vec![2, 3, 4, 5, 6, 7, 8],
            ..Default::default()
        };
        let null_mask = vec![1];
        let column = Column {
            column_name: "foo".to_string(),
            semantic_type: SEMANTIC_TAG,
            values: Some(values),
            null_mask,
            ..Default::default()
        };
        SelectResult {
            columns: vec![column],
            row_count: 8,
        }
    }
}
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -12,14 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("greptime.v1");
pub mod column_def;

pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");

pub mod codec {
    tonic::include_proto!("greptime.v1.codec");
pub mod meta {
    pub use greptime_proto::v1::meta::*;
}

mod column_def;
pub mod meta;
pub use greptime_proto::v1::*;
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -19,21 +19,24 @@ use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;

impl ColumnDef {
    pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
        let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;

        let constraint = if self.default_constraint.is_empty() {
            None
        } else {
            Some(
                ColumnDefaultConstraint::try_from(self.default_constraint.as_slice())
                    .context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
            )
        };
    let constraint = if column_def.default_constraint.is_empty() {
        None
    } else {
        Some(
            ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()).context(
                error::ConvertColumnDefaultConstraintSnafu {
                    column: &column_def.name,
                },
            )?,
        )
    };

        ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
            .with_default_constraint(constraint)
            .context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
    }
    ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
        .with_default_constraint(constraint)
        .context(error::InvalidColumnDefaultConstraintSnafu {
            column: &column_def.name,
        })
}
@@ -1,209 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

tonic::include_proto!("greptime.v1.meta");

use std::collections::HashMap;
use std::hash::{Hash, Hasher};

pub const PROTOCOL_VERSION: u64 = 1;

#[derive(Default)]
pub struct PeerDict {
    peers: HashMap<Peer, usize>,
    index: usize,
}

impl PeerDict {
    pub fn get_or_insert(&mut self, peer: Peer) -> usize {
        let index = self.peers.entry(peer).or_insert_with(|| {
            let v = self.index;
            self.index += 1;
            v
        });

        *index
    }

    pub fn into_peers(self) -> Vec<Peer> {
        let mut array = vec![Peer::default(); self.index];
        for (p, i) in self.peers {
            array[i] = p;
        }
        array
    }
}

#[allow(clippy::derive_hash_xor_eq)]
impl Hash for Peer {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
        self.addr.hash(state);
    }
}

impl Eq for Peer {}

impl RequestHeader {
    #[inline]
    pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            member_id,
        }
    }
}

impl ResponseHeader {
    #[inline]
    pub fn success(cluster_id: u64) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            ..Default::default()
        }
    }

    #[inline]
    pub fn failed(cluster_id: u64, error: Error) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            error: Some(error),
        }
    }

    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(error) = &self.error {
            if error.code == ErrorCode::NotLeader as i32 {
                return true;
            }
        }
        false
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
    NoActiveDatanodes = 1,
    NotLeader = 2,
}

impl Error {
    #[inline]
    pub fn no_active_datanodes() -> Self {
        Self {
            code: ErrorCode::NoActiveDatanodes as i32,
            err_msg: "No active datanodes".to_string(),
        }
    }

    #[inline]
    pub fn is_not_leader() -> Self {
        Self {
            code: ErrorCode::NotLeader as i32,
            err_msg: "Current server is not leader".to_string(),
        }
    }
}

impl HeartbeatResponse {
    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(header) = &self.header {
            return header.is_not_leader();
        }
        false
    }
}
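
Combined with the `AskLeader` RPC from the heartbeat service earlier in this diff, this helper supports a simple client-side failover loop; a hedged sketch (the retry policy is illustrative, not the actual meta-client behavior):

```rust
// Minimal sketch of reacting to a NOT_LEADER heartbeat response. The
// surrounding client plumbing (AskLeader call, stream reconnect) is
// illustrative only.
enum Action {
    Continue,
    AskLeaderAndReconnect,
}

fn on_heartbeat(resp: &HeartbeatResponse) -> Action {
    if resp.is_not_leader() {
        // The server we spoke to lost leadership: re-resolve the leader
        // via AskLeader and re-open the heartbeat stream against it.
        Action::AskLeaderAndReconnect
    } else {
        Action::Continue
    }
}
```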

macro_rules! gen_set_header {
    ($req: ty) => {
        impl $req {
            #[inline]
            pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
                self.header = Some(RequestHeader::new((cluster_id, member_id)));
            }
        }
    };
}

gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(DeleteRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
gen_set_header!(MoveValueRequest);

#[cfg(test)]
mod tests {
    use std::vec;

    use super::*;

    #[test]
    fn test_peer_dict() {
        let mut dict = PeerDict::default();

        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });

        assert_eq!(2, dict.index);
        assert_eq!(
            vec![
                Peer {
                    id: 1,
                    addr: "111".to_string(),
                },
                Peer {
                    id: 2,
                    addr: "222".to_string(),
                }
            ],
            dict.into_peers()
        );
    }
}
@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
async-stream = "0.3"
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }

@@ -18,25 +18,28 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
futures-util.workspace = true
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio = { version = "1.18", features = ["full"] }
tokio.workspace = true

[dev-dependencies]
chrono = "0.4"
common-test-util = { path = "../common/test-util" }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }
tokio.workspace = true
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -13,14 +13,16 @@
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};

use crate::DeregisterTableRequest;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
@@ -86,27 +88,25 @@ pub enum Error {
        backtrace: Backtrace,
    },

    #[snafu(display("Cannot find schema, schema info: {}", schema_info))]
    #[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
    SchemaNotFound {
        schema_info: String,
        catalog: String,
        schema: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Table `{}` already exists", table))]
    TableExists { table: String, backtrace: Backtrace },

    #[snafu(display("Table `{}` not exist", table))]
    TableNotExist { table: String, backtrace: Backtrace },

    #[snafu(display("Schema {} already exists", schema))]
    SchemaExists {
        schema: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Failed to register table"))]
    RegisterTable {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Operation {} not implemented yet", operation))]
    Unimplemented {
        operation: String,

@@ -141,6 +141,17 @@
        source: table::error::Error,
    },

    #[snafu(display(
        "Failed to deregister table, request: {:?}, source: {}",
        request,
        source
    ))]
    DeregisterTable {
        request: DeregisterTableRequest,
        #[snafu(backtrace)]
        source: table::error::Error,
    },

    #[snafu(display("Illegal catalog manager state: {}", msg))]
    IllegalManagerState { backtrace: Backtrace, msg: String },

@@ -150,17 +161,16 @@
        source: table::error::Error,
    },

    #[snafu(display(
        "Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
        table_info,
        schema,
        source
    ))]
    InvalidTableSchema {
        table_info: String,
        schema: RawSchema,
    #[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
    SchemaProviderOperation {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
        source: BoxedError,
    },

    #[snafu(display("{source}"))]
    Internal {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]

@@ -174,15 +184,6 @@ pub enum Error {
        source: common_catalog::error::Error,
    },

    #[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
    Io {
        backtrace: Backtrace,
        source: std::io::Error,
    },

    #[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
    CatalogStateInconsistent { msg: String, backtrace: Backtrace },

    #[snafu(display("Failed to perform metasrv operation, source: {}", source))]
    MetaSrv {
        #[snafu(backtrace)]

@@ -195,10 +196,28 @@
        source: datatypes::error::Error,
    },

    #[snafu(display("Catalog internal error: {}", source))]
    Internal {
    #[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
    CatalogEntrySerde {
        #[snafu(backtrace)]
        source: BoxedError,
        source: common_catalog::error::Error,
    },

    #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
    QueryAccessDenied { catalog: String, schema: String },

    #[snafu(display(
        "Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
        catalog,
        schema,
        table,
        source
    ))]
    RegionStats {
        catalog: String,
        schema: String,
        table: String,
        #[snafu(backtrace)]
        source: table::error::Error,
    },
}

@@ -212,37 +231,41 @@ impl ErrorExt for Error {
            | Error::TableNotFound { .. }
            | Error::IllegalManagerState { .. }
            | Error::CatalogNotFound { .. }
            | Error::InvalidEntryType { .. }
            | Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
            | Error::InvalidEntryType { .. } => StatusCode::Unexpected,

            Error::SystemCatalog { .. }
            | Error::EmptyValue { .. }
            | Error::ValueDeserialize { .. }
            | Error::Io { .. } => StatusCode::StorageUnavailable,
            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,

            Error::RegisterTable { .. } | Error::SystemCatalogTypeMismatch { .. } => {
                StatusCode::Internal
            }
            Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,

            Error::ReadSystemCatalog { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
                source.status_code()
            }

            Error::TableExists { .. } => StatusCode::TableAlreadyExists,
            Error::TableNotExist { .. } => StatusCode::TableNotFound,
            Error::SchemaExists { .. } => StatusCode::InvalidArguments,

            Error::OpenSystemCatalog { source, .. }
            | Error::CreateSystemCatalog { source, .. }
            | Error::InsertCatalogRecord { source, .. }
            | Error::OpenTable { source, .. }
            | Error::CreateTable { source, .. } => source.status_code(),
            | Error::CreateTable { source, .. }
            | Error::DeregisterTable { source, .. }
            | Error::RegionStats { source, .. } => source.status_code(),

            Error::MetaSrv { source, .. } => source.status_code(),
            Error::SystemCatalogTableScan { source } => source.status_code(),
            Error::SystemCatalogTableScanExec { source } => source.status_code(),
            Error::InvalidTableSchema { source, .. } => source.status_code(),
            Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
            Error::Internal { source, .. } => source.status_code(),
            Error::InvalidTableInfoInCatalog { source } => source.status_code(),
            Error::SchemaProviderOperation { source } | Error::Internal { source } => {
                source.status_code()
            }

            Error::Unimplemented { .. } => StatusCode::Unsupported,
            Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
        }
    }

@@ -263,7 +286,6 @@ impl From<Error> for DataFusionError {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_error::mock::MockError;
|
||||
use snafu::GenerateImplicitData;
|
||||
|
||||
use super::*;
|
||||
@@ -284,22 +306,6 @@ mod tests {
|
||||
InvalidKeySnafu { key: None }.build().status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::OpenSystemCatalog {
|
||||
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::CreateSystemCatalog {
|
||||
source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
|
||||
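As orientation for the error plumbing above: Snafu's generated context selectors build these variants (implicit backtraces included), and `ErrorExt::status_code` maps each one to a status. A minimal sketch, not part of the diff, using only a selector whose name appears elsewhere in this crate:

use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;

fn demo_status_code() {
    // `SchemaExistsSnafu` is the selector Snafu derives for `Error::SchemaExists`;
    // `.build()` fills the implicit backtrace, as the tests above do with `InvalidKeySnafu`.
    let err = SchemaExistsSnafu { schema: "public" }.build();
    assert_eq!(StatusCode::InvalidArguments, err.status_code());
}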
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,10 +24,10 @@ use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};

const CATALOG_KEY_PREFIX: &str = "__c";
const SCHEMA_KEY_PREFIX: &str = "__s";
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";

const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";

@@ -91,6 +91,7 @@ pub fn build_table_regional_prefix(
}

/// Table global info has only one key across all datanodes so it does not have `node_id` field.
#[derive(Clone)]
pub struct TableGlobalKey {
    pub catalog_name: String,
    pub schema_name: String,
@@ -131,7 +132,6 @@ impl TableGlobalKey {
pub struct TableGlobalValue {
    /// Id of datanode that created the global table info kv. only for debugging.
    pub node_id: u64,
    // TODO(LFC): Maybe remove it?
    /// Allocation of region ids across all datanodes.
    pub regions_id_map: HashMap<u64, Vec<u32>>,
    pub table_info: RawTableInfo,
@@ -370,4 +370,10 @@ mod tests {
        let deserialized = TableGlobalValue::parse(serialized).unwrap();
        assert_eq!(value, deserialized);
    }

    #[test]
    fn test_table_global_value_compatibility() {
        let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
        TableGlobalValue::parse(s).unwrap();
    }
}
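The compatibility test above pins down that catalog entries are plain JSON stored under the (now public) key prefixes, so payloads written by older versions must keep parsing. An illustrative wrapper, using only the `parse` API exercised in the tests:

// Illustrative only: any previously written catalog entry must round-trip
// through `TableGlobalValue::parse`, exactly as the compatibility test asserts.
fn parse_table_global_entry(json: &str) -> TableGlobalValue {
    TableGlobalValue::parse(json).expect("older catalog entries must stay readable")
}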
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,8 +18,9 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use common_telemetry::info;
use snafu::ResultExt;
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -34,6 +35,7 @@ pub mod local;
pub mod remote;
pub mod schema;
pub mod system;
pub mod table_source;
pub mod tables;

/// Represent a list of named catalogs
@@ -97,6 +99,9 @@ pub trait CatalogManager: CatalogList {
    /// schema registered.
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;

    /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;

    /// Register a system table, should be called before starting the manager.
    async fn register_system_table(&self, request: RegisterSystemTableRequest)
        -> error::Result<()>;
@@ -104,7 +109,12 @@ pub trait CatalogManager: CatalogList {
    fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;

    /// Returns the table by catalog, schema and table name.
    fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
    async fn table(
        &self,
        catalog: &str,
        schema: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>>;
}

pub type CatalogManagerRef = Arc<dyn CatalogManager>;
@@ -142,7 +152,16 @@ impl Debug for RegisterTableRequest {
    }
}

#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct RenameTableRequest {
    pub catalog: String,
    pub schema: String,
    pub table_name: String,
    pub new_table_name: String,
    pub table_id: TableId,
}

#[derive(Debug, Clone)]
pub struct DeregisterTableRequest {
    pub catalog: String,
    pub schema: String,
@@ -155,11 +174,6 @@ pub struct RegisterSchemaRequest {
    pub schema: String,
}

/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

pub trait CatalogProviderFactory {
    fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
@@ -179,15 +193,18 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
    let table_name = &req.create_table_request.table_name;
    let table_id = req.create_table_request.id;

    let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
    let table = manager.table(catalog_name, schema_name, table_name).await?;
    let table = if let Some(table) = table {
        table
    } else {
        let table = engine
            .create_table(&EngineContext::default(), req.create_table_request.clone())
            .await
            .with_context(|_| CreateTableSnafu {
                table_info: format!(
                    "{catalog_name}.{schema_name}.{table_name}, id: {table_id}",
                table_info: common_catalog::format_full_table_name(
                    catalog_name,
                    schema_name,
                    table_name,
                ),
            })?;
        manager
@@ -208,3 +225,63 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
    }
    Ok(())
}

/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> Result<(u64, Vec<RegionStat>)> {
    let mut region_number: u64 = 0;
    let mut region_stats = Vec::new();

    for catalog_name in catalog_manager.catalog_names()? {
        let catalog =
            catalog_manager
                .catalog(&catalog_name)?
                .context(error::CatalogNotFoundSnafu {
                    catalog_name: &catalog_name,
                })?;

        for schema_name in catalog.schema_names()? {
            let schema = catalog
                .schema(&schema_name)?
                .context(error::SchemaNotFoundSnafu {
                    catalog: &catalog_name,
                    schema: &schema_name,
                })?;

            for table_name in schema.table_names()? {
                let table =
                    schema
                        .table(&table_name)
                        .await?
                        .context(error::TableNotFoundSnafu {
                            table_info: &table_name,
                        })?;

                let region_numbers = &table.table_info().meta.region_numbers;
                region_number += region_numbers.len() as u64;

                match table.region_stats() {
                    Ok(stats) => {
                        let stats = stats.into_iter().map(|stat| RegionStat {
                            region_id: stat.region_id,
                            table_name: Some(TableName {
                                catalog_name: catalog_name.clone(),
                                schema_name: schema_name.clone(),
                                table_name: table_name.clone(),
                            }),
                            approximate_bytes: stat.disk_usage_bytes as i64,
                            ..Default::default()
                        });

                        region_stats.extend(stats);
                    }
                    Err(e) => {
                        warn!("Failed to get region status, err: {:?}", e);
                    }
                };
            }
        }
    }

    Ok((region_number, region_stats))
}
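Note that `CatalogManager::table` is async from this hunk on, so every lookup goes through `.await`. A small usage sketch over the trait as declared above (the catalog, schema, and table names are illustrative):

async fn table_exists(manager: &CatalogManagerRef) -> Result<bool> {
    // `table` now returns a future; absence is still expressed as `Ok(None)`.
    let table = manager.table("greptime", "public", "numbers").await?;
    Ok(table.is_some())
}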
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,6 +20,7 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
    SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{error, info};
use datatypes::prelude::ScalarVector;
@@ -34,9 +35,9 @@ use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{
    CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
    SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
    TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
    self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
    Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
    SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
@@ -45,9 +46,9 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
    format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
    CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
    RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
    handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -241,7 +242,8 @@ impl LocalCatalogManager {
        let schema = catalog
            .schema(&t.schema_name)?
            .context(SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &t.catalog_name, &t.schema_name),
                catalog: &t.catalog_name,
                schema: &t.schema_name,
            })?;

        let context = EngineContext {};
@@ -250,7 +252,6 @@ impl LocalCatalogManager {
            schema_name: t.schema_name.clone(),
            table_name: t.table_name.clone(),
            table_id: t.table_id,
            region_numbers: vec![0],
        };

        let option = self
@@ -338,12 +339,13 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;

        {
            let _lock = self.register_lock.lock().await;
            if let Some(existing) = schema.table(&request.table_name)? {
            if let Some(existing) = schema.table(&request.table_name).await? {
                if existing.table_info().ident.table_id != request.table_id {
                    error!(
                        "Unexpected table register request: {:?}, existing: {:?}",
@@ -377,11 +379,76 @@ impl CatalogManager for LocalCatalogManager {
        }
    }

    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;

        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)?
            .context(CatalogNotFoundSnafu { catalog_name })?;

        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        // rename table in system catalog
        self.system
            .register_table(
                catalog_name.clone(),
                schema_name.clone(),
                request.new_table_name.clone(),
                request.table_id,
            )
            .await?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        {
            let started = *self.init_lock.lock().await;
            ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
        }

        {
            let _ = self.register_lock.lock().await;

            let DeregisterTableRequest {
                catalog,
                schema,
                table_name,
            } = &request;
            let table_id = self
                .catalogs
                .table(catalog, schema, table_name)
                .await?
                .with_context(|| error::TableNotExistSnafu {
                    table: format_full_table_name(catalog, schema, table_name),
                })?
                .table_info()
                .ident
                .table_id;

            if !self.system.deregister_table(&request, table_id).await? {
                return Ok(false);
            }

            self.catalogs.deregister_table(request).await
        }
        .fail()
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -439,7 +506,7 @@ impl CatalogManager for LocalCatalogManager {
            .schema(schema)
    }

    fn table(
    async fn table(
        &self,
        catalog_name: &str,
        schema_name: &str,
@@ -452,9 +519,10 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;
        schema.table(table_name)
        schema.table(table_name).await
    }
}
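The rename path above records the new name in the system catalog first and only then swaps the in-memory entry. A condensed sketch of driving it through the `CatalogManager` trait, with all request values illustrative:

async fn rename_num_to_numbers(manager: &dyn CatalogManager) -> Result<bool> {
    manager
        .rename_table(RenameTableRequest {
            catalog: "greptime".to_string(),
            schema: "public".to_string(),
            table_name: "num".to_string(),
            new_table_name: "numbers".to_string(),
            table_id: 1024, // illustrative id
        })
        .await
}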
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,18 +18,22 @@ use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};

use async_trait::async_trait;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::OptionExt;
use snafu::{ensure, OptionExt};
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::error::{
    self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::schema::SchemaProvider;
use crate::{
    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
    SchemaProviderRef,
};

/// Simple in-memory list of catalogs
@@ -81,13 +85,33 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        schema
            .register_table(request.table_name, request.table)
            .map(|v| v.is_none())
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
            .get(&request.catalog)
            .context(CatalogNotFoundSnafu {
                catalog_name: &request.catalog,
            })?
            .clone();
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
@@ -99,7 +123,8 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        schema
            .deregister_table(&request.table_name)
@@ -131,16 +156,20 @@ impl CatalogManager for MemoryCatalogManager {
        }
    }

    fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
        let c = self.catalogs.read().unwrap();
        let catalog = if let Some(c) = c.get(catalog) {
    async fn table(
        &self,
        catalog: &str,
        schema: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>> {
        let catalog = {
            let c = self.catalogs.read().unwrap();
            let Some(c) = c.get(catalog) else { return Ok(None) };
            c.clone()
        } else {
            return Ok(None);
        };
        match catalog.schema(schema)? {
            None => Ok(None),
            Some(s) => s.table(table_name),
            Some(s) => s.table(table_name).await,
        }
    }
}
@@ -226,6 +255,10 @@ impl CatalogProvider for MemoryCatalogProvider {
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>> {
        let mut schemas = self.schemas.write().unwrap();
        ensure!(
            !schemas.contains_key(&name),
            error::SchemaExistsSnafu { schema: &name }
        );
        Ok(schemas.insert(name, schema))
    }

@@ -255,6 +288,7 @@ impl Default for MemorySchemaProvider {
    }
}

#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
@@ -265,7 +299,7 @@ impl SchemaProvider for MemorySchemaProvider {
        Ok(tables.keys().cloned().collect())
    }

    fn table(&self, name: &str) -> Result<Option<TableRef>> {
    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
        let tables = self.tables.read().unwrap();
        Ok(tables.get(name).cloned())
    }
@@ -288,6 +322,20 @@ impl SchemaProvider for MemorySchemaProvider {
        }
    }

    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        let mut tables = self.tables.write().unwrap();
        if tables.get(name).is_some() {
            let table = tables.remove(name).unwrap();
            tables.insert(new_name, table.clone());
            Ok(table)
        } else {
            TableNotFoundSnafu {
                table_info: name.to_string(),
            }
            .fail()?
        }
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        let mut tables = self.tables.write().unwrap();
        Ok(tables.remove(name))
@@ -313,8 +361,8 @@ mod tests {

    use super::*;

    #[test]
    fn test_new_memory_catalog_list() {
    #[tokio::test]
    async fn test_new_memory_catalog_list() {
        let catalog_list = new_memory_catalog_list().unwrap();
        let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();

@@ -327,9 +375,9 @@ mod tests {
            .register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
            .unwrap();

        let table = default_schema.table("numbers").unwrap();
        let table = default_schema.table("numbers").await.unwrap();
        assert!(table.is_some());
        assert!(default_schema.table("not_exists").unwrap().is_none());
        assert!(default_schema.table("not_exists").await.unwrap().is_none());
    }

    #[tokio::test]
@@ -352,6 +400,86 @@ mod tests {
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_mem_provider_rename_table() {
        let provider = MemorySchemaProvider::new();
        let table_name = "num";
        assert!(!provider.table_exist(table_name).unwrap());
        let test_table: TableRef = Arc::new(NumbersTable::default());
        // register test table
        assert!(provider
            .register_table(table_name.to_string(), test_table.clone())
            .unwrap()
            .is_none());
        assert!(provider.table_exist(table_name).unwrap());

        // rename test table
        let new_table_name = "numbers";
        provider
            .rename_table(table_name, new_table_name.to_string())
            .unwrap();

        // test old table name not exist
        assert!(!provider.table_exist(table_name).unwrap());
        assert!(provider.deregister_table(table_name).unwrap().is_none());

        // test new table name exists
        assert!(provider.table_exist(new_table_name).unwrap());
        let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
        assert_eq!(
            registered_table.table_info().ident.table_id,
            test_table.table_info().ident.table_id
        );

        let other_table = Arc::new(NumbersTable::new(2));
        let result = provider.register_table(new_table_name.to_string(), other_table);
        let err = result.err().unwrap();
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_catalog_rename_table() {
        let catalog = MemoryCatalogManager::default();
        let schema = catalog
            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .unwrap()
            .unwrap();

        // register table
        let table_name = "num";
        let table_id = 2333;
        let table: TableRef = Arc::new(NumbersTable::new(table_id));
        let register_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table,
        };
        assert!(catalog.register_table(register_table_req).await.unwrap());
        assert!(schema.table_exist(table_name).unwrap());

        // rename table
        let new_table_name = "numbers";
        let rename_table_req = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        assert!(catalog.rename_table(rename_table_req).await.unwrap());
        assert!(!schema.table_exist(table_name).unwrap());
        assert!(schema.table_exist(new_table_name).unwrap());

        let registered_table = catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);
    }

    #[test]
    pub fn test_register_if_absent() {
        let list = MemoryCatalogManager::default();
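`MemorySchemaProvider::rename_table` above is a remove-then-insert over the table map, made atomic to other callers by the single write lock. The same idea over a bare `HashMap`, as a standalone sketch:

use std::collections::HashMap;

// Standalone sketch of the remove-then-insert rename used above; `V: Clone`
// mirrors handing the renamed `TableRef` back to the caller.
fn rename_entry<V: Clone>(tables: &mut HashMap<String, V>, old: &str, new: String) -> Option<V> {
    let value = tables.remove(old)?; // absent name means the rename fails
    tables.insert(new, value.clone());
    Some(value)
}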
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,16 +13,19 @@
// limitations under the License.

use std::any::Any;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::pin::Pin;
use std::sync::Arc;

use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_telemetry::{debug, info};
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -32,28 +35,28 @@ use table::TableRef;
use tokio::sync::Mutex;

use crate::error::{
    CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
    OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
    CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
    SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
    build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
    SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
    CATALOG_KEY_PREFIX,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
    handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, SchemaProvider, SchemaProviderRef,
    RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};

/// Catalog manager based on metasrv.
pub struct RemoteCatalogManager {
    node_id: u64,
    backend: KvBackendRef,
    catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
    catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
    engine: TableEngineRef,
    system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
    mutex: Arc<Mutex<()>>,
}

impl RemoteCatalogManager {
@@ -64,7 +67,6 @@ impl RemoteCatalogManager {
            backend,
            catalogs: Default::default(),
            system_table_requests: Default::default(),
            mutex: Default::default(),
        }
    }

@@ -108,9 +110,13 @@ impl RemoteCatalogManager {
                debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
                continue;
            }
            let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
                .context(InvalidCatalogValueSnafu)?;
            yield Ok(key)

            let catalog_key = String::from_utf8_lossy(&k);
            if let Ok(key) = CatalogKey::parse(&catalog_key) {
                yield Ok(key)
            } else {
                error!("Invalid catalog key: {:?}", catalog_key);
            }
        }
    }))
}
@@ -324,7 +330,6 @@ impl RemoteCatalogManager {
            schema_name: schema_name.clone(),
            table_name: table_name.clone(),
            table_id,
            region_numbers: region_numbers.clone(),
        };
        match self
            .engine
@@ -347,21 +352,13 @@ impl RemoteCatalogManager {
        );

        let meta = &table_info.meta;
        let schema = meta
            .schema
            .clone()
            .try_into()
            .context(InvalidTableSchemaSnafu {
                table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
                schema: meta.schema.clone(),
            })?;
        let req = CreateTableRequest {
            id: table_id,
            catalog_name: catalog_name.clone(),
            schema_name: schema_name.clone(),
            table_name: table_name.clone(),
            desc: None,
            schema: Arc::new(schema),
            schema: meta.schema.clone(),
            region_numbers: region_numbers.clone(),
            primary_key_indices: meta.primary_key_indices.clone(),
            create_if_not_exists: true,
@@ -390,7 +387,14 @@ impl CatalogManager for RemoteCatalogManager {
            "Initialized catalogs: {:?}",
            catalogs.keys().cloned().collect::<Vec<_>>()
        );
        self.catalogs.store(Arc::new(catalogs));

        {
            let self_catalogs = self.catalogs.read();
            catalogs.into_iter().for_each(|(k, v)| {
                self_catalogs.insert(k, v);
            });
        }

        info!("Max table id allocated: {}", max_table_id);

        let mut system_table_requests = self.system_table_requests.lock().await;
@@ -418,7 +422,8 @@ impl CatalogManager for RemoteCatalogManager {
        catalog_provider
            .schema(&schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &catalog_name, &schema_name),
                catalog: &catalog_name,
                schema: &schema_name,
            })?;
        if schema_provider.table_exist(&request.table_name)? {
            return TableExistsSnafu {
@@ -430,11 +435,18 @@ impl CatalogManager for RemoteCatalogManager {
        Ok(true)
    }

    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
        }
        .fail()
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalog_name = &request.catalog;
        let schema_name = &request.schema;
        let schema = self
            .schema(catalog_name, schema_name)?
            .context(SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        let result = schema.deregister_table(&request.table_name)?;
        Ok(result.is_none())
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -448,6 +460,13 @@ impl CatalogManager for RemoteCatalogManager {
        Ok(true)
    }

    async fn rename_table(&self, _request: RenameTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "rename table",
        }
        .fail()
    }

    async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
        let mut requests = self.system_table_requests.lock().await;
        requests.push(request);
@@ -462,7 +481,7 @@ impl CatalogManager for RemoteCatalogManager {
            .schema(schema)
    }

    fn table(
    async fn table(
        &self,
        catalog_name: &str,
        schema_name: &str,
@@ -474,9 +493,10 @@ impl CatalogManager for RemoteCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;
        schema.table(table_name)
        schema.table(table_name).await
    }
}

@@ -492,12 +512,10 @@ impl CatalogList for RemoteCatalogManager {
    ) -> Result<Option<CatalogProviderRef>> {
        let key = self.build_catalog_key(&name).to_string();
        let backend = self.backend.clone();
        let mutex = self.mutex.clone();
        let catalogs = self.catalogs.clone();

        std::thread::spawn(|| {
            common_runtime::block_on_write(async move {
                let _guard = mutex.lock().await;
                backend
                    .set(
                        key.as_bytes(),
@@ -506,11 +524,10 @@ impl CatalogList for RemoteCatalogManager {
                            .context(InvalidCatalogValueSnafu)?,
                    )
                    .await?;
                let prev_catalogs = catalogs.load();
                let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
                new_catalogs.clone_from(&prev_catalogs);
                let prev = new_catalogs.insert(name, catalog);
                catalogs.store(Arc::new(new_catalogs));

                let catalogs = catalogs.read();
                let prev = catalogs.insert(name, catalog.clone());

                Ok(prev)
            })
        })
@@ -520,12 +537,65 @@ impl CatalogList for RemoteCatalogManager {

    /// List all catalogs from metasrv
    fn catalog_names(&self) -> Result<Vec<String>> {
        Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
        let catalogs = self.catalogs.read();
        Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
    }

    /// Read catalog info of given name from metasrv.
    fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
        Ok(self.catalogs.load().get(name).cloned())
        {
            let catalogs = self.catalogs.read();
            let catalog = catalogs.get(name);

            if let Some(catalog) = catalog {
                return Ok(Some(catalog.clone()));
            }
        }

        let catalogs = self.catalogs.write();

        let catalog = catalogs.get(name);
        if let Some(catalog) = catalog {
            return Ok(Some(catalog.clone()));
        }

        // It's for lack of incremental catalog syncing between datanode and meta. Here we fetch catalog
        // from meta on demand. This can be removed when incremental catalog syncing is done in datanode.

        let backend = self.backend.clone();

        let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
            common_runtime::block_on_read(async move {
                let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
                let mut catalogs = HashSet::new();

                while let Some(catalog) = stream.next().await {
                    if let Ok(catalog) = catalog {
                        let catalog_key = String::from_utf8_lossy(&catalog.0);

                        if let Ok(key) = CatalogKey::parse(&catalog_key) {
                            catalogs.insert(key.catalog_name);
                        }
                    }
                }

                catalogs
            })
        })
        .join()
        .unwrap();

        catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());

        for catalog in catalogs_from_meta {
            catalogs
                .entry(catalog.clone())
                .or_insert(self.new_catalog_provider(&catalog));
        }

        let catalog = catalogs.get(name);

        Ok(catalog.as_deref().cloned())
    }
}

@@ -685,6 +755,7 @@ impl RemoteSchemaProvider {
    }
}

#[async_trait]
impl SchemaProvider for RemoteSchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
@@ -694,7 +765,7 @@ impl SchemaProvider for RemoteSchemaProvider {
        Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
    }

    fn table(&self, name: &str) -> Result<Option<TableRef>> {
    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
        Ok(self.tables.load().get(name).cloned())
    }

@@ -737,6 +808,13 @@ impl SchemaProvider for RemoteSchemaProvider {
        prev
    }

    fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
        UnimplementedSnafu {
            operation: "rename table",
        }
        .fail()
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        let table_name = name.to_string();
        let table_key = self.build_regional_table_key(&table_name).to_string();
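`RemoteCatalogManager::catalog` above is a read-then-write double check: a cheap probe under the read lock, then a re-check under the write lock before paying for the metasrv fetch. The pattern, distilled to std types (the crate itself uses `parking_lot::RwLock` plus `DashMap`):

use std::collections::HashMap;
use std::sync::RwLock;

fn get_or_fetch(cache: &RwLock<HashMap<String, String>>, name: &str) -> String {
    // Fast path: most lookups hit the cache under a shared read lock.
    if let Some(v) = cache.read().unwrap().get(name) {
        return v.clone();
    }
    // Slow path: re-check under the write lock so only one caller populates.
    let mut cache = cache.write().unwrap();
    cache
        .entry(name.to_string())
        .or_insert_with(|| format!("fetched:{name}")) // stands in for the metasrv round trip
        .clone()
}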
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,11 +15,13 @@
use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use table::TableRef;

use crate::error::Result;

/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
    /// Returns the schema provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
@@ -29,12 +31,16 @@ pub trait SchemaProvider: Sync + Send {
    fn table_names(&self) -> Result<Vec<String>>;

    /// Retrieves a specific table from the schema by name, provided it exists.
    fn table(&self, name: &str) -> Result<Option<TableRef>>;
    async fn table(&self, name: &str) -> Result<Option<TableRef>>;

    /// If supported by the implementation, adds a new table to this schema.
    /// If a table of the same name existed before, it returns "Table already exists" error.
    fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;

    /// If supported by the implementation, renames an existing table from this schema and returns it.
    /// If no table of that name exists, returns "Table not found" error.
    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;

    /// If supported by the implementation, removes an existing table from this schema and returns it.
    /// If no table of that name exists, returns Ok(None).
    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;
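With `table` now async, the trait and every implementation carry `#[async_trait]`, and call sites must await. A small illustrative helper over any provider:

// Illustrative call-site sketch: provider errors and table absence both surface here.
async fn must_get(provider: &dyn SchemaProvider, name: &str) -> TableRef {
    provider
        .table(name)
        .await
        .expect("schema provider failed")
        .unwrap_or_else(|| panic!("table {name} not found"))
}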
@@ -1,10 +1,10 @@
|
||||
// Copyright 2022 Greptime Team
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -25,29 +25,29 @@ use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_telemetry::debug;
|
||||
use common_time::util;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVector};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
|
||||
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
|
||||
use table::requests::{
|
||||
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{
|
||||
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
|
||||
OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
|
||||
};
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
pub const ENTRY_TYPE_INDEX: usize = 0;
|
||||
pub const KEY_INDEX: usize = 1;
|
||||
pub const VALUE_INDEX: usize = 3;
|
||||
|
||||
pub struct SystemCatalogTable {
|
||||
table_info: TableInfoRef,
|
||||
pub table: TableRef,
|
||||
}
|
||||
pub struct SystemCatalogTable(TableRef);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for SystemCatalogTable {
|
||||
@@ -56,25 +56,29 @@ impl Table for SystemCatalogTable {
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.table_info.meta.schema.clone()
|
||||
self.0.schema()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
projection: Option<&Vec<usize>>,
|
||||
filters: &[Expr],
|
||||
limit: Option<usize>,
|
||||
) -> table::Result<PhysicalPlanRef> {
|
||||
panic!("System catalog table does not support scan!")
|
||||
self.0.scan(projection, filters, limit).await
|
||||
}
|
||||
|
||||
/// Insert values into table.
|
||||
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
|
||||
self.table.insert(request).await
|
||||
self.0.insert(request).await
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
self.table_info.clone()
|
||||
self.0.table_info()
|
||||
}
|
||||
|
||||
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
|
||||
self.0.delete(request).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,9 +89,8 @@ impl SystemCatalogTable {
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let schema = Arc::new(build_system_catalog_schema());
|
||||
let schema = build_system_catalog_schema();
|
||||
let ctx = EngineContext::default();
|
||||
|
||||
if let Some(table) = engine
|
||||
@@ -95,10 +98,7 @@ impl SystemCatalogTable {
|
||||
.await
|
||||
.context(OpenSystemCatalogSnafu)?
|
||||
{
|
||||
Ok(Self {
|
||||
table_info: table.table_info(),
|
||||
table,
|
||||
})
|
||||
Ok(Self(table))
|
||||
} else {
|
||||
// system catalog table is not yet created, try to create
|
||||
let request = CreateTableRequest {
|
||||
@@ -107,19 +107,18 @@ impl SystemCatalogTable {
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
desc: Some("System catalog table".to_string()),
|
||||
schema: schema.clone(),
|
||||
schema,
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
|
||||
create_if_not_exists: true,
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
};
|
||||
|
||||
let table = engine
|
||||
.create_table(&ctx, request)
|
||||
.await
|
||||
.context(CreateSystemCatalogSnafu)?;
|
||||
let table_info = table.table_info();
|
||||
Ok(Self { table, table_info })
|
||||
Ok(Self(table))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,7 +127,6 @@ impl SystemCatalogTable {
|
||||
let full_projection = None;
|
||||
let ctx = SessionContext::new();
|
||||
let scan = self
|
||||
.table
|
||||
.scan(full_projection, &[], None)
|
||||
.await
|
||||
.context(error::SystemCatalogTableScanSnafu)?;
|
||||
@@ -147,7 +145,7 @@ impl SystemCatalogTable {
|
||||
/// - value: JSON-encoded value of entry's metadata.
|
||||
/// - gmt_created: create time of this metadata.
|
||||
/// - gmt_modified: last updated time of this metadata.
|
||||
fn build_system_catalog_schema() -> Schema {
|
||||
fn build_system_catalog_schema() -> RawSchema {
|
||||
let cols = vec![
|
||||
ColumnSchema::new(
|
||||
"entry_type".to_string(),
|
||||
@@ -182,20 +180,59 @@ fn build_system_catalog_schema() -> Schema {
|
||||
),
|
||||
];
|
||||
|
||||
// The schema of this table must be valid.
|
||||
SchemaBuilder::try_from(cols).unwrap().build().unwrap()
|
||||
RawSchema::new(cols)
|
||||
}
|
||||
|
||||
pub fn build_table_insert_request(full_table_name: String, table_id: TableId) -> InsertRequest {
|
||||
/// Formats key string for table entry in system catalog
|
||||
#[inline]
|
||||
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
|
||||
format!("{catalog}.{schema}.{table_id}")
|
||||
}
|
||||
|
||||
pub fn build_table_insert_request(
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table_name: String,
|
||||
table_id: TableId,
|
||||
) -> InsertRequest {
|
||||
let entry_key = format_table_entry_key(&catalog, &schema, table_id);
|
||||
build_insert_request(
|
||||
EntryType::Table,
|
||||
full_table_name.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue { table_id })
|
||||
entry_key.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue { table_name })
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn build_table_deletion_request(
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> DeleteRequest {
|
||||
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
|
||||
DeleteRequest {
|
||||
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
|
||||
let mut m = HashMap::with_capacity(3);
|
||||
m.insert(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
|
||||
);
|
||||
m.insert(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as _,
|
||||
);
|
||||
// Timestamp in key part is intentionally left to 0
|
||||
m.insert(
|
||||
"timestamp".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
|
||||
);
|
||||
m
|
||||
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
    let full_schema_name = format!("{catalog_name}.{schema_name}");
    build_insert_request(
@@ -208,22 +245,10 @@ pub fn build_schema_insert_request(catalog_name: String, schema_name: String) ->
}

pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
+    let primary_key_columns = build_primary_key_columns(entry_type, key);

    let mut columns_values = HashMap::with_capacity(6);
-    columns_values.insert(
-        "entry_type".to_string(),
-        Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
-    );
-
-    columns_values.insert(
-        "key".to_string(),
-        Arc::new(BinaryVector::from_slice(&[key])) as _,
-    );
-
-    // Timestamp in key part is intentionally left to 0
-    columns_values.insert(
-        "timestamp".to_string(),
-        Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
-    );
+    columns_values.extend(primary_key_columns.into_iter());

    columns_values.insert(
        "value".to_string(),
@@ -233,12 +258,12 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
    let now = util::current_time_millis();
    columns_values.insert(
        "gmt_created".to_string(),
-        Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
+        Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
    );

    columns_values.insert(
        "gmt_modified".to_string(),
-        Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
+        Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
    );

    InsertRequest {
@@ -246,6 +271,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
        schema_name: DEFAULT_SCHEMA_NAME.to_string(),
        table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
        columns_values,
+        region_number: 0, // system catalog table has only one region
    }
}

@@ -285,8 +311,8 @@ pub fn decode_system_catalog(
        }

        EntryType::Table => {
-            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_name>`
-            // and the value is a JSON string with format: `{"table_id": <table_id>}`
+            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_id>`
+            // and the value is a JSON string with format: `{"table_name": <table_name>}`
            let table_parts = key.split('.').collect::<Vec<_>>();
            ensure!(
                table_parts.len() >= 3,
@@ -298,11 +324,12 @@ pub fn decode_system_catalog(
            debug!("Table meta value: {}", String::from_utf8_lossy(value));
            let table_meta: TableEntryValue =
                serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
+            let table_id = table_parts[2].parse::<TableId>().unwrap();
            Ok(Entry::Table(TableEntry {
                catalog_name: table_parts[0].to_string(),
                schema_name: table_parts[1].to_string(),
-                table_name: table_parts[2].to_string(),
-                table_id: table_meta.table_id,
+                table_name: table_meta.table_name,
+                table_id,
            }))
        }
    }
@@ -362,20 +389,23 @@ pub struct TableEntry {

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
-    pub table_id: TableId,
+    pub table_name: String,
}
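A small sketch of the new value encoding implied by the serde derives above; it mirrors the JSON shape documented in decode_system_catalog:

    // The value JSON now carries only the table name.
    let value = TableEntryValue { table_name: "my_table".to_string() };
    let json = serde_json::to_string(&value).unwrap();
    assert_eq!(json, r#"{"table_name":"my_table"}"#);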
#[cfg(test)]
mod tests {
-    use log_store::fs::noop::NoopLogStore;
+    use common_recordbatch::RecordBatches;
+    use common_test_util::temp_dir::{create_temp_dir, TempDir};
+    use datatypes::value::Value;
+    use log_store::NoopLogStore;
    use mito::config::EngineConfig;
    use mito::engine::MitoEngine;
-    use object_store::ObjectStore;
+    use object_store::{ObjectStore, ObjectStoreBuilder};
    use storage::compaction::noop::NoopCompactionScheduler;
    use storage::config::EngineConfig as StorageEngineConfig;
    use storage::EngineImpl;
-    use table::metadata::TableType;
+    use table::metadata::TableType::Base;
-    use tempdir::TempDir;

    use super::*;

@@ -415,8 +445,8 @@ mod tests {
    pub fn test_decode_table() {
        let entry = decode_system_catalog(
            Some(EntryType::Table as u8),
-            Some("some_catalog.some_schema.some_table".as_bytes()),
-            Some("{\"table_id\":42}".as_bytes()),
+            Some("some_catalog.some_schema.42".as_bytes()),
+            Some("{\"table_name\":\"some_table\"}".as_bytes()),
        )
        .unwrap();

@@ -435,7 +465,7 @@ mod tests {
    pub fn test_decode_mismatch() {
        decode_system_catalog(
            Some(EntryType::Table as u8),
-            Some("some_catalog.some_schema.some_table".as_bytes()),
+            Some("some_catalog.some_schema.42".as_bytes()),
            None,
        )
        .unwrap();
@@ -450,19 +480,21 @@ mod tests {
    }

    pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
-        let dir = TempDir::new("system-table-test").unwrap();
+        let dir = create_temp_dir("system-table-test");
        let store_dir = dir.path().to_string_lossy();
-        let accessor = object_store::backend::fs::Builder::default()
+        let accessor = object_store::services::Fs::default()
            .root(&store_dir)
            .build()
            .unwrap();
-        let object_store = ObjectStore::new(accessor);
+        let object_store = ObjectStore::new(accessor).finish();
        let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
        let table_engine = Arc::new(MitoEngine::new(
            EngineConfig::default(),
            EngineImpl::new(
                StorageEngineConfig::default(),
                Arc::new(NoopLogStore::default()),
                object_store.clone(),
                noop_compaction_scheduler,
            ),
            object_store,
        ));
@@ -487,4 +519,53 @@ mod tests {
        assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
        assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
    }

+    #[tokio::test]
+    async fn test_system_catalog_table_records() {
+        let (_, table_engine) = prepare_table_engine().await;
+        let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();
+
+        let table_insertion = build_table_insert_request(
+            DEFAULT_CATALOG_NAME.to_string(),
+            DEFAULT_SCHEMA_NAME.to_string(),
+            "my_table".to_string(),
+            1,
+        );
+        let result = catalog_table.insert(table_insertion).await.unwrap();
+        assert_eq!(result, 1);
+
+        let records = catalog_table.records().await.unwrap();
+        let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
+        assert_eq!(batches.len(), 1);
+        let batch = batches.remove(0);
+        assert_eq!(batch.num_rows(), 1);
+
+        let row = batch.rows().next().unwrap();
+        let Value::UInt8(entry_type) = row[0] else { unreachable!() };
+        let Value::Binary(key) = row[1].clone() else { unreachable!() };
+        let Value::Binary(value) = row[3].clone() else { unreachable!() };
+        let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
+        let expected = Entry::Table(TableEntry {
+            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: "my_table".to_string(),
+            table_id: 1,
+        });
+        assert_eq!(entry, expected);
+
+        let table_deletion = build_table_deletion_request(
+            &DeregisterTableRequest {
+                catalog: DEFAULT_CATALOG_NAME.to_string(),
+                schema: DEFAULT_SCHEMA_NAME.to_string(),
+                table_name: "my_table".to_string(),
+            },
+            1,
+        );
+        let result = catalog_table.delete(table_deletion).await.unwrap();
+        assert_eq!(result, 1);
+
+        let records = catalog_table.records().await.unwrap();
+        let batches = RecordBatches::try_collect(records).await.unwrap().take();
+        assert_eq!(batches.len(), 0);
+    }
}
src/catalog/src/table_source.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;

use crate::error::{
    CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::CatalogListRef;

pub struct DfTableSourceProvider {
    catalog_list: CatalogListRef,
    resolved_tables: HashMap<String, Arc<dyn TableSource>>,
    disallow_cross_schema_query: bool,
    default_catalog: String,
    default_schema: String,
}

impl DfTableSourceProvider {
    pub fn new(
        catalog_list: CatalogListRef,
        disallow_cross_schema_query: bool,
        query_ctx: &QueryContext,
    ) -> Self {
        Self {
            catalog_list,
            disallow_cross_schema_query,
            resolved_tables: HashMap::new(),
            default_catalog: query_ctx.current_catalog(),
            default_schema: query_ctx.current_schema(),
        }
    }

    pub fn resolve_table_ref<'a>(
        &'a self,
        table_ref: TableReference<'a>,
    ) -> Result<ResolvedTableReference<'a>> {
        if self.disallow_cross_schema_query {
            match &table_ref {
                TableReference::Bare { .. } => (),
                TableReference::Partial { schema, .. } => {
                    ensure!(
                        schema.as_ref() == self.default_schema,
                        QueryAccessDeniedSnafu {
                            catalog: &self.default_catalog,
                            schema: schema.as_ref(),
                        }
                    );
                }
                TableReference::Full {
                    catalog, schema, ..
                } => {
                    ensure!(
                        catalog.as_ref() == self.default_catalog
                            && schema.as_ref() == self.default_schema,
                        QueryAccessDeniedSnafu {
                            catalog: catalog.as_ref(),
                            schema: schema.as_ref()
                        }
                    );
                }
            };
        }

        Ok(table_ref.resolve(&self.default_catalog, &self.default_schema))
    }
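A rough usage sketch, assuming a provider built with defaults like the tests below use; the table name and the rendered string format are illustrative assumptions:

    // A bare name resolves against the default catalog and schema.
    let table_ref = TableReference::Bare { table: Cow::Borrowed("metrics") };
    let resolved = provider.resolve_table_ref(table_ref)?;
    assert_eq!(resolved.to_string(), "greptime.public.metrics");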
    pub async fn resolve_table(
        &mut self,
        table_ref: OwnedTableReference,
    ) -> Result<Arc<dyn TableSource>> {
        let table_ref = table_ref.as_table_reference();
        let table_ref = self.resolve_table_ref(table_ref)?;

        let resolved_name = table_ref.to_string();
        if let Some(table) = self.resolved_tables.get(&resolved_name) {
            return Ok(table.clone());
        }

        let catalog_name = table_ref.catalog.as_ref();
        let schema_name = table_ref.schema.as_ref();
        let table_name = table_ref.table.as_ref();

        let catalog = self
            .catalog_list
            .catalog(catalog_name)?
            .context(CatalogNotFoundSnafu { catalog_name })?;
        let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
            catalog: catalog_name,
            schema: schema_name,
        })?;
        let table = schema
            .table(table_name)
            .await?
            .with_context(|| TableNotExistSnafu {
                table: format_full_table_name(catalog_name, schema_name, table_name),
            })?;

        let table = DfTableProviderAdapter::new(table);
        let table = provider_as_source(Arc::new(table));
        self.resolved_tables.insert(resolved_name, table.clone());
        Ok(table)
    }
}

#[cfg(test)]
mod tests {
    use std::borrow::Cow;

    use session::context::QueryContext;

    use super::*;
    use crate::local::MemoryCatalogManager;

    #[test]
    fn test_validate_table_ref() {
        let query_ctx = &QueryContext::with("greptime", "public");

        let table_provider =
            DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);

        let table_ref = TableReference::Bare {
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());

        let table_ref = TableReference::Partial {
            schema: Cow::Borrowed("public"),
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());

        let table_ref = TableReference::Partial {
            schema: Cow::Borrowed("wrong_schema"),
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_err());

        let table_ref = TableReference::Full {
            catalog: Cow::Borrowed("greptime"),
            schema: Cow::Borrowed("public"),
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_ok());

        let table_ref = TableReference::Full {
            catalog: Cow::Borrowed("wrong_catalog"),
            schema: Cow::Borrowed("public"),
            table: Cow::Borrowed("table_name"),
        };
        let result = table_provider.resolve_table_ref(table_ref);
        assert!(result.is_err());
    }
}
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,6 +20,7 @@ use std::sync::Arc;
use std::task::{Context, Poll};

+use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
@@ -38,10 +39,13 @@ use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};

-use crate::error::{Error, InsertCatalogRecordSnafu};
-use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
+use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
+use crate::system::{
+    build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
+    SystemCatalogTable,
+};
use crate::{
-    format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
+    CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
};

/// Tables holds all tables created by user.
@@ -159,16 +163,10 @@ fn tables_to_record_batch(

    for table_name in table_names {
        // Safety: All these vectors are string type.
-        catalog_vec
-            .push_value_ref(ValueRef::String(catalog_name))
-            .unwrap();
-        schema_vec
-            .push_value_ref(ValueRef::String(schema_name))
-            .unwrap();
-        table_name_vec
-            .push_value_ref(ValueRef::String(&table_name))
-            .unwrap();
-        engine_vec.push_value_ref(ValueRef::String(engine)).unwrap();
+        catalog_vec.push_value_ref(ValueRef::String(catalog_name));
+        schema_vec.push_value_ref(ValueRef::String(schema_name));
+        table_name_vec.push_value_ref(ValueRef::String(&table_name));
+        engine_vec.push_value_ref(ValueRef::String(engine));
    }

    vec![
@@ -203,6 +201,7 @@ pub struct InformationSchema {
    pub system: Arc<SystemCatalogTable>,
}

+#[async_trait]
impl SchemaProvider for InformationSchema {
    fn as_any(&self) -> &dyn Any {
        self
@@ -215,7 +214,7 @@ impl SchemaProvider for InformationSchema {
        ])
    }

-    fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
+    async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
        if name.eq_ignore_ascii_case("tables") {
            Ok(Some(self.tables.clone()))
        } else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
@@ -233,6 +232,10 @@ impl SchemaProvider for InformationSchema {
        panic!("System catalog & schema does not support register table")
    }

+    fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
+        unimplemented!("System catalog & schema does not support rename table")
+    }
+
    fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
        panic!("System catalog & schema does not support deregister table")
    }
@@ -269,8 +272,7 @@ impl SystemCatalog {
        table_name: String,
        table_id: TableId,
    ) -> crate::error::Result<usize> {
-        let full_table_name = format_full_table_name(&catalog, &schema, &table_name);
-        let request = build_table_insert_request(full_table_name, table_id);
+        let request = build_table_insert_request(catalog, schema, table_name, table_id);
        self.information_schema
            .system
            .insert(request)
@@ -278,6 +280,21 @@ impl SystemCatalog {
            .context(InsertCatalogRecordSnafu)
    }

+    pub(crate) async fn deregister_table(
+        &self,
+        request: &DeregisterTableRequest,
+        table_id: TableId,
+    ) -> CatalogResult<bool> {
+        self.information_schema
+            .system
+            .delete(build_table_deletion_request(request, table_id))
+            .await
+            .map(|x| x == 1)
+            .with_context(|_| error::DeregisterTableSnafu {
+                request: request.clone(),
+            })
+    }
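A hedged sketch of the calling convention; `system_catalog` and the literal values are illustrative:

    // A successful deregistration deletes exactly one system catalog row.
    let request = DeregisterTableRequest {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
        table_name: "my_table".to_string(),
    };
    let removed = system_catalog.deregister_table(&request, 1024).await?;
    assert!(removed);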
    pub async fn register_schema(
        &self,
        catalog: String,
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ mod tests {
    use std::sync::Arc;

    use catalog::local::LocalCatalogManager;
-    use catalog::{CatalogManager, RegisterTableRequest};
+    use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::{error, info};
    use mito::config::EngineConfig;
@@ -38,6 +38,45 @@ mod tests {
        Ok(catalog_manager)
    }

+    #[tokio::test]
+    async fn test_rename_table() {
+        common_telemetry::init_default_ut_logging();
+        let catalog_manager = create_local_catalog_manager().await.unwrap();
+        // register table
+        let table_name = "test_table";
+        let table_id = 42;
+        let table = Arc::new(NumbersTable::new(table_id));
+        let request = RegisterTableRequest {
+            catalog: DEFAULT_CATALOG_NAME.to_string(),
+            schema: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: table_name.to_string(),
+            table_id,
+            table: table.clone(),
+        };
+        assert!(catalog_manager.register_table(request).await.unwrap());
+
+        // rename table
+        let new_table_name = "table_t";
+        let rename_table_req = RenameTableRequest {
+            catalog: DEFAULT_CATALOG_NAME.to_string(),
+            schema: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: table_name.to_string(),
+            new_table_name: new_table_name.to_string(),
+            table_id,
+        };
+        assert!(catalog_manager
+            .rename_table(rename_table_req)
+            .await
+            .unwrap());
+
+        let registered_table = catalog_manager
+            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
+            .await
+            .unwrap()
+            .unwrap();
+        assert_eq!(registered_table.table_info().ident.table_id, table_id);
+    }
+
    #[tokio::test]
    async fn test_duplicate_register() {
        let catalog_manager = create_local_catalog_manager().await.unwrap();
@@ -120,6 +159,7 @@ mod tests {
        let table = guard.as_ref().unwrap();
        let table_registered = catalog_manager
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
+            .await
            .unwrap()
            .unwrap();
        assert_eq!(
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
        let table_id = TableId::from_str(
            request
                .table_options
+                .extra_options
                .get("table_id")
                .unwrap_or(&default_table_id),
        )
@@ -220,4 +221,8 @@ impl TableEngine for MockTableEngine {
    ) -> table::Result<bool> {
        unimplemented!()
    }

+    async fn close(&self) -> table::Result<()> {
+        Ok(())
+    }
}
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,7 +28,7 @@ mod tests {
    };
    use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-    use datatypes::schema::Schema;
+    use datatypes::schema::RawSchema;
    use futures_util::StreamExt;
    use table::engine::{EngineContext, TableEngineRef};
    use table::requests::CreateTableRequest;
@@ -116,7 +116,7 @@ mod tests {
        let schema_name = "nonexistent_schema".to_string();
        let table_name = "fail_table".to_string();
        // this schema has no effect
-        let table_schema = Arc::new(Schema::new(vec![]));
+        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
            .create_table(
                &EngineContext {},
@@ -126,7 +126,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: table_name.clone(),
                desc: None,
-                schema: table_schema.clone(),
+                schema: table_schema,
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
@@ -176,7 +176,7 @@ mod tests {
        let table_name = "test_table".to_string();
        let table_id = 1;
        // this schema has no effect
-        let table_schema = Arc::new(Schema::new(vec![]));
+        let table_schema = RawSchema::new(vec![]);
        let table = table_engine
            .create_table(
                &EngineContext {},
@@ -186,7 +186,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: table_name.clone(),
                desc: None,
-                schema: table_schema.clone(),
+                schema: table_schema,
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
@@ -246,7 +246,7 @@ mod tests {
                schema_name: schema_name.clone(),
                table_name: "".to_string(),
                desc: None,
-                schema: Arc::new(Schema::new(vec![])),
+                schema: RawSchema::new(vec![]),
                region_numbers: vec![0],
                primary_key_indices: vec![],
                create_if_not_exists: false,
@@ -6,34 +6,35 @@ license.workspace = true

[dependencies]
api = { path = "../api" }
-async-stream = "0.3"
+arrow-flight.workspace = true
+async-stream.workspace = true
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
-rand = "0.8"
-snafu = { version = "0.7", features = ["backtraces"] }
-tonic = "0.8"
+prost.workspace = true
+rand.workspace = true
+snafu.workspace = true
+tonic.workspace = true

[dev-dependencies]
datanode = { path = "../datanode" }
substrait = { path = "../common/substrait" }
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

-# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
-[dev-dependencies.prost_09]
-package = "prost"
-version = "0.9"
+prost.workspace = true

[dev-dependencies.substrait_proto]
package = "substrait"
-version = "0.2"
+version = "0.4"
@@ -1,106 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use client::{Client, Database};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let db = Database::new("greptime", client);

    let (columns, row_count) = insert_data();

    let expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        region_number: 0,
        columns,
        row_count,
    };
    db.insert(expr).await.unwrap();
}

fn insert_data() -> (Vec<Column>, u32) {
    const SEMANTIC_TAG: i32 = 0;
    const SEMANTIC_FIELD: i32 = 1;
    const SEMANTIC_TS: i32 = 2;

    let row_count = 4;

    let host_vals = column::Values {
        string_values: vec![
            "host1".to_string(),
            "host2".to_string(),
            "host3".to_string(),
            "host4".to_string(),
        ],
        ..Default::default()
    };
    let host_column = Column {
        column_name: "host".to_string(),
        semantic_type: SEMANTIC_TAG,
        values: Some(host_vals),
        null_mask: vec![0],
        ..Default::default()
    };

    let cpu_vals = column::Values {
        f64_values: vec![0.31, 0.41, 0.2],
        ..Default::default()
    };
    let cpu_column = Column {
        column_name: "cpu".to_string(),
        semantic_type: SEMANTIC_FIELD,
        values: Some(cpu_vals),
        null_mask: vec![2],
        ..Default::default()
    };

    let mem_vals = column::Values {
        f64_values: vec![0.1, 0.2, 0.3],
        ..Default::default()
    };
    let mem_column = Column {
        column_name: "memory".to_string(),
        semantic_type: SEMANTIC_FIELD,
        values: Some(mem_vals),
        null_mask: vec![4],
        ..Default::default()
    };

    let ts_vals = column::Values {
        i64_values: vec![100, 101, 102, 103],
        ..Default::default()
    };
    let ts_column = Column {
        column_name: "ts".to_string(),
        semantic_type: SEMANTIC_TS,
        values: Some(ts_vals),
        null_mask: vec![0],
        ..Default::default()
    };

    (
        vec![host_column, cpu_column, mem_column, ts_column],
        row_count,
    )
}
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,13 +13,13 @@
// limitations under the License.

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
-use client::admin::Admin;
use client::{Client, Database};
-use prost_09::Message;
-use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
-use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
-use substrait_proto::protobuf::rel::RelType;
-use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use prost::Message;
+use substrait_proto::proto::plan_rel::RelType as PlanRelType;
+use substrait_proto::proto::read_rel::{NamedTable, ReadType};
+use substrait_proto::proto::rel::RelType;
+use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};

fn main() {
@@ -66,13 +66,12 @@ async fn run() {
        region_ids: vec![0],
    };

-    let admin = Admin::new("create table", client.clone());
-    let result = admin.create(create_table_expr).await.unwrap();
+    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+    let result = db.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
-    let db = Database::new("greptime", client);
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
@@ -90,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
    let read_type = ReadType::NamedTable(named_table);

    let read_rel = ReadRel {
-        common: None,
-        base_schema: None,
-        filter: None,
-        projection: None,
-        advanced_extension: None,
        read_type: Some(read_type),
+        ..Default::default()
    };

    let mut buf = vec![];
@@ -1,34 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use client::{Client, Database, Select};
use tracing::{event, Level};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let db = Database::new("greptime", client);

    let sql = Select::Sql("select * from demo".to_string());
    let result = db.select(sql).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}
@@ -1,137 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use common_error::prelude::StatusCode;
use common_query::Output;
use snafu::prelude::*;

use crate::database::PROTOCOL_VERSION;
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
pub struct Admin {
    name: String,
    client: Client,
}

impl Admin {
    pub fn new(name: impl Into<String>, client: Client) -> Self {
        Self {
            name: name.into(),
            client,
        }
    }

    pub async fn create(&self, expr: CreateTableExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::CreateTable(expr)),
        };
        self.do_request(expr).await
    }

    pub async fn do_request(&self, expr: AdminExpr) -> Result<AdminResult> {
        // `remove(0)` is safe because of `do_requests`'s invariants.
        Ok(self.do_requests(vec![expr]).await?.remove(0))
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::Alter(expr)),
        };
        self.do_request(expr).await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::DropTable(expr)),
        };

        self.do_request(expr).await
    }

    /// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
    async fn do_requests(&self, exprs: Vec<AdminExpr>) -> Result<Vec<AdminResult>> {
        let expr_count = exprs.len();
        let req = AdminRequest {
            name: self.name.clone(),
            exprs,
        };

        let resp = self.client.admin(req).await?;

        let results = resp.results;
        ensure!(
            results.len() == expr_count,
            error::MissingResultSnafu {
                name: "admin_results",
                expected: expr_count,
                actual: results.len(),
            }
        );
        Ok(results)
    }

    pub async fn create_database(&self, expr: CreateDatabaseExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::CreateDatabase(expr)),
        };
        Ok(self.do_requests(vec![expr]).await?.remove(0))
    }
}

pub fn admin_result_to_output(admin_result: AdminResult) -> Result<Output> {
    let header = admin_result.header.context(error::MissingHeaderSnafu)?;
    if !StatusCode::is_success(header.code) {
        return error::DatanodeSnafu {
            code: header.code,
            msg: header.err_msg,
        }
        .fail();
    }

    let result = admin_result.result.context(error::MissingResultSnafu {
        name: "result".to_string(),
        expected: 1_usize,
        actual: 0_usize,
    })?;
    let output = match result {
        admin_result::Result::Mutate(mutate) => {
            if mutate.failure != 0 {
                return error::MutateFailureSnafu {
                    failure: mutate.failure,
                }
                .fail();
            }
            Output::AffectedRows(mutate.success as usize)
        }
    };
    Ok(output)
}
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,8 @@

use std::sync::Arc;

-use api::v1::greptime_client::GreptimeClient;
-use api::v1::*;
+use api::v1::greptime_database_client::GreptimeDatabaseClient;
+use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
@@ -24,6 +24,25 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

+pub(crate) struct DatabaseClient {
+    pub(crate) inner: GreptimeDatabaseClient<Channel>,
+}
+
+pub(crate) struct FlightClient {
+    addr: String,
+    client: FlightServiceClient<Channel>,
+}
+
+impl FlightClient {
+    pub(crate) fn addr(&self) -> &str {
+        &self.addr
+    }
+
+    pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
+        &mut self.client
+    }
+}
+
#[derive(Clone, Debug, Default)]
pub struct Client {
    inner: Arc<Inner>,
@@ -104,57 +123,35 @@ impl Client {
        self.inner.set_peers(urls);
    }

-    pub async fn admin(&self, req: AdminRequest) -> Result<AdminResponse> {
-        let req = BatchRequest {
-            admins: vec![req],
-            ..Default::default()
-        };
-
-        let mut res = self.batch(req).await?;
-        res.admins.pop().context(error::MissingResultSnafu {
-            name: "admins",
-            expected: 1_usize,
-            actual: 0_usize,
-        })
-    }
-
-    pub async fn database(&self, req: DatabaseRequest) -> Result<DatabaseResponse> {
-        let req = BatchRequest {
-            databases: vec![req],
-            ..Default::default()
-        };
-
-        let mut res = self.batch(req).await?;
-        res.databases.pop().context(error::MissingResultSnafu {
-            name: "database",
-            expected: 1_usize,
-            actual: 0_usize,
-        })
-    }
-
-    pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
-        let peer = self
+    fn find_channel(&self) -> Result<(String, Channel)> {
+        let addr = self
            .inner
            .get_peer()
            .context(error::IllegalGrpcClientStateSnafu {
                err_msg: "No available peer found",
            })?;
-        let mut client = self.make_client(&peer)?;
-        let result = client
-            .batch(req)
-            .await
-            .context(error::TonicStatusSnafu { addr: peer })?;
-        Ok(result.into_inner())
-    }
-
-    fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
-        let addr = addr.as_ref();
        let channel = self
            .inner
            .channel_manager
-            .get(addr)
-            .context(error::CreateChannelSnafu { addr })?;
-        Ok(GreptimeClient::new(channel))
+            .get(&addr)
+            .context(error::CreateChannelSnafu { addr: &addr })?;
+        Ok((addr, channel))
    }

+    pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
+        let (addr, channel) = self.find_channel()?;
+        Ok(FlightClient {
+            addr,
+            client: FlightServiceClient::new(channel),
+        })
+    }
+
+    pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
+        let (_, channel) = self.find_channel()?;
+        Ok(DatabaseClient {
+            inner: GreptimeDatabaseClient::new(channel),
+        })
+    }
}
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,236 +12,245 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

-use api::v1::codec::SelectResult as GrpcSelectResult;
-use api::v1::column::SemanticType;
+use api::v1::auth_header::AuthScheme;
+use api::v1::ddl_request::Expr as DdlExpr;
+use api::v1::greptime_request::Request;
+use api::v1::query_request::Query;
use api::v1::{
-    object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
-    MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, SelectExpr,
+    greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
+    DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
+    RequestHeader,
};
-use common_error::status_code::StatusCode;
-use common_grpc_expr::column_to_vector;
+use arrow_flight::{FlightData, Ticket};
+use common_error::prelude::*;
+use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
-use common_recordbatch::{RecordBatch, RecordBatches};
-use datatypes::prelude::*;
-use datatypes::schema::{ColumnSchema, Schema};
-use snafu::{ensure, OptionExt, ResultExt};
+use common_telemetry::logging;
+use futures_util::{TryFutureExt, TryStreamExt};
+use prost::Message;
+use snafu::{ensure, ResultExt};

-use crate::error::{ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu};
+use crate::error::{
+    ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
+};
use crate::{error, Client, Result};

-pub const PROTOCOL_VERSION: u32 = 1;
-
#[derive(Clone, Debug)]
pub struct Database {
-    name: String,
+    // The "catalog" and "schema" to be used in processing the requests at the server side.
+    // They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
+    // They will be carried in the request header.
+    catalog: String,
+    schema: String,
+
    client: Client,
+    ctx: FlightContext,
}

impl Database {
-    pub fn new(name: impl Into<String>, client: Client) -> Self {
+    pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
        Self {
-            name: name.into(),
+            catalog: catalog.into(),
+            schema: schema.into(),
            client,
+            ctx: FlightContext::default(),
        }
    }

-    pub fn name(&self) -> &str {
-        &self.name
+    pub fn catalog(&self) -> &String {
+        &self.catalog
    }

-    pub async fn insert(&self, insert: InsertExpr) -> Result<ObjectResult> {
-        let header = ExprHeader {
-            version: PROTOCOL_VERSION,
-        };
-        let expr = ObjectExpr {
-            header: Some(header),
-            expr: Some(object_expr::Expr::Insert(insert)),
-        };
-        self.object(expr).await?.try_into()
+    pub fn set_catalog(&mut self, catalog: impl Into<String>) {
+        self.catalog = catalog.into();
    }

-    pub async fn batch_insert(&self, insert_exprs: Vec<InsertExpr>) -> Result<Vec<ObjectResult>> {
-        let header = ExprHeader {
-            version: PROTOCOL_VERSION,
+    pub fn schema(&self) -> &String {
+        &self.schema
+    }
+
+    pub fn set_schema(&mut self, schema: impl Into<String>) {
+        self.schema = schema.into();
+    }
+
+    pub fn set_auth(&mut self, auth: AuthScheme) {
+        self.ctx.auth_header = Some(AuthHeader {
+            auth_scheme: Some(auth),
+        });
+    }
+
+    pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
+        let mut client = self.client.make_database_client()?.inner;
+        let request = GreptimeRequest {
+            header: Some(RequestHeader {
+                catalog: self.catalog.clone(),
+                schema: self.schema.clone(),
+                authorization: self.ctx.auth_header.clone(),
+            }),
+            request: Some(Request::Insert(request)),
        };
-        let obj_exprs = insert_exprs
-            .into_iter()
-            .map(|expr| ObjectExpr {
-                header: Some(header.clone()),
-                expr: Some(object_expr::Expr::Insert(expr)),
-            })
-            .collect();
-        self.objects(obj_exprs)
+        let response = client
+            .handle(request)
            .await?
+            .into_inner()
+            .response
+            .context(IllegalDatabaseResponseSnafu {
+                err_msg: "GreptimeResponse is empty",
+            })?;
+        let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
+        Ok(value)
    }
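Taken together, a minimal sketch of the new client surface, assuming a datanode at an illustrative address and an `insert_request: InsertRequest` built elsewhere:

    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let mut db = Database::new("greptime", "public", client);
    db.set_auth(AuthScheme::Basic(Basic {
        username: "u".to_string(),
        password: "p".to_string(),
    }));
    // The insert path returns the affected row count over the dedicated
    // GreptimeDatabase gRPC service rather than a Flight stream.
    let affected: u32 = db.insert(insert_request).await?;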
+    pub async fn sql(&self, sql: &str) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::Sql(sql.to_string())),
+        }))
+        .await
+    }
+
+    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::LogicalPlan(logical_plan)),
+        }))
+        .await
+    }
+
+    pub async fn prom_range_query(
+        &self,
+        promql: &str,
+        start: &str,
+        end: &str,
+        step: &str,
+    ) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::PromRangeQuery(PromRangeQuery {
+                query: promql.to_string(),
+                start: start.to_string(),
+                end: end.to_string(),
+                step: step.to_string(),
+            })),
+        }))
+        .await
+    }
+
+    pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::CreateTable(expr)),
+        }))
+        .await
+    }
+
+    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::Alter(expr)),
+        }))
+        .await
+    }
+
+    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::DropTable(expr)),
+        }))
+        .await
+    }
+
+    pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::FlushTable(expr)),
+        }))
+        .await
+    }
+
+    async fn do_get(&self, request: Request) -> Result<Output> {
+        let request = GreptimeRequest {
+            header: Some(RequestHeader {
+                catalog: self.catalog.clone(),
+                schema: self.schema.clone(),
+                authorization: self.ctx.auth_header.clone(),
+            }),
+            request: Some(request),
+        };
+        let request = Ticket {
+            ticket: request.encode_to_vec().into(),
+        };
+
+        let mut client = self.client.make_flight_client()?;
+
+        // TODO(LFC): Streaming get flight data.
+        let flight_data: Vec<FlightData> = client
+            .mut_inner()
+            .do_get(request)
+            .and_then(|response| response.into_inner().try_collect())
+            .await
+            .map_err(|e| {
+                let tonic_code = e.code();
+                let e: error::Error = e.into();
+                let code = e.status_code();
+                let msg = e.to_string();
+                error::ServerSnafu { code, msg }
+                    .fail::<()>()
+                    .map_err(BoxedError::new)
+                    .context(error::FlightGetSnafu {
+                        tonic_code,
+                        addr: client.addr(),
+                    })
+                    .map_err(|error| {
+                        logging::error!(
+                            "Failed to do Flight get, addr: {}, code: {}, source: {}",
+                            client.addr(),
+                            tonic_code,
+                            error
+                        );
+                        error
+                    })
+                    .unwrap_err()
+            })?;
+
+        let decoder = &mut FlightDecoder::default();
+        let flight_messages = flight_data
+            .into_iter()
-            .map(|result| result.try_into())
-            .collect()
-    }
+            .map(|x| decoder.try_decode(x).context(ConvertFlightDataSnafu))
+            .collect::<Result<Vec<_>>>()?;

-    pub async fn select(&self, expr: Select) -> Result<ObjectResult> {
-        let select_expr = match expr {
-            Select::Sql(sql) => SelectExpr {
-                expr: Some(select_expr::Expr::Sql(sql)),
-            },
-        };
-        self.do_select(select_expr).await
-    }
-
-    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
-        let select_expr = SelectExpr {
-            expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
-        };
-        self.do_select(select_expr).await
-    }
-
-    async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
-        let header = ExprHeader {
-            version: PROTOCOL_VERSION,
-        };
-
-        let expr = ObjectExpr {
-            header: Some(header),
-            expr: Some(object_expr::Expr::Select(select_expr)),
-        };
-
-        let obj_result = self.object(expr).await?;
-        obj_result.try_into()
-    }
-
-    pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
-        let res = self.objects(vec![expr]).await?.pop().unwrap();
-        Ok(res)
-    }
-
-    async fn objects(&self, exprs: Vec<ObjectExpr>) -> Result<Vec<GrpcObjectResult>> {
-        let expr_count = exprs.len();
-        let req = DatabaseRequest {
-            name: self.name.clone(),
-            exprs,
-        };
-
-        let res = self.client.database(req).await?;
-        let res = res.results;
-
-        ensure!(
-            res.len() == expr_count,
-            error::MissingResultSnafu {
-                name: "object_results",
-                expected: expr_count,
-                actual: res.len(),
-            }
-        );
-
-        Ok(res)
-    }
-}
-
-#[derive(Debug)]
-pub enum ObjectResult {
-    Select(GrpcSelectResult),
-    Mutate(GrpcMutateResult),
-}
-
-impl TryFrom<api::v1::ObjectResult> for ObjectResult {
-    type Error = error::Error;
-
-    fn try_from(object_result: api::v1::ObjectResult) -> std::result::Result<Self, Self::Error> {
-        let header = object_result.header.context(error::MissingHeaderSnafu)?;
-        if !StatusCode::is_success(header.code) {
-            return DatanodeSnafu {
-                code: header.code,
-                msg: header.err_msg,
-            }
-            .fail();
-        }
-
-        let obj_result = object_result.result.context(error::MissingResultSnafu {
-            name: "result".to_string(),
-            expected: 1_usize,
-            actual: 0_usize,
-        })?;
-        Ok(match obj_result {
-            object_result::Result::Select(select) => {
-                let result = (*select.raw_data).try_into().context(DecodeSelectSnafu)?;
-                ObjectResult::Select(result)
-            }
-            object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
-        })
-    }
-}
-
-pub enum Select {
-    Sql(String),
-}
-
-impl TryFrom<ObjectResult> for Output {
-    type Error = error::Error;
-
-    fn try_from(value: ObjectResult) -> Result<Self> {
-        let output = match value {
-            ObjectResult::Select(select) => {
-                let vectors = select
-                    .columns
-                    .iter()
-                    .map(|column| {
-                        column_to_vector(column, select.row_count).context(ColumnToVectorSnafu)
-                    })
-                    .collect::<Result<Vec<VectorRef>>>()?;
-
-                let column_schemas = select
-                    .columns
-                    .iter()
-                    .zip(vectors.iter())
-                    .map(|(column, vector)| {
-                        let datatype = vector.data_type();
-                        // nullable or not, does not affect the output
-                        let mut column_schema =
-                            ColumnSchema::new(&column.column_name, datatype, true);
-                        if column.semantic_type == SemanticType::Timestamp as i32 {
-                            column_schema = column_schema.with_time_index(true);
-                        }
-                        column_schema
-                    })
-                    .collect::<Vec<ColumnSchema>>();
-
-                let schema = Arc::new(Schema::try_new(column_schemas).context(ConvertSchemaSnafu)?);
-                let recordbatches = if vectors.is_empty() {
-                    RecordBatches::try_new(schema, vec![])
-                } else {
-                    RecordBatch::new(schema, vectors)
-                        .and_then(|batch| RecordBatches::try_new(batch.schema.clone(), vec![batch]))
+        let output = if let Some(FlightMessage::AffectedRows(rows)) = flight_messages.get(0) {
+            ensure!(
+                flight_messages.len() == 1,
+                IllegalFlightMessagesSnafu {
+                    reason: "Expect 'AffectedRows' Flight messages to be one and only!"
+                }
-                .context(error::CreateRecordBatchesSnafu)?;
-                Output::RecordBatches(recordbatches)
-            }
-            ObjectResult::Mutate(mutate) => {
-                if mutate.failure != 0 {
-                    return error::MutateFailureSnafu {
-                        failure: mutate.failure,
-                    }
-                    .fail();
-                }
-                Output::AffectedRows(mutate.success as usize)
-            }
+            );
+            Output::AffectedRows(*rows)
+        } else {
+            let recordbatches = flight_messages_to_recordbatches(flight_messages)
+                .context(ConvertFlightDataSnafu)?;
+            Output::RecordBatches(recordbatches)
        };
        Ok(output)
    }
}
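A condensed sketch of the contract do_get establishes for callers; the query string is illustrative:

    // A mutation yields AffectedRows; a query yields RecordBatches.
    match db.sql("SELECT * FROM demo").await? {
        Output::AffectedRows(n) => println!("{n} rows affected"),
        Output::RecordBatches(batches) => println!("{} batches", batches.take().len()),
        _ => {} // other Output variants are not produced by this eager path
    }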
+#[derive(Default, Debug, Clone)]
+pub struct FlightContext {
+    auth_header: Option<AuthHeader>,
+}
+
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::helper::ColumnDataTypeWrapper;
-    use api::v1::Column;
+    use api::v1::auth_header::AuthScheme;
+    use api::v1::{AuthHeader, Basic, Column};
    use common_grpc::select::{null_mask, values};
+    use common_grpc_expr::column_to_vector;
    use datatypes::prelude::{Vector, VectorRef};
    use datatypes::vectors::{
        BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
        Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
        UInt32Vector, UInt64Vector, UInt8Vector,
    };

    use super::*;
+    use crate::database::FlightContext;

    #[test]
    fn test_column_to_vector() {
@@ -326,4 +335,26 @@ mod tests {
            datatype: wrapper.datatype() as i32,
        }
    }

+    #[test]
+    fn test_flight_ctx() {
+        let mut ctx = FlightContext::default();
+        assert!(ctx.auth_header.is_none());
+
+        let basic = AuthScheme::Basic(Basic {
+            username: "u".to_string(),
+            password: "p".to_string(),
+        });
+
+        ctx.auth_header = Some(AuthHeader {
+            auth_scheme: Some(basic),
+        });
+
+        assert!(matches!(
+            ctx.auth_header,
+            Some(AuthHeader {
+                auth_scheme: Some(AuthScheme::Basic(_)),
+            })
+        ))
+    }
}
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,67 +13,39 @@
// limitations under the License.

use std::any::Any;
use std::sync::Arc;
use std::str::FromStr;

use api::serde::DecodeError;
use common_error::prelude::*;
use datafusion::physical_plan::ExecutionPlan;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Connect failed to {}, source: {}", url, source))]
ConnectFailed {
url: String,
source: tonic::transport::Error,
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages {
reason: String,
backtrace: Backtrace,
},

#[snafu(display("Missing {}, expected {}, actual {}", name, expected, actual))]
MissingResult {
name: String,
expected: usize,
actual: usize,
},

#[snafu(display("Missing result header"))]
MissingHeader,

#[snafu(display("Tonic internal error, addr: {}, source: {}", addr, source))]
TonicStatus {
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
addr: String,
source: tonic::Status,
backtrace: Backtrace,
tonic_code: Code,
source: BoxedError,
},

#[snafu(display("Fail to decode select result, source: {}", source))]
DecodeSelect { source: DecodeError },

#[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
Datanode { code: u32, msg: String },

#[snafu(display("Failed to encode physical plan: {:?}, source: {}", physical, source))]
EncodePhysical {
physical: Arc<dyn ExecutionPlan>,
#[snafu(display("Failed to convert FlightData, source: {}", source))]
ConvertFlightData {
#[snafu(backtrace)]
source: common_grpc::Error,
},

#[snafu(display("Mutate result has failure {}", failure))]
MutateFailure { failure: u32, backtrace: Backtrace },

#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
source: api::error::Error,
},

#[snafu(display("Failed to create RecordBatches, source: {}", source))]
CreateRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},

#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState {
err_msg: String,
@@ -83,12 +55,6 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },

#[snafu(display("Failed to convert schema, source: {}", source))]
ConvertSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},

#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
@@ -100,11 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},

#[snafu(display("Failed to convert column to vector, source: {}", source))]
ColumnToVector {
#[snafu(backtrace)]
source: common_grpc_expr::error::Error,
},
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -112,21 +79,17 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ConnectFailed { .. }
| Error::MissingResult { .. }
| Error::MissingHeader { .. }
| Error::TonicStatus { .. }
| Error::DecodeSelect { .. }
| Error::Datanode { .. }
| Error::EncodePhysical { .. }
| Error::MutateFailure { .. }
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
Error::ConvertSchema { source } => source.status_code(),
Error::CreateRecordBatches { source } => source.status_code(),
Error::CreateChannel { source, .. } => source.status_code(),
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ColumnToVector { source, .. } => source.status_code(),
}
}

@@ -138,3 +101,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}
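
A minimal sketch of how this conversion is meant to be used (illustrative only: the helper function below is hypothetical; `INNER_ERROR_CODE` and `INNER_ERROR_MSG` are the metadata keys the impl above reads):

// Sketch: a failed gRPC call surfaces as `Error::Server`, with `code`
// decoded from the Status metadata (or StatusCode::Unknown as fallback).
fn classify_grpc_failure(status: Status) -> (StatusCode, String) {
    match Error::from(status) {
        Error::Server { code, msg } => (code, msg),
        other => (other.status_code(), other.to_string()),
    }
}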

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod admin;
mod client;
mod database;
mod error;
pub mod load_balance;

pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

pub use self::client::Client;
pub use self::database::{Database, ObjectResult, Select};
pub use self::database::Database;
pub use self::error::{Error, Result};

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -9,27 +9,46 @@ default-run = "greptime"
name = "greptime"
path = "src/bin/greptime.rs"

[features]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]

[dependencies]
anymap = "1.0.0-beta.2"
catalog = { path = "../catalog" }
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures = "0.3"
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
serde = "1.0"
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.18", features = ["full"] }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
toml = "0.5"


[dev-dependencies]
serde = "1.0"
tempdir = "0.3"
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
serde.workspace = true

[build-dependencies]
build-data = "0.1.3"

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,7 +16,7 @@ use std::fmt;

use clap::Parser;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};

#[derive(Parser)]
@@ -30,9 +30,39 @@ struct Command {
subcmd: SubCommand,
}

pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}

impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}

async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}
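
A short sketch of the two-phase lifecycle this enum enables (the free function is hypothetical; `Command::build` is defined just below):

// Sketch: build the concrete service first, then drive it to completion.
// main() below additionally races run() against ctrl-c to call stop().
async fn lifecycle(cmd: Command) -> Result<()> {
    let mut app = cmd.build().await?; // picks the right Application variant
    app.run().await                   // delegates to the wrapped Instance
}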

impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}

@@ -46,15 +76,33 @@ enum SubCommand {
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
#[clap(name = "cli")]
Cli(cli::Command),
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}
@@ -66,6 +114,7 @@ impl fmt::Display for SubCommand {
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
@@ -83,6 +132,10 @@ fn print_version() -> &'static str {
)
}

#[cfg(feature = "mem-prof")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
@@ -96,13 +149,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);

let mut app = cmd.build().await?;

tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}

78  src/cmd/src/cli.rs  Normal file
@@ -0,0 +1,78 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod cmd;
mod helper;
mod repl;

use clap::Parser;
pub use repl::Repl;

use crate::error::Result;

pub struct Instance {
repl: Repl,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}

pub async fn stop(&self) -> Result<()> {
Ok(())
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
cmd: SubCommand,
}

impl Command {
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}
}

#[derive(Parser)]
enum SubCommand {
Attach(AttachCommand),
}

impl SubCommand {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.build().await,
}
}
}

#[derive(Debug, Parser)]
pub(crate) struct AttachCommand {
#[clap(long)]
pub(crate) grpc_addr: String,
#[clap(long)]
pub(crate) meta_addr: Option<String>,
#[clap(long, action)]
pub(crate) disable_helper: bool,
}

impl AttachCommand {
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance { repl })
}
}
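
A hedged sketch of how clap maps the new subcommand's flags onto `AttachCommand` (the address is a placeholder; clap derives kebab-case flags from the snake_case fields above):

#[test]
fn sketch_attach_flags() {
    // Equivalent CLI: greptime cli attach --grpc-addr 127.0.0.1:4001 --disable-helper
    let cmd = AttachCommand::parse_from([
        "attach", "--grpc-addr", "127.0.0.1:4001", "--disable-helper",
    ]);
    assert_eq!("127.0.0.1:4001", cmd.grpc_addr);
    assert!(cmd.meta_addr.is_none() && cmd.disable_helper);
}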

154  src/cmd/src/cli/cmd.rs  Normal file
@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, InvalidReplCommandSnafu, Result};

/// Represents the parsed command from the user (which may be over many lines)
#[derive(Debug, PartialEq)]
pub(crate) enum ReplCommand {
Help,
UseDatabase { db_name: String },
Sql { sql: String },
Exit,
}

impl TryFrom<&str> for ReplCommand {
type Error = Error;

fn try_from(input: &str) -> Result<Self> {
let input = input.trim();
if input.is_empty() {
return InvalidReplCommandSnafu {
reason: "No command specified".to_string(),
}
.fail();
}

// If the line ends with ';', it must be treated as a complete input.
// However, the opposite is not true.
let input_is_completed = input.ends_with(';');

let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
let lowercase = input.to_lowercase();
match lowercase.as_str() {
"help" => Ok(Self::Help),
"exit" | "quit" => Ok(Self::Exit),
_ => match input.split_once(' ') {
Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
Ok(Self::UseDatabase {
db_name: database.trim().to_string(),
})
}
// Any valid SQL must contain at least one whitespace.
Some(_) if input_is_completed => Ok(Self::Sql {
sql: input.to_string(),
}),
_ => InvalidReplCommandSnafu {
reason: format!("unknown command '{input}', maybe input is not completed"),
}
.fail(),
},
}
}
}
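
A short sketch of the parsing rules above (inputs are illustrative): keywords parse with or without a trailing ';', while bare SQL is only accepted once it is ';'-terminated.

#[test]
fn sketch_parsing_rules() {
    assert_eq!(ReplCommand::try_from("QUIT").unwrap(), ReplCommand::Exit);
    assert_eq!(
        ReplCommand::try_from("use mydb;").unwrap(),
        ReplCommand::UseDatabase { db_name: "mydb".to_string() }
    );
    // Not ';'-terminated, so it is reported as incomplete rather than SQL.
    assert!(ReplCommand::try_from("select 1").is_err());
}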

impl ReplCommand {
pub fn help() -> &'static str {
r#"
Available commands (case insensitive):
- 'help': print this help
- 'exit' or 'quit': exit the REPL
- 'use <your database name>': switch to another database/schema context
- Any other typed text will be treated as SQL.
You can enter a new line while typing; just remember to end it with ';'.
"#
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::error::Error::InvalidReplCommand;

#[test]
fn test_from_str() {
fn test_ok(s: &str, expected: ReplCommand) {
let actual: ReplCommand = s.try_into().unwrap();
assert_eq!(expected, actual, "'{}'", s);
}

fn test_err(s: &str) {
let result: Result<ReplCommand> = s.try_into();
assert!(matches!(result, Err(InvalidReplCommand { .. })))
}

test_err("");
test_err(" ");
test_err("\t");

test_ok("help", ReplCommand::Help);
test_ok(" help", ReplCommand::Help);
test_ok(" help ", ReplCommand::Help);
test_ok(" HELP ", ReplCommand::Help);
test_ok(" Help; ", ReplCommand::Help);
test_ok(" help ; ", ReplCommand::Help);

test_ok("exit", ReplCommand::Exit);
test_ok("exit;", ReplCommand::Exit);
test_ok("exit ;", ReplCommand::Exit);
test_ok("EXIT", ReplCommand::Exit);

test_ok("quit", ReplCommand::Exit);
test_ok("quit;", ReplCommand::Exit);
test_ok("quit ;", ReplCommand::Exit);
test_ok("QUIT", ReplCommand::Exit);

test_ok(
"use Foo",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
test_ok(
" use Foo ; ",
ReplCommand::UseDatabase {
db_name: "Foo".to_string(),
},
);
// ensure that database name is case sensitive
test_ok(
" use FOO ; ",
ReplCommand::UseDatabase {
db_name: "FOO".to_string(),
},
);

// ensure that we aren't messing with capitalization
test_ok(
"SELECT * from foo;",
ReplCommand::Sql {
sql: "SELECT * from foo".to_string(),
},
);
// Input lines that don't match any of the cases above must end with ';' to be valid SQL.
test_err("insert blah");
test_ok(
"insert blah;",
ReplCommand::Sql {
sql: "insert blah".to_string(),
},
);
}
}

112  src/cmd/src/cli/helper.rs  Normal file
@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;

use rustyline::completion::Completer;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};

use crate::cli::cmd::ReplCommand;

pub(crate) struct RustylineHelper {
hinter: HistoryHinter,
highlighter: MatchingBracketHighlighter,
}

impl Default for RustylineHelper {
fn default() -> Self {
Self {
hinter: HistoryHinter {},
highlighter: MatchingBracketHighlighter::default(),
}
}
}

impl rustyline::Helper for RustylineHelper {}

impl Validator for RustylineHelper {
fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
let input = ctx.input();
match ReplCommand::try_from(input) {
Ok(_) => Ok(ValidationResult::Valid(None)),
Err(e) => {
if input.trim_end().ends_with(';') {
// If the line ends with ';', it HAS to be a valid command.
Ok(ValidationResult::Invalid(Some(e.to_string())))
} else {
Ok(ValidationResult::Incomplete)
}
}
}
}
}

impl Hinter for RustylineHelper {
type Hint = String;

fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
self.hinter.hint(line, pos, ctx)
}
}

impl Highlighter for RustylineHelper {
fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
self.highlighter.highlight(line, pos)
}

fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
&'s self,
prompt: &'p str,
default: bool,
) -> Cow<'b, str> {
self.highlighter.highlight_prompt(prompt, default)
}

fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
use nu_ansi_term::Style;
Cow::Owned(Style::new().dimmed().paint(hint).to_string())
}

fn highlight_candidate<'c>(
&self,
candidate: &'c str,
completion: rustyline::CompletionType,
) -> Cow<'c, str> {
self.highlighter.highlight_candidate(candidate, completion)
}

fn highlight_char(&self, line: &str, pos: usize) -> bool {
self.highlighter.highlight_char(line, pos)
}
}

impl Completer for RustylineHelper {
type Candidate = String;

fn complete(
&self,
line: &str,
pos: usize,
ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
// If there is a hint, use that as the auto-complete when the user hits `tab`
if let Some(hint) = self.hinter.hint(line, pos, ctx) {
Ok((pos, vec![hint]))
} else {
Ok((0, vec![]))
}
}
}

273  src/cmd/src/cli/repl.rs  Normal file
@@ -0,0 +1,273 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;

use catalog::remote::MetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
SubstraitEncodeLogicalPlanSnafu,
};

/// Captures the state of the repl, gathers commands and executes them one by one
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,

/// Current prompt
prompt: String,

/// Client for interacting with GreptimeDB
database: Database,

query_engine: Option<DatafusionQueryEngine>,
}

#[allow(clippy::print_stdout)]
impl Repl {
fn print_help(&self) {
println!("{}", ReplCommand::help())
}

pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
let mut rl = Editor::new().context(ReplCreationSnafu)?;

if !cmd.disable_helper {
rl.set_helper(Some(RustylineHelper::default()));

let history_file = history_file();
if let Err(e) = rl.load_history(&history_file) {
logging::debug!(
"failed to load history file on {}, error: {e}",
history_file.display()
);
}
}

let client = Client::with_urls([&cmd.grpc_addr]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
create_query_engine(meta_addr).await.map(Some)?
} else {
None
};

Ok(Self {
rl,
prompt: "> ".to_string(),
database,
query_engine,
})
}

/// Parse the next command
fn next_command(&mut self) -> Result<ReplCommand> {
match self.rl.readline(&self.prompt) {
Ok(ref line) => {
let request = line.trim();

self.rl.add_history_entry(request.to_string());

request.try_into()
}
Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
// Some sort of real underlying error
Err(e) => Err(e).context(ReadlineSnafu),
}
}

/// Read Evaluate Print Loop (interactive command line) for GreptimeDB
///
/// Inspired / based on repl.rs from InfluxDB IOX
pub(crate) async fn run(&mut self) -> Result<()> {
println!("Ready for commands. (Hint: try 'help')");

loop {
match self.next_command()? {
ReplCommand::Help => {
self.print_help();
}
ReplCommand::UseDatabase { db_name } => {
if self.execute_sql(format!("USE {db_name}")).await {
println!("Using {db_name}");
self.database.set_schema(&db_name);
self.prompt = format!("[{db_name}] > ");
}
}
ReplCommand::Sql { sql } => {
self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
}
}
}
}

async fn execute_sql(&self, sql: String) -> bool {
self.do_execute_sql(sql)
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
}

async fn do_execute_sql(&self, sql: String) -> Result<()> {
let start = Instant::now();

let output = if let Some(query_engine) = &self.query_engine {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;

let query_ctx = Arc::new(QueryContext::with(
self.database.catalog(),
self.database.schema(),
));

let plan = query_engine
.planner()
.plan(stmt, query_ctx)
.await
.context(PlanStatementSnafu)?;

let LogicalPlan::DfPlan(plan) =
query_engine.optimize(&plan).context(PlanStatementSnafu)?;

let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;

self.database.logical_plan(plan.to_vec()).await
} else {
self.database.sql(&sql).await
}
.context(RequestDatabaseSnafu { sql: &sql })?;

let either = match output {
Output::Stream(s) => {
let x = RecordBatches::try_collect(s)
.await
.context(CollectRecordBatchesSnafu)?;
Either::Left(x)
}
Output::RecordBatches(x) => Either::Left(x),
Output::AffectedRows(rows) => Either::Right(rows),
};

let end = Instant::now();

match either {
Either::Left(recordbatches) => {
let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
if total_rows > 0 {
println!(
"{}",
recordbatches
.pretty_print()
.context(PrettyPrintRecordBatchesSnafu)?
);
}
println!("Total Rows: {total_rows}")
}
Either::Right(rows) => println!("Affected Rows: {rows}"),
};

println!("Cost {} ms", (end - start).as_millis());
Ok(())
}
}

impl Drop for Repl {
fn drop(&mut self) {
if self.rl.helper().is_some() {
let history_file = history_file();
if let Err(e) = self.rl.save_history(&history_file) {
logging::debug!(
"failed to save history file on {}, error: {e}",
history_file.display()
);
}
}
}
}

/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
fn history_file() -> PathBuf {
let mut buf = match std::env::var("HOME") {
Ok(home) => PathBuf::from(home),
Err(_) => PathBuf::new(),
};
buf.push(".greptimedb_cli_history");
buf
}

async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let mut meta_client = MetaClientBuilder::default().enable_store().build();
meta_client
.start([meta_addr])
.await
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);

let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});

let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));

let datanode_clients = Arc::new(DatanodeClients::default());

let catalog_list = Arc::new(FrontendCatalogManager::new(
backend,
partition_manager,
datanode_clients,
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));

Ok(DatafusionQueryEngine::new(state))
}
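
A hypothetical wiring of the function above (the address argument is a placeholder):

// Sketch: the REPL only builds an engine when --meta-addr is given;
// otherwise it falls back to sending raw SQL via `Database::sql`.
async fn engine_for_repl(meta_addr: &str) -> Result<DatafusionQueryEngine> {
    create_query_engine(meta_addr).await
}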

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,14 +14,33 @@

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use datanode::datanode::{
Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;

pub struct Instance {
datanode: Datanode,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}

pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -29,8 +48,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -40,9 +59,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -54,6 +73,8 @@ struct StartCommand {
#[clap(long)]
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
metasrv_addr: Option<String>,
@@ -63,22 +84,21 @@ struct StartCommand {
data_dir: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
}

impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);

let opts: DatanodeOptions = self.try_into()?;

logging::info!("Datanode options: {:#?}", opts);

Datanode::new(opts)
.await
.context(StartDatanodeSnafu)?
.start()
.await
.context(StartDatanodeSnafu)
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;

Ok(Instance { datanode })
}
}

@@ -94,6 +114,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(addr) = cmd.rpc_addr {
opts.rpc_addr = addr;
}

if cmd.rpc_hostname.is_some() {
opts.rpc_hostname = cmd.rpc_hostname;
}

if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = addr;
}
@@ -103,8 +128,8 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}

if let Some(meta_addr) = cmd.metasrv_addr {
opts.meta_client_opts
.get_or_insert_with(MetaClientOpts::default)
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = meta_addr
.split(',')
.map(&str::trim)
@@ -121,12 +146,16 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}

if let Some(data_dir) = cmd.data_dir {
opts.storage = ObjectStoreConfig::File { data_dir };
opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
}

if let Some(wal_dir) = cmd.wal_dir {
opts.wal_dir = wal_dir;
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}

Ok(opts)
}
}
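
An illustrative use of the conversion above, bypassing clap (the path is a placeholder, and `StartCommand`'s Default derive is assumed, as in the tests below):

// Sketch: CLI flags override the matching DatanodeOptions fields.
fn options_from_flags() -> Result<DatanodeOptions> {
    let opts = DatanodeOptions::try_from(StartCommand {
        wal_dir: Some("/tmp/greptimedb/wal".to_string()),
        ..Default::default()
    })?;
    assert_eq!("/tmp/greptimedb/wal", opts.wal.dir);
    Ok(opts)
}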

@@ -134,44 +163,96 @@ impl TryFrom<StartCommand> for DatanodeOptions {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::Write;
use std::time::Duration;

use datanode::datanode::ObjectStoreConfig;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use servers::Mode;

use super::*;

#[test]
fn test_read_from_config_file() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2

[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true

[wal]
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false

[storage]
type = "File"
data_dir = "/tmp/greptimedb/data/"

[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
"#;
write!(file, "{}", toml_str).unwrap();

let cmd = StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
config_file: Some(file.path().to_str().unwrap().to_string()),
..Default::default()
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
let MetaClientOpts {
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);

assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);

let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = options.meta_client_opts.unwrap();
} = options.meta_client_options.unwrap();

assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(!tcp_nodelay);
assert!(tcp_nodelay);

match options.storage {
ObjectStoreConfig::File { data_dir } => {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
};

assert_eq!(
CompactionConfig {
max_inflight_tasks: 4,
max_files_in_level0: 8,
max_purge_tasks: 32,
},
options.compaction
);
}

#[test]
@@ -205,27 +286,4 @@ mod tests {
})
.unwrap();
}

#[test]
fn test_merge_config() {
let dn_opts = DatanodeOptions::try_from(StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
..Default::default()
})
.unwrap();
assert_eq!(Some(42), dn_opts.node_id);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = dn_opts.meta_client_opts.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout_millis);
assert!(!tcp_nodelay);
}
}

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
use std::any::Any;

use common_error::prelude::*;
use rustyline::error::ReadlineError;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -25,18 +26,42 @@ pub enum Error {
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -61,6 +86,72 @@ pub enum Error {
#[snafu(backtrace)]
source: servers::auth::Error,
},

#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
UnsupportedSelectorType {
selector_type: String,
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },

#[snafu(display("Cannot create REPL: {}", source))]
ReplCreation {
source: ReadlineError,
backtrace: Backtrace,
},

#[snafu(display("Error reading command: {}", source))]
Readline {
source: ReadlineError,
backtrace: Backtrace,
},

#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase {
sql: String,
#[snafu(backtrace)]
source: client::Error,
},

#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to pretty print RecordBatches, source: {source}"))]
PrettyPrintRecordBatches {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
#[snafu(backtrace)]
source: meta_client::error::Error,
},

#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
#[snafu(backtrace)]
source: query::error::Error,
},

#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
#[snafu(backtrace)]
source: query::error::Error,
},

#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
#[snafu(backtrace)]
source: substrait::error::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -70,12 +161,29 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
source.status_code()
}
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
}
}

@@ -1,10 +1,10 @@
|
||||
// Copyright 2022 Greptime Team
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -15,15 +15,16 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use common_base::Plugins;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::Instance;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::Plugins;
|
||||
use meta_client::MetaClientOpts;
|
||||
use frontend::prom::PromOptions;
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::auth::UserProviderRef;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -33,6 +34,26 @@ use snafu::ResultExt;
|
||||
use crate::error::{self, IllegalAuthConfigSnafu, Result};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
frontend: FeInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.frontend
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(error::ShutdownFrontendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -40,8 +61,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,9 +72,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -67,6 +88,8 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
prom_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
 opentsdb_addr: Option<String>,
@@ -87,17 +110,20 @@ pub struct StartCommand {
 }
 
 impl StartCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
         let opts: FrontendOptions = self.try_into()?;
 
-        let mut instance = Instance::try_new_distributed(&opts)
+        let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
            .await
            .context(error::StartFrontendSnafu)?;
-        instance.set_plugins(plugins.clone());
 
-        let mut frontend = Frontend::new(opts, instance, plugins);
-        frontend.start().await.context(error::StartFrontendSnafu)
+        instance
+            .build_servers(&opts, plugins)
+            .await
+            .context(error::StartFrontendSnafu)?;
+
+        Ok(Instance { frontend: instance })
     }
 }
@@ -142,6 +168,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
                 ..Default::default()
             });
         }
+        if let Some(addr) = cmd.prom_addr {
+            opts.prom_options = Some(PromOptions { addr });
+        }
         if let Some(addr) = cmd.postgres_addr {
             opts.postgres_options = Some(PostgresOptions {
                 addr,
@@ -159,8 +188,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
             opts.influxdb_options = Some(InfluxdbOptions { enable });
         }
         if let Some(metasrv_addr) = cmd.metasrv_addr {
-            opts.meta_client_opts
-                .get_or_insert_with(MetaClientOpts::default)
+            opts.meta_client_options
+                .get_or_insert_with(MetaClientOptions::default)
                .metasrv_addrs = metasrv_addr
                .split(',')
                .map(&str::trim)
@@ -174,8 +203,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
 
 #[cfg(test)]
 mod tests {
+    use std::io::Write;
     use std::time::Duration;
 
+    use common_test_util::temp_dir::create_named_temp_file;
     use servers::auth::{Identity, Password, UserProviderRef};
 
     use super::*;
@@ -185,6 +216,7 @@ mod tests {
         let command = StartCommand {
             http_addr: Some("127.0.0.1:1234".to_string()),
             grpc_addr: None,
+            prom_addr: Some("127.0.0.1:4444".to_string()),
             mysql_addr: Some("127.0.0.1:5678".to_string()),
             postgres_addr: Some("127.0.0.1:5432".to_string()),
             opentsdb_addr: Some("127.0.0.1:4321".to_string()),
@@ -208,6 +240,7 @@ mod tests {
             opts.opentsdb_options.as_ref().unwrap().addr,
             "127.0.0.1:4321"
         );
+        assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
 
         let default_opts = FrontendOptions::default();
         assert_eq!(
@@ -232,17 +265,25 @@ mod tests {
 
     #[test]
     fn test_read_from_config_file() {
+        let mut file = create_named_temp_file();
+        let toml_str = r#"
+            mode = "distributed"
+
+            [http_options]
+            addr = "127.0.0.1:4000"
+            timeout = "30s"
+        "#;
+        write!(file, "{}", toml_str).unwrap();
+
         let command = StartCommand {
             http_addr: None,
             grpc_addr: None,
             mysql_addr: None,
+            prom_addr: None,
             postgres_addr: None,
             opentsdb_addr: None,
             influxdb_enable: None,
-            config_file: Some(format!(
-                "{}/../../config/frontend.example.toml",
-                std::env::current_dir().unwrap().as_path().to_str().unwrap()
-            )),
+            config_file: Some(file.path().to_str().unwrap().to_string()),
             metasrv_addr: None,
             tls_mode: None,
             tls_cert_path: None,
@@ -268,6 +309,7 @@ mod tests {
             http_addr: None,
             grpc_addr: None,
             mysql_addr: None,
+            prom_addr: None,
             postgres_addr: None,
             opentsdb_addr: None,
             influxdb_enable: None,
@@ -287,7 +329,7 @@ mod tests {
 
         let provider = provider.unwrap();
         let result = provider
-            .auth(Identity::UserId("test", None), Password::PlainText("test"))
+            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
            .await;
         assert!(result.is_ok());
     }
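The hunk above is the heart of this refactor: `run()` no longer both constructs and starts the frontend; a `build()` step now returns an `Instance` whose lifecycle the caller drives. Below is a minimal sketch of that build-then-run split, with hypothetical types and a tokio runtime assumed; it illustrates the pattern, not GreptimeDB's actual API.

```rust
use std::time::Duration;

// Hypothetical stand-in for the frontend instance and its servers.
struct Instance {
    started: bool,
}

impl Instance {
    // `build` wires everything up but starts nothing; the caller owns the
    // lifecycle, mirroring the run() -> build() split in the diff above.
    async fn build() -> Result<Instance, String> {
        Ok(Instance { started: false })
    }

    async fn run(&mut self) -> Result<(), String> {
        self.started = true;
        println!("servers started");
        Ok(())
    }

    async fn stop(&self) -> Result<(), String> {
        println!("servers stopped");
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let mut instance = Instance::build().await?;
    instance.run().await?;
    // Stand-in for "serve until a shutdown signal arrives".
    tokio::time::sleep(Duration::from_millis(10)).await;
    instance.stop().await
}
```

Separating construction from startup lets the binary build every component first, then start and stop them in a controlled order.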
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@
 
 #![feature(assert_matches)]
 
+pub mod cli;
 pub mod datanode;
 pub mod error;
 pub mod frontend;
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,14 +13,34 @@
 // limitations under the License.
 
 use clap::Parser;
-use common_telemetry::logging;
-use meta_srv::bootstrap;
+use common_telemetry::{info, logging, warn};
+use meta_srv::bootstrap::MetaSrvInstance;
 use meta_srv::metasrv::MetaSrvOptions;
 use snafu::ResultExt;
 
 use crate::error::{Error, Result};
 use crate::{error, toml_loader};
 
+pub struct Instance {
+    instance: MetaSrvInstance,
+}
+
+impl Instance {
+    pub async fn run(&mut self) -> Result<()> {
+        self.instance
+            .start()
+            .await
+            .context(error::StartMetaServerSnafu)
+    }
+
+    pub async fn stop(&self) -> Result<()> {
+        self.instance
+            .shutdown()
+            .await
+            .context(error::ShutdownMetaServerSnafu)
+    }
+}
+
 #[derive(Parser)]
 pub struct Command {
     #[clap(subcommand)]
@@ -28,8 +48,8 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn run(self) -> Result<()> {
-        self.subcmd.run().await
+    pub async fn build(self) -> Result<Instance> {
+        self.subcmd.build().await
     }
 }
@@ -39,9 +59,9 @@ enum SubCommand {
 }
 
 impl SubCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         match self {
-            SubCommand::Start(cmd) => cmd.run().await,
+            SubCommand::Start(cmd) => cmd.build().await,
         }
     }
 }
@@ -56,19 +76,24 @@ struct StartCommand {
     store_addr: Option<String>,
     #[clap(short, long)]
     config_file: Option<String>,
+    #[clap(short, long)]
+    selector: Option<String>,
+    #[clap(long)]
+    use_memory_store: bool,
 }
 
 impl StartCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         logging::info!("MetaSrv start command: {:#?}", self);
 
         let opts: MetaSrvOptions = self.try_into()?;
 
         logging::info!("MetaSrv options: {:#?}", opts);
 
-        bootstrap::bootstrap_meta_srv(opts)
+        let instance = MetaSrvInstance::new(opts)
            .await
-            .context(error::StartMetaServerSnafu)
+            .context(error::BuildMetaServerSnafu)?;
+
+        Ok(Instance { instance })
     }
 }
@@ -91,6 +116,17 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
         if let Some(addr) = cmd.store_addr {
             opts.store_addr = addr;
         }
+        if let Some(selector_type) = &cmd.selector {
+            opts.selector = selector_type[..]
+                .try_into()
+                .context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
+            info!("Using {} selector", selector_type);
+        }
+
+        if cmd.use_memory_store {
+            warn!("Using memory store for Meta. Make sure you are in running tests.");
+            opts.use_memory_store = true;
+        }
+
         Ok(opts)
     }
@@ -98,6 +134,11 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
 
 #[cfg(test)]
 mod tests {
+    use std::io::Write;
+
+    use common_test_util::temp_dir::create_named_temp_file;
     use meta_srv::selector::SelectorType;
 
     use super::*;
 
     #[test]
@@ -107,28 +148,42 @@ mod tests {
             server_addr: Some("127.0.0.1:3002".to_string()),
             store_addr: Some("127.0.0.1:2380".to_string()),
             config_file: None,
+            selector: Some("LoadBased".to_string()),
+            use_memory_store: false,
         };
         let options: MetaSrvOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
         assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
         assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
+        assert_eq!(SelectorType::LoadBased, options.selector);
     }
 
     #[test]
     fn test_read_from_config_file() {
+        let mut file = create_named_temp_file();
+        let toml_str = r#"
+            bind_addr = "127.0.0.1:3002"
+            server_addr = "127.0.0.1:3002"
+            store_addr = "127.0.0.1:2379"
+            datanode_lease_secs = 15
+            selector = "LeaseBased"
+            use_memory_store = false
+        "#;
+        write!(file, "{}", toml_str).unwrap();
+
         let cmd = StartCommand {
             bind_addr: None,
             server_addr: None,
             store_addr: None,
-            config_file: Some(format!(
-                "{}/../../config/metasrv.example.toml",
-                std::env::current_dir().unwrap().as_path().to_str().unwrap()
-            )),
+            selector: None,
+            config_file: Some(file.path().to_str().unwrap().to_string()),
+            use_memory_store: false,
         };
         let options: MetaSrvOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
         assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
         assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
+        assert_eq!(15, options.datanode_lease_secs);
+        assert_eq!(SelectorType::LeaseBased, options.selector);
     }
 }
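The new `--selector` flag above is parsed with `selector_type[..].try_into()`, a `TryFrom<&str>` conversion that fails with an `UnsupportedSelectorType` error for unknown names. Here is a simplified, self-contained sketch of that parsing; the real `SelectorType` lives in `meta_srv::selector` and uses snafu errors rather than plain strings.

```rust
// Toy version of the string-to-enum conversion behind `--selector`.
#[derive(Debug, PartialEq)]
enum SelectorType {
    LoadBased,
    LeaseBased,
}

impl TryFrom<&str> for SelectorType {
    type Error = String;

    fn try_from(s: &str) -> Result<Self, Self::Error> {
        match s {
            "LoadBased" => Ok(SelectorType::LoadBased),
            "LeaseBased" => Ok(SelectorType::LeaseBased),
            other => Err(format!("unsupported selector type: {other}")),
        }
    }
}

fn main() {
    // Mirrors `opts.selector = selector_type[..].try_into()` above.
    let selector: SelectorType = "LoadBased".try_into().unwrap();
    assert_eq!(selector, SelectorType::LoadBased);
    assert!(SelectorType::try_from("Unknown").is_err());
}
```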
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,25 +15,31 @@
 use std::sync::Arc;
 
 use clap::Parser;
+use common_base::Plugins;
+use common_telemetry::info;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{
+    CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
+};
 use datanode::instance::InstanceRef;
-use frontend::frontend::{Frontend, FrontendOptions};
+use frontend::frontend::FrontendOptions;
 use frontend::grpc::GrpcOptions;
 use frontend::influxdb::InfluxdbOptions;
-use frontend::instance::Instance as FeInstance;
+use frontend::instance::{FrontendInstance, Instance as FeInstance};
 use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
+use frontend::prom::PromOptions;
 use frontend::prometheus::PrometheusOptions;
-use frontend::Plugins;
 use serde::{Deserialize, Serialize};
 use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
 use snafu::ResultExt;
 
-use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
+use crate::error::{
+    Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
+    StartDatanodeSnafu, StartFrontendSnafu,
+};
 use crate::frontend::load_frontend_plugins;
 use crate::toml_loader;
@@ -44,8 +50,8 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn run(self) -> Result<()> {
-        self.subcmd.run().await
+    pub async fn build(self) -> Result<Instance> {
+        self.subcmd.build().await
     }
 }
@@ -55,15 +61,18 @@ enum SubCommand {
 }
 
 impl SubCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         match self {
-            SubCommand::Start(cmd) => cmd.run().await,
+            SubCommand::Start(cmd) => cmd.build().await,
         }
     }
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(default)]
 pub struct StandaloneOptions {
+    pub mode: Mode,
+    pub enable_memory_catalog: bool,
     pub http_options: Option<HttpOptions>,
     pub grpc_options: Option<GrpcOptions>,
     pub mysql_options: Option<MysqlOptions>,
@@ -71,15 +80,18 @@ pub struct StandaloneOptions {
     pub opentsdb_options: Option<OpentsdbOptions>,
     pub influxdb_options: Option<InfluxdbOptions>,
     pub prometheus_options: Option<PrometheusOptions>,
-    pub mode: Mode,
-    pub wal_dir: String,
+    pub prom_options: Option<PromOptions>,
+    pub wal: WalConfig,
     pub storage: ObjectStoreConfig,
-    pub enable_memory_catalog: bool,
+    pub compaction: CompactionConfig,
+    pub procedure: Option<ProcedureConfig>,
 }
 
 impl Default for StandaloneOptions {
     fn default() -> Self {
         Self {
+            mode: Mode::Standalone,
+            enable_memory_catalog: false,
             http_options: Some(HttpOptions::default()),
             grpc_options: Some(GrpcOptions::default()),
             mysql_options: Some(MysqlOptions::default()),
@@ -87,10 +99,11 @@ impl Default for StandaloneOptions {
             opentsdb_options: Some(OpentsdbOptions::default()),
             influxdb_options: Some(InfluxdbOptions::default()),
             prometheus_options: Some(PrometheusOptions::default()),
-            mode: Mode::Standalone,
-            wal_dir: "/tmp/greptimedb/wal".to_string(),
+            prom_options: Some(PromOptions::default()),
+            wal: WalConfig::default(),
             storage: ObjectStoreConfig::default(),
-            enable_memory_catalog: false,
+            compaction: CompactionConfig::default(),
+            procedure: None,
         }
     }
 }
@@ -98,6 +111,7 @@ impl Default for StandaloneOptions {
 impl StandaloneOptions {
     fn frontend_options(self) -> FrontendOptions {
         FrontendOptions {
+            mode: self.mode,
             http_options: self.http_options,
             grpc_options: self.grpc_options,
             mysql_options: self.mysql_options,
@@ -105,21 +119,57 @@ impl StandaloneOptions {
             opentsdb_options: self.opentsdb_options,
             influxdb_options: self.influxdb_options,
             prometheus_options: self.prometheus_options,
-            mode: self.mode,
-            meta_client_opts: None,
+            prom_options: self.prom_options,
+            meta_client_options: None,
         }
     }
 
     fn datanode_options(self) -> DatanodeOptions {
         DatanodeOptions {
-            wal_dir: self.wal_dir,
-            storage: self.storage,
             enable_memory_catalog: self.enable_memory_catalog,
+            wal: self.wal,
+            storage: self.storage,
+            compaction: self.compaction,
+            procedure: self.procedure,
             ..Default::default()
         }
     }
 }
 
+pub struct Instance {
+    datanode: Datanode,
+    frontend: FeInstance,
+}
+
+impl Instance {
+    pub async fn run(&mut self) -> Result<()> {
+        // Start datanode instance before starting services, to avoid requests come in before internal components are started.
+        self.datanode
+            .start_instance()
+            .await
+            .context(StartDatanodeSnafu)?;
+        info!("Datanode instance started");
+
+        self.frontend.start().await.context(StartFrontendSnafu)?;
+        Ok(())
+    }
+
+    pub async fn stop(&self) -> Result<()> {
+        self.frontend
+            .shutdown()
+            .await
+            .context(ShutdownFrontendSnafu)?;
+
+        self.datanode
+            .shutdown_instance()
+            .await
+            .context(ShutdownDatanodeSnafu)?;
+        info!("Datanode instance stopped.");
+
+        Ok(())
+    }
+}
+
 #[derive(Debug, Parser)]
 struct StartCommand {
     #[clap(long)]
@@ -129,6 +179,8 @@ struct StartCommand {
     #[clap(long)]
     mysql_addr: Option<String>,
+    #[clap(long)]
+    prom_addr: Option<String>,
     #[clap(long)]
     postgres_addr: Option<String>,
     #[clap(long)]
     opentsdb_addr: Option<String>,
@@ -149,7 +201,7 @@ struct StartCommand {
 }
 
 impl StartCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         let enable_memory_catalog = self.enable_memory_catalog;
         let config_file = self.config_file.clone();
         let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -169,33 +221,30 @@ impl StartCommand {
             fe_opts, dn_opts
         );
 
-        let mut datanode = Datanode::new(dn_opts.clone())
+        let datanode = Datanode::new(dn_opts.clone())
            .await
            .context(StartDatanodeSnafu)?;
-        let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;
-
-        // Start datanode instance before starting services, to avoid requests come in before internal components are started.
-        datanode
-            .start_instance()
-            .await
-            .context(StartDatanodeSnafu)?;
-        info!("Datanode instance started");
+        let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
+
+        frontend
+            .build_servers(&fe_opts, plugins)
+            .await
+            .context(StartFrontendSnafu)?;
 
-        frontend.start().await.context(StartFrontendSnafu)?;
-        Ok(())
+        Ok(Instance { datanode, frontend })
     }
 }
 
 /// Build frontend instance in standalone mode
 async fn build_frontend(
-    fe_opts: FrontendOptions,
     plugins: Arc<Plugins>,
     datanode_instance: InstanceRef,
-) -> Result<Frontend<FeInstance>> {
+) -> Result<FeInstance> {
     let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
     frontend_instance.set_script_handler(datanode_instance);
     frontend_instance.set_plugins(plugins.clone());
-    Ok(Frontend::new(fe_opts, frontend_instance, plugins))
+    Ok(frontend_instance)
 }
 
 impl TryFrom<StartCommand> for FrontendOptions {
@@ -241,6 +290,11 @@ impl TryFrom<StartCommand> for FrontendOptions {
                 ..Default::default()
            })
        }
 
+        if let Some(addr) = cmd.prom_addr {
+            opts.prom_options = Some(PromOptions { addr })
+        }
+
         if let Some(addr) = cmd.postgres_addr {
             opts.postgres_options = Some(PostgresOptions {
                 addr,
@@ -289,6 +343,7 @@ mod tests {
             http_addr: None,
             rpc_addr: None,
             mysql_addr: None,
+            prom_addr: None,
             postgres_addr: None,
             opentsdb_addr: None,
             config_file: Some(format!(
@@ -322,6 +377,10 @@ mod tests {
             fe_opts.mysql_options.as_ref().unwrap().addr
        );
         assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
+        assert_eq!(
+            None,
+            fe_opts.mysql_options.as_ref().unwrap().reject_no_database
+        );
         assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
     }
@@ -330,6 +389,7 @@ mod tests {
         let command = StartCommand {
             http_addr: None,
             rpc_addr: None,
+            prom_addr: None,
             mysql_addr: None,
             postgres_addr: None,
             opentsdb_addr: None,
@@ -349,8 +409,15 @@ mod tests {
         assert!(provider.is_some());
         let provider = provider.unwrap();
         let result = provider
-            .auth(Identity::UserId("test", None), Password::PlainText("test"))
+            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
            .await;
         assert!(result.is_ok());
     }
+
+    #[test]
+    fn test_toml() {
+        let opts = StandaloneOptions::default();
+        let toml_string = toml::to_string(&opts).unwrap();
+        let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
+    }
 }
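The standalone `Instance` introduced above encodes an ordering invariant: the datanode starts before the frontend begins serving, so no request can arrive before storage is ready, and `stop` tears the two down in reverse. A condensed sketch of that ordering with stand-in types (not the real `Datanode`/`FeInstance`):

```rust
// Stand-ins for the datanode and frontend components.
struct Datanode;
struct Frontend;

impl Datanode {
    async fn start_instance(&mut self) -> Result<(), String> { Ok(()) }
    async fn shutdown_instance(&self) -> Result<(), String> { Ok(()) }
}

impl Frontend {
    async fn start(&mut self) -> Result<(), String> { Ok(()) }
    async fn shutdown(&self) -> Result<(), String> { Ok(()) }
}

struct Instance {
    datanode: Datanode,
    frontend: Frontend,
}

impl Instance {
    async fn run(&mut self) -> Result<(), String> {
        // Storage layer first, so nothing serves before it is ready.
        self.datanode.start_instance().await?;
        self.frontend.start().await
    }

    async fn stop(&self) -> Result<(), String> {
        // Reverse order: stop accepting requests, then stop storage.
        self.frontend.shutdown().await?;
        self.datanode.shutdown_instance().await
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let mut instance = Instance { datanode: Datanode, frontend: Frontend };
    instance.run().await?;
    instance.stop().await
}
```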
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,20 +29,31 @@ mod tests {
     use std::fs::File;
     use std::io::Write;
 
+    use common_test_util::temp_dir::create_temp_dir;
     use serde::{Deserialize, Serialize};
     use snafu::ResultExt;
-    use tempdir::TempDir;
 
     use super::*;
     use crate::error::Result;
 
     #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
+    #[serde(default)]
     struct MockConfig {
         path: String,
         port: u32,
         host: String,
     }
 
+    impl Default for MockConfig {
+        fn default() -> Self {
+            Self {
+                path: "test".to_string(),
+                port: 0,
+                host: "localhost".to_string(),
+            }
+        }
+    }
+
     #[test]
     fn test_from_file() -> Result<()> {
         let config = MockConfig {
@@ -51,7 +62,7 @@ mod tests {
             host: "greptime.test".to_string(),
         };
 
-        let dir = TempDir::new("test_from_file").unwrap();
+        let dir = create_temp_dir("test_from_file");
         let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
 
         let s = toml::to_string(&config).unwrap();
@@ -63,6 +74,21 @@ mod tests {
         let loaded_config: MockConfig = from_file!(&test_file)?;
         assert_eq!(loaded_config, config);
 
+        // Only host in file
+        let mut file = File::create(&test_file).unwrap();
+        file.write_all("host='greptime.test'\n".as_bytes()).unwrap();
+
+        let loaded_config: MockConfig = from_file!(&test_file)?;
+        assert_eq!(loaded_config.host, "greptime.test");
+        assert_eq!(loaded_config.port, 0);
+        assert_eq!(loaded_config.path, "test");
+
+        // Truncate the file.
+        let file = File::create(&test_file).unwrap();
+        file.set_len(0).unwrap();
+        let loaded_config: MockConfig = from_file!(&test_file)?;
+        assert_eq!(loaded_config, MockConfig::default());
+
         Ok(())
     }
 }
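The new test cases above hinge on `#[serde(default)]`: any field missing from the TOML file is filled from the type's `Default` implementation, so a file containing only `host`, or nothing at all, still deserializes to a complete config. A self-contained sketch of that behavior, assuming the `serde` and `toml` crates:

```rust
use serde::Deserialize;

// Missing fields fall back to Default thanks to #[serde(default)].
#[derive(Debug, PartialEq, Deserialize)]
#[serde(default)]
struct MockConfig {
    path: String,
    port: u32,
    host: String,
}

impl Default for MockConfig {
    fn default() -> Self {
        Self {
            path: "test".to_string(),
            port: 0,
            host: "localhost".to_string(),
        }
    }
}

fn main() {
    // Only `host` is present; `path` and `port` come from Default.
    let config: MockConfig = toml::from_str("host = 'greptime.test'").unwrap();
    assert_eq!(config.host, "greptime.test");
    assert_eq!(config.port, 0);
    assert_eq!(config.path, "test");

    // An empty file parses to the full default config.
    let config: MockConfig = toml::from_str("").unwrap();
    assert_eq!(config, MockConfig::default());
}
```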
src/cmd/tests/cli.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(target_os = "macos")]
+mod tests {
+    use std::path::PathBuf;
+    use std::process::{Command, Stdio};
+    use std::time::Duration;
+
+    use common_test_util::temp_dir::create_temp_dir;
+    use rexpect::session::PtyReplSession;
+
+    struct Repl {
+        repl: PtyReplSession,
+    }
+
+    impl Repl {
+        fn send_line(&mut self, line: &str) {
+            self.repl.send_line(line).unwrap();
+
+            // read a line to consume the prompt
+            self.read_line();
+        }
+
+        fn read_line(&mut self) -> String {
+            self.repl.read_line().unwrap()
+        }
+
+        fn read_expect(&mut self, expect: &str) {
+            assert_eq!(self.read_line(), expect);
+        }
+
+        fn read_contains(&mut self, pat: &str) {
+            assert!(self.read_line().contains(pat));
+        }
+    }
+
+    // TODO(LFC): Un-ignore this REPL test.
+    // Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
+    #[ignore]
+    #[test]
+    fn test_repl() {
+        let data_dir = create_temp_dir("data");
+        let wal_dir = create_temp_dir("wal");
+
+        let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        bin_path.push("../../target/debug");
+        let bin_path = bin_path.to_str().unwrap();
+
+        let mut datanode = Command::new("./greptime")
+            .current_dir(bin_path)
+            .args([
+                "datanode",
+                "start",
+                "--rpc-addr=0.0.0.0:4321",
+                "--node-id=1",
+                &format!("--data-dir={}", data_dir.path().display()),
+                &format!("--wal-dir={}", wal_dir.path().display()),
+            ])
+            .stdout(Stdio::null())
+            .spawn()
+            .unwrap();
+
+        // wait for Datanode actually started
+        std::thread::sleep(Duration::from_secs(3));
+
+        let mut repl_cmd = Command::new("./greptime");
+        repl_cmd.current_dir(bin_path).args([
+            "--log-level=off",
+            "cli",
+            "attach",
+            "--grpc-addr=0.0.0.0:4321",
+            // history commands can sneaky into stdout and mess up our tests, so disable it
+            "--disable-helper",
+        ]);
+        let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
+        let repl = PtyReplSession {
+            prompt: "> ".to_string(),
+            pty_session,
+            quit_command: None,
+            echo_on: false,
+        };
+        let repl = &mut Repl { repl };
+        repl.read_expect("Ready for commands. (Hint: try 'help')");
+
+        test_create_database(repl);
+
+        test_use_database(repl);
+
+        test_create_table(repl);
+
+        test_insert(repl);
+
+        test_select(repl);
+
+        datanode.kill().unwrap();
+        datanode.wait().unwrap();
+    }
+
+    fn test_create_database(repl: &mut Repl) {
+        repl.send_line("CREATE DATABASE db;");
+        repl.read_expect("Affected Rows: 1");
+        repl.read_contains("Cost");
+    }
+
+    fn test_use_database(repl: &mut Repl) {
+        repl.send_line("USE db");
+        repl.read_expect("Total Rows: 0");
+        repl.read_contains("Cost");
+        repl.read_expect("Using db");
+    }
+
+    fn test_create_table(repl: &mut Repl) {
+        repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
+        repl.read_expect("Affected Rows: 0");
+        repl.read_contains("Cost");
+    }
+
+    fn test_insert(repl: &mut Repl) {
+        repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
+        repl.read_expect("Affected Rows: 1");
+        repl.read_contains("Cost");
+    }
+
+    fn test_select(repl: &mut Repl) {
+        repl.send_line("SELECT * FROM t;");
+
+        repl.read_expect("+-------+-------------------------+");
+        repl.read_expect("| x     | ts                      |");
+        repl.read_expect("+-------+-------------------------+");
+        repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
+        repl.read_expect("+-------+-------------------------+");
+        repl.read_expect("Total Rows: 1");
+
+        repl.read_contains("Cost");
+    }
+}
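This new integration test drives the `greptime` binary through a pseudo-terminal with `rexpect`, asserting on the exact lines the REPL prints back. A condensed sketch of that driver pattern, using the same `rexpect` calls as the test above (the address, prompt, and SQL are illustrative, and the crate's struct fields may differ across versions):

```rust
use std::process::Command;

use rexpect::session::PtyReplSession;

fn main() {
    // Assemble the CLI invocation, as the test does for `cli attach`.
    let mut cmd = Command::new("./greptime");
    cmd.args(["--log-level=off", "cli", "attach", "--grpc-addr=0.0.0.0:4321"]);

    // Spawn it in a pseudo-terminal with a 5s read timeout.
    let pty_session = rexpect::session::spawn_command(cmd, Some(5_000)).unwrap();
    let mut repl = PtyReplSession {
        prompt: "> ".to_string(),
        pty_session,
        quit_command: None,
        echo_on: false,
    };

    // Send a statement, then assert on the line the REPL prints back.
    repl.send_line("CREATE DATABASE db;").unwrap();
    let line = repl.read_line().unwrap();
    assert!(line.contains("Affected Rows"));
}
```

Matching on exact output lines keeps the test deterministic, which is also why the test disables the REPL's history helper: stray history writes to stdout would break the line-by-line expectations.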
Some files were not shown because too many files have changed in this diff.