Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: v0.1.0-alp ... v0.1.1-alp (178 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5b5d953d56 | |
| | 3f6cbc378d | |
| | 9619940569 | |
| | ed8252157a | |
| | 3e0fb7e75b | |
| | ba3ce436df | |
| | b31a6cb506 | |
| | 95090592f0 | |
| | 3a527c0fd5 | |
| | 819b60ca13 | |
| | 7169fe2989 | |
| | b70672be77 | |
| | a4c01f4a3a | |
| | bd98a26cca | |
| | 1b4236d698 | |
| | e8cc9b4b29 | |
| | 379f581780 | |
| | ff6cfe8e70 | |
| | 5a397917c0 | |
| | 559880cb84 | |
| | b76b27f3bf | |
| | d4e0dc3685 | |
| | b022556b79 | |
| | bd065ea6e8 | |
| | 9a87f5edf8 | |
| | e851b6d019 | |
| | e7b92f24e8 | |
| | 4b8db408cf | |
| | 98659899c0 | |
| | b1311801da | |
| | f1b65d9b77 | |
| | d5a2a26916 | |
| | 8e7e68708f | |
| | 9c1118b06d | |
| | 3fb93efbd0 | |
| | 3fd9c2f144 | |
| | 75e48c5f20 | |
| | d402f83442 | |
| | c5c6494e0b | |
| | dc50095af3 | |
| | 8cd69f441e | |
| | f52fc9b7d4 | |
| | 50d2685365 | |
| | 11d45e2918 | |
| | 30287e7e41 | |
| | 0b3f955ca7 | |
| | 4b58a8a18d | |
| | bd377ef329 | |
| | df751c38b4 | |
| | f6e871708a | |
| | 819c990a89 | |
| | a8b4e8d933 | |
| | 710e2ed133 | |
| | 81eab74b90 | |
| | 8f67d8ca93 | |
| | 4cc3ac37d5 | |
| | b48c851b96 | |
| | fdd17c6eeb | |
| | 51641db39e | |
| | 98ef74bff4 | |
| | f42acc90c2 | |
| | 2df8143ad5 | |
| | fb2e0c7cf3 | |
| | 390e9095f6 | |
| | bcd44b90c1 | |
| | c6f2db8ae0 | |
| | e17d5a1c41 | |
| | 23092a5208 | |
| | 4bbad6ab1e | |
| | 6833b405d9 | |
| | aaaf24143d | |
| | 9161796dfa | |
| | 68b231987c | |
| | 6e9964ac97 | |
| | 6afd79cab8 | |
| | 4e88a01638 | |
| | af1f8d6101 | |
| | a9c8584c98 | |
| | 7787cfdd42 | |
| | 2f39a77137 | |
| | 16f86a9d77 | |
| | 5ec1a7027b | |
| | ddbc97befb | |
| | a8c2b35ec6 | |
| | 04afee216e | |
| | 5533040be7 | |
| | 34fdba77df | |
| | cd0d58cb24 | |
| | 8b869642b8 | |
| | a33d1e9863 | |
| | dfe7bfb07f | |
| | 5d1f231004 | |
| | 40eec85cf7 | |
| | e17d564bf0 | |
| | 301656d568 | |
| | a19dee1dc0 | |
| | 75b8afe043 | |
| | e2904b99ac | |
| | de0b8aa0a0 | |
| | 63e396e9e9 | |
| | 4d8276790b | |
| | 374acc8830 | |
| | 8491f65093 | |
| | 5e6f340dd9 | |
| | 7b98718cd9 | |
| | 0f7e5a2fb2 | |
| | 9ad6c45913 | |
| | 7fe417e740 | |
| | c1a9f84c7f | |
| | be897efd01 | |
| | c06e04afbb | |
| | e77a7f253c | |
| | 7d6f4cd88b | |
| | 83ac6598b6 | |
| | 4c925e0079 | |
| | c6128ec0a4 | |
| | 7c34b009ec | |
| | 70edd4d55b | |
| | 6beea73590 | |
| | c0d3533d10 | |
| | 9989a8c192 | |
| | 19dd8b1246 | |
| | 1e9918ddf9 | |
| | 4ce62f850b | |
| | 83d57f9111 | |
| | 803b7f0633 | |
| | 37ca5ba380 | |
| | c1d32bdf2b | |
| | 83509f31f4 | |
| | 926022e14c | |
| | 2f2609d8c6 | |
| | ecadbc1435 | |
| | afac885c10 | |
| | 5d62e193bd | |
| | 7d77913e88 | |
| | 3f45a0d337 | |
| | a1e97c990f | |
| | 4ae63b7089 | |
| | b0925d94ed | |
| | fc9276c79d | |
| | 184ca78a4d | |
| | ebbf1e43b5 | |
| | 54fe81dad9 | |
| | af935671b2 | |
| | 74adb077bc | |
| | 54c7a8be02 | |
| | ea5146762a | |
| | 788b5362a1 | |
| | 028a69e349 | |
| | 9a30ba00c4 | |
| | 8149932bad | |
| | 89e4084af4 | |
| | 39df25a8f6 | |
| | b2ad0e972b | |
| | 18e6740ac9 | |
| | a7dc86ffe5 | |
| | 71482b38d7 | |
| | dc9b5339bf | |
| | 5e05c8f884 | |
| | aafc26c788 | |
| | 64243e3a7d | |
| | 36a13dafb7 | |
| | 637837ae44 | |
| | ae8afd3711 | |
| | 3db8f95169 | |
| | 43aefc5d74 | |
| | b33937f48e | |
| | 9bc4c0d9c7 | |
| | 302d7ec41b | |
| | cc46194f29 | |
| | 5dfc24e4f6 | |
| | 4987136850 | |
| | 6960739b3d | |
| | 49d83abc0c | |
| | ecb71f81be | |
| | 6f5639fccd | |
| | 1e9d09099e | |
| | daad38360f | |
@@ -1,2 +1,5 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
+
+[alias]
+sqlness = "run --bin sqlness-runner --"
@@ -2,3 +2,9 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
+
+# Settings for oss test
+GT_OSS_BUCKET=OSS bucket
+GT_OSS_ACCESS_KEY_ID=OSS access key id
+GT_OSS_ACCESS_KEY=OSS access key
+GT_OSS_ENDPOINT=OSS endpoint
13 .github/pr-title-breaking-change-label-config.json vendored Normal file
@@ -0,0 +1,13 @@
{
  "LABEL": {
    "name": "breaking change",
    "color": "D93F0B"
  },
  "CHECKS": {
    "regexp": "^(?:(?!!:).)*$",
    "ignoreLabels": [
      "ignore-title"
    ],
    "alwaysPassCI": true
  }
}
18 .github/pr-title-checker-config.json vendored
@@ -1,10 +1,12 @@
{
  "LABEL": {
    "name": "Invalid PR Title",
    "color": "B60205"
  },
  "CHECKS": {
-    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
-    "ignoreLabels" : ["ignore-title"]
+    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
+    "ignoreLabels": [
+      "ignore-title"
+    ]
  }
}
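For illustration only (this snippet is not part of the repository and assumes the Rust `regex` crate), the updated pattern accepts conventional-commit titles with an optional scope and an optional `!` before the colon, which is what the breaking-change label config above keys on:

```rust
// Illustrative check of the updated PR-title pattern; not part of the CI itself.
use regex::Regex;

fn main() {
    let re = Regex::new(
        r"^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*",
    )
    .unwrap();

    assert!(re.is_match("feat(storage): add compaction strategy")); // scoped title
    assert!(re.is_match("fix!: change default config format"));     // breaking-change marker
    assert!(!re.is_match("update readme"));                         // missing type prefix
}
```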
42 .github/workflows/apidoc.yml vendored Normal file
@@ -0,0 +1,42 @@
on:
  push:
    branches:
      - develop
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'

name: Build API docs

env:
  RUST_TOOLCHAIN: nightly-2023-02-26

jobs:
  apidoc:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - run: cargo doc --workspace --no-deps --document-private-items
      - run: |
          cat <<EOF > target/doc/index.html
          <!DOCTYPE html>
          <html>
          <head>
          <meta http-equiv="refresh" content="0; url='greptime/'" />
          </head>
          <body></body></html>
          EOF
      - name: Publish dist directory
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          folder: target/doc
70 .github/workflows/coverage.yml vendored
@@ -1,70 +0,0 @@
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - "main"
      - "develop"
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: Code coverage

env:
  RUST_TOOLCHAIN: nightly-2022-12-20

jobs:
  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: KyleMayes/install-llvm-action@v1
        with:
          version: "14.0"
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Collect coverage data
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
        env:
          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./lcov.info
          flags: rust
          fail_ci_if_error: true
          verbose: true
76 .github/workflows/develop.yml vendored
@@ -7,6 +7,7 @@ on:
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - develop
@@ -23,15 +24,15 @@ on:
name: CI

env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-26

jobs:
  typos:
    name: Spell Check with Typos
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v2
-      - uses: crate-ci/typos@v1.0.4
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10

  check:
    name: Check
@@ -115,6 +116,7 @@ jobs:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
+    needs: [clippy]
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
@@ -125,8 +127,25 @@ jobs:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
+      - name: Run etcd
+        run: |
+          ETCD_VER=v3.5.7
+          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
+          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+          mkdir -p /tmp/etcd-download
+          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
+          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+
+          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
+          nohup etcd >/tmp/etcd.log 2>&1 &
      - name: Run sqlness
-        run: cargo run --bin sqlness-runner
+        run: cargo sqlness && ls /tmp
+      - name: Upload sqlness logs
+        uses: actions/upload-artifact@v3
+        with:
+          name: sqlness-logs
+          path: /tmp/greptime-*.log
+          retention-days: 3

  fmt:
    name: Rustfmt
@@ -165,3 +184,52 @@ jobs:
        uses: Swatinem/rust-cache@v2
      - name: Run cargo clippy
        run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
+
+  coverage:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest-8-cores
+    timeout-minutes: 60
+    needs: [clippy]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: KyleMayes/install-llvm-action@v1
+        with:
+          version: "14.0"
+      - name: Install toolchain
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+          components: llvm-tools-preview
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+      - name: Install latest nextest release
+        uses: taiki-e/install-action@nextest
+      - name: Install Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+      - name: Install PyArrow Package
+        run: pip install pyarrow
+      - name: Install cargo-llvm-cov
+        uses: taiki-e/install-action@cargo-llvm-cov
+      - name: Collect coverage data
+        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
+        env:
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+          RUST_BACKTRACE: 1
+          CARGO_INCREMENTAL: 0
+          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+          UNITTEST_LOG_DIR: "__unittest_logs"
+      - name: Codecov upload
+        uses: codecov/codecov-action@v2
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./lcov.info
+          flags: rust
+          fail_ci_if_error: false
+          verbose: true
16 .github/workflows/doc-issue.yml vendored
@@ -1,4 +1,4 @@
-name: Create Issue in docs repo on doc related changes
+name: Create Issue in downstream repos

on:
  issues:
@@ -23,3 +23,17 @@ jobs:
          body: |
            A document change request is generated from
            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
+  cloud_issue:
+    if: github.event.label.name == 'cloud followup required'
+    runs-on: ubuntu-latest
+    steps:
+      - name: create an issue in cloud repo
+        uses: dacbd/create-issue-action@main
+        with:
+          owner: GreptimeTeam
+          repo: greptimedb-cloud
+          token: ${{ secrets.DOCS_REPO_TOKEN }}
+          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
+          body: |
+            A followup request is generated from
+            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
55 .github/workflows/docs.yml vendored Normal file
@@ -0,0 +1,55 @@
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - develop
      - main
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: CI

# To pass the required status check, see:
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'
2 .github/workflows/license.yaml vendored
@@ -13,4 +13,4 @@ jobs:
    steps:
      - uses: actions/checkout@v2
      - name: Check License Header
-        uses: apache/skywalking-eyes/header@main
+        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
9 .github/workflows/pr-title-checker.yml vendored
@@ -18,3 +18,12 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-checker-config.json"
+  breaking:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: thehanimo/pr-title-checker@v1.3.4
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          pass_on_octokit_error: false
+          configuration_path: ".github/pr-title-breaking-change-label-config.json"
102 .github/workflows/release.yml vendored
@@ -10,7 +10,7 @@ on:
name: Release

env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2023-02-26

  # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
  SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
@@ -28,18 +28,24 @@ jobs:
        # The file format is greptime-<os>-<arch>
        include:
          - arch: x86_64-unknown-linux-gnu
-            os: ubuntu-latest-16-cores
+            os: ubuntu-2004-16-cores
            file: greptime-linux-amd64
+            continue-on-error: false
          - arch: aarch64-unknown-linux-gnu
-            os: ubuntu-latest-16-cores
+            os: ubuntu-2004-16-cores
            file: greptime-linux-arm64
+            continue-on-error: true
          - arch: aarch64-apple-darwin
            os: macos-latest
            file: greptime-darwin-arm64
+            continue-on-error: true
          - arch: x86_64-apple-darwin
            os: macos-latest
            file: greptime-darwin-amd64
+            continue-on-error: true
    runs-on: ${{ matrix.os }}
+    continue-on-error: ${{ matrix.continue-on-error }}
+    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
@@ -69,6 +75,25 @@ jobs:
        run: |
          brew install protobuf

+      - name: Install etcd for linux
+        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
+        run: |
+          ETCD_VER=v3.5.7
+          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
+          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+          mkdir -p /tmp/etcd-download
+          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
+          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+
+          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
+          nohup etcd >/tmp/etcd.log 2>&1 &
+
+      - name: Install etcd for macos
+        if: contains(matrix.arch, 'darwin')
+        run: |
+          brew install etcd
+          brew services start etcd
+
      - name: Install dependencies for linux
        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
        run: |
@@ -113,6 +138,7 @@ jobs:
    name: Release artifacts
    needs: [build]
    runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
@@ -155,34 +181,11 @@ jobs:
    name: Build docker image
    needs: [build]
    runs-on: ubuntu-latest
    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3

-      - name: Download amd64 binary
-        uses: actions/download-artifact@v3
-        with:
-          name: greptime-linux-amd64
-          path: amd64
-
-      - name: Unzip the amd64 artifacts
-        run: |
-          cd amd64
-          tar xvf greptime-linux-amd64.tgz
-          rm greptime-linux-amd64.tgz
-
-      - name: Download arm64 binary
-        uses: actions/download-artifact@v3
-        with:
-          name: greptime-linux-arm64
-          path: arm64
-
-      - name: Unzip the arm64 artifacts
-        run: |
-          cd arm64
-          tar xvf greptime-linux-arm64.tgz
-          rm greptime-linux-arm64.tgz
-
      - name: Login to UCloud Container Registry
        uses: docker/login-action@v2
        with:
@@ -217,8 +220,37 @@ jobs:
      - name: Set up buildx
        uses: docker/setup-buildx-action@v2

-      - name: Build and push
+      - name: Download amd64 binary
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-amd64
+          path: amd64
+
+      - name: Unzip the amd64 artifacts
+        id: unzip-amd64
+        run: |
+          cd amd64
+          tar xvf greptime-linux-amd64.tgz
+          rm greptime-linux-amd64.tgz
+
+      - name: Download arm64 binary
+        id: download-arm64
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-arm64
+          path: arm64
+
+      - name: Unzip the arm64 artifacts
+        id: unzip-arm64
+        if: success() || steps.download-arm64.conclusion == 'success'
+        run: |
+          cd arm64
+          tar xvf greptime-linux-arm64.tgz
+          rm greptime-linux-arm64.tgz
+
+      - name: Build and push all
        uses: docker/build-push-action@v3
+        if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
        with:
          context: .
          file: ./docker/ci/Dockerfile
@@ -229,3 +261,17 @@ jobs:
            greptime/greptimedb:${{ env.IMAGE_TAG }}
            uhub.service.ucloud.cn/greptime/greptimedb:latest
            uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
+
+      - name: Build and push amd64 only
+        uses: docker/build-push-action@v3
+        if: success() || steps.unzip-arm64.conclusion == 'failure' # Only build and push amd64 platform if unzip-arm64 fails
+        with:
+          context: .
+          file: ./docker/ci/Dockerfile
+          push: true
+          platforms: linux/amd64
+          tags: |
+            greptime/greptimedb:latest
+            greptime/greptimedb:${{ env.IMAGE_TAG }}
+            uhub.service.ucloud.cn/greptime/greptimedb:latest
+            uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
@@ -1,4 +1,4 @@
-# Welcome!
+# Welcome 👋

Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

@@ -50,34 +50,33 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
-- Make sure all unit tests are passed.
+- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).

#### `pre-commit` Hooks

You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.

1. Install `pre-commit`
   ```
   $ pip install pre-commit
   ```
   or
   ```
   $ brew install pre-commit
   ```

   pip install pre-commit

   or

   brew install pre-commit

2. Install the `pre-commit` hooks
   ```
   $ pre-commit install
   pre-commit installed at .git/hooks/pre-commit

   $ pre-commit install --hook-type commit-msg
   pre-commit installed at .git/hooks/commit-msg
   $ pre-commit install
   pre-commit installed at .git/hooks/pre-commit

   $ pre-commit install --hook-type pre-push
   pre-commit installed at .git/hooks/pre-pus
   ```
   $ pre-commit install --hook-type commit-msg
   pre-commit installed at .git/hooks/commit-msg

   now `pre-commit` will run automatically on `git commit`.
   $ pre-commit install --hook-type pre-push
   pre-commit installed at .git/hooks/pre-push

   Now, `pre-commit` will run automatically on `git commit`.

### Title

@@ -102,10 +101,12 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Community

The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)

Also, see some extra GreptimeDB content:

- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)
1934 Cargo.lock generated
File diff suppressed because it is too large
40 Cargo.toml
@@ -12,11 +12,14 @@ members = [
    "src/common/function-macro",
    "src/common/grpc",
    "src/common/grpc-expr",
    "src/common/mem-prof",
    "src/common/procedure",
    "src/common/query",
    "src/common/recordbatch",
    "src/common/runtime",
    "src/common/substrait",
    "src/common/telemetry",
    "src/common/test-util",
    "src/common/time",
    "src/datanode",
    "src/datatypes",
@@ -26,6 +29,7 @@ members = [
    "src/meta-srv",
    "src/mito",
    "src/object-store",
    "src/partition",
    "src/promql",
    "src/query",
    "src/script",
@@ -35,6 +39,7 @@ members = [
    "src/storage",
    "src/store-api",
    "src/table",
    "src/table-procedure",
    "tests-integration",
    "tests/runner",
]
@@ -45,28 +50,33 @@ edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
-arrow = "29.0"
-arrow-flight = "29.0"
-arrow-schema = { version = "29.0", features = ["serde"] }
+arrow = { version = "33.0", features = ["pyarrow"] }
+arrow-array = "33.0"
+arrow-flight = "33.0"
+arrow-schema = { version = "33.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
-# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
+chrono = { version = "0.4", features = ["serde"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
futures = "0.3"
futures-util = "0.3"
-parquet = "29.0"
+parquet = "33.0"
paste = "1.0"
prost = "0.11"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = "0.28"
-tokio = { version = "1", features = ["full"] }
-tonic = "0.8"
+sqlparser = "0.30"
+tempfile = "3"
+tokio = { version = "1.24.2", features = ["full"] }
+tokio-util = "0.7"
+tonic = { version = "0.8", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

[profile.release]
debug = true
@@ -74,6 +84,6 @@ debug = true
[profile.weekly]
inherits = "release"
strip = true
-lto = true
+lto = "thin"
debug = false
incremental = false
6 Makefile
@@ -19,6 +19,10 @@ clean: ## Clean the project.
fmt: ## Format all the Rust code.
	cargo fmt --all

+.PHONY: fmt-toml
+fmt-toml: ## Format all TOML files.
+	taplo format --check --option "indent_string= "
+
.PHONY: docker-image
docker-image: ## Build docker image.
	docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
@@ -35,7 +39,7 @@ integration-test: ## Run integation test.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
-	cargo run --bin sqlness-runner
+	cargo sqlness

.PHONY: check
check: ## Cargo check all the targets.
11 README.md
@@ -61,6 +61,12 @@ To compile GreptimeDB from source, you'll need:
  find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
  **Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
  keyword. You can check it with `protoc --version`.
+- python3-dev or python3-devel(Optional, only needed if you want to run scripts
+  in cpython): this install a Python shared library required for running python
+  scripting engine(In CPython Mode). This is available as `python3-dev` on
+  ubuntu, you can install it with `sudo apt install python3-dev`, or
+  `python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
+  `Python3` package should have this shared library by default.

#### Build with Docker

@@ -153,6 +159,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [Developer
  Guide](https://docs.greptime.com/developer-guide/overview.html)

+### Dashboard
+- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
+
### SDK

- [GreptimeDB Java
@@ -169,7 +178,7 @@ For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam
## Community

-Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
+Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
ask for help by filling an issue with a detailed description of what you were trying to do
and what went wrong. If you have any questions or if you would like to get involved in our
community, please check out:
@@ -27,12 +27,11 @@ use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
-use client::{Client, Database};
+use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

const DATABASE_NAME: &str = "greptime";
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
@@ -100,7 +99,6 @@ async fn write_data(
    let record_batch = record_batch.unwrap();
    let (columns, row_count) = convert_record_batch(record_batch);
    let request = InsertRequest {
-        schema_name: "public".to_string(),
        table_name: TABLE_NAME.to_string(),
        region_number: 0,
        columns,
@@ -210,6 +208,7 @@ fn build_values(column: &ArrayRef) -> Values {
        | DataType::Dictionary(_, _)
        | DataType::Decimal128(_, _)
        | DataType::Decimal256(_, _)
+        | DataType::RunEndEncoded(_, _)
        | DataType::Map(_, _) => todo!(),
    }
}
@@ -424,7 +423,7 @@ fn main() {
        .unwrap()
        .block_on(async {
            let client = Client::with_urls(vec![&args.endpoint]);
-            let db = Database::new(DATABASE_NAME, client);
+            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

            if !args.skip_write {
                do_write(&args, &db).await;
@@ -8,3 +8,5 @@ coverage:
ignore:
  - "**/error*.rs" # ignore all error.rs files
  - "tests/runner/*.rs" # ignore integration test runner
+comment: # this is a top-level key
+  layout: "diff"
@@ -1,25 +1,52 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# MySQL server address, "127.0.0.1:4406" by default.
mysql_addr = "127.0.0.1:4406"
# The number of MySQL server worker threads, 2 by default.
mysql_runtime_size = 2

# Metasrv client options.
[meta_client_options]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true

# WAL options, see `standalone.example.toml`.
[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false

# Storage options, see `standalone.example.toml`.
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
type = "File"
data_dir = "/tmp/greptimedb/data/"

[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
# Compaction options, see `standalone.example.toml`.
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32

# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'
@@ -1,12 +1,58 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
# Node running mode, see `standalone.example.toml`.
mode = "distributed"

# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = '127.0.0.1:4000'
addr = "127.0.0.1:4000"
timeout = "30s"

[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
addr = "127.0.0.1:4001"
runtime_size = 8

# MySQL server options, see `standalone.example.toml`.
[mysql_options]
addr = "127.0.0.1:4002"
runtime_size = 2

# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
mode = "disable"
cert_path = ""
key_path = ""

# PostgresSQL server options, see `standalone.example.toml`.
[postgres_options]
addr = "127.0.0.1:4003"
runtime_size = 2

# PostgresSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
mode = "disable"
cert_path = ""
key_path = ""

# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
addr = "127.0.0.1:4242"
runtime_size = 2

# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
enable = true

# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
enable = true

# Prometheus protocol options, see `standalone.example.toml`.
[prom_options]
addr = "127.0.0.1:4004"

# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
tcp_nodelay = true
@@ -1,4 +1,15 @@
bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false
@@ -1,44 +1,116 @@
node_id = 0
mode = 'standalone'
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false

# HTTP server options.
[http_options]
addr = '127.0.0.1:4000'
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"

[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
read_batch_size = 128
sync_write = false


[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'

# gRPC server options.
[grpc_options]
addr = '127.0.0.1:4001'
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8

# MySQL server options.
[mysql_options]
addr = '127.0.0.1:4002'
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2

[influxdb_options]
enable = true

[opentsdb_options]
addr = '127.0.0.1:4242'
enable = true
runtime_size = 2

[prometheus_options]
enable = true
# MySQL server TLS options.
[mysql_options.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
mode = "disable"
# Certificate file path.
cert_path = ""
# Private key file path.
key_path = ""

# PostgresSQL server options.
[postgres_options]
addr = '127.0.0.1:4003'
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2
check_pwd = false

# PostgresSQL server TLS options, see `[mysql_options.tls]` section.
[postgres_options.tls]
# TLS mode.
mode = "disable"
# certificate file path.
cert_path = ""
# private key file path.
key_path = ""

# OpenTSDB protocol options.
[opentsdb_options]
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2

# InfluxDB protocol options.
[influxdb_options]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true

# Prometheus protocol options.
[prometheus_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true

# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"

# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false

# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"

# Compaction options.
[compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32

# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
@@ -9,7 +9,10 @@ RUN apt-get update && apt-get install -y \
    protobuf-compiler \
    curl \
    build-essential \
-    pkg-config
+    pkg-config \
+    python3 \
+    python3-dev \
+    && pip install pyarrow

# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -149,10 +149,10 @@ inputs:
  - title: 'Series Normalize: \noffset = 0'
    operator: prom
    inputs:
-      - title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+      - title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
        operator: filter
        inputs:
-          - title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+          - title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
            operator: scan -->
151 docs/rfcs/2023-01-03-procedure-framework.md Normal file
@@ -0,0 +1,151 @@
---
Feature Name: "procedure-framework"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/286
Date: 2023-01-03
Author: "Yingwen <realevenyag@gmail.com>"
---

Procedure Framework
----------------------

# Summary
A framework for executing operations in a fault-tolerant manner.

# Motivation
Some operations in GreptimeDB require multiple steps to implement. For example, creating a table needs:
1. Check whether the table exists
2. Create the table in the table engine
    1. Create a region for the table in the storage engine
    2. Persist the metadata of the table to the table manifest
3. Add the table to the catalog manager

If the node dies or restarts in the middle of creating a table, it could leave the system in an inconsistent state. The procedure framework, inspired by [Apache HBase's ProcedureV2 framework](https://github.com/apache/hbase/blob/bfc9fc9605de638785435e404430a9408b99a8d0/src/main/asciidoc/_chapters/pv2.adoc) and [Apache Accumulo’s FATE framework](https://accumulo.apache.org/docs/2.x/administration/fate), aims to provide a unified way to implement multi-step operations that is tolerant to failure.

# Details
## Overview
The procedure framework consists of the following primary components:
- A `Procedure` represents an operation or a set of operations to be performed step-by-step
- `ProcedureManager`, the runtime to run `Procedures`. It executes the submitted procedures, stores procedures' states to the `ProcedureStore` and restores procedures from `ProcedureStore` while the database restarts.
- `ProcedureStore` is a storage layer for persisting the procedure state


## Procedures
The `ProcedureManager` keeps calling `Procedure::execute()` until the Procedure is done, so the operation of the Procedure should be [idempotent](https://developer.mozilla.org/en-US/docs/Glossary/Idempotent): it needs to be able to undo or replay a partial execution of itself.

```rust
trait Procedure {
    fn execute(&mut self, ctx: &Context) -> Result<Status>;

    fn dump(&self) -> Result<String>;

    fn rollback(&self) -> Result<()>;

    // other methods...
}
```

The `Status` is an enum that has the following variants:
```rust
enum Status {
    Executing {
        persist: bool,
    },
    Suspended {
        subprocedures: Vec<ProcedureWithId>,
        persist: bool,
    },
    Done,
}
```

A call to `execute()` can result in the following possibilities:
- `Ok(Status::Done)`: we are done
- `Ok(Status::Executing { .. })`: there are remaining steps to do
- `Ok(Status::Suspend { sub_procedure, .. })`: execution is suspended and can be resumed later after the sub-procedure is done.
- `Err(e)`: error occurs during execution and the procedure is unable to proceed anymore.

Users need to assign a unique `ProcedureId` to the procedure and the procedure can get this id via the `Context`. The `ProcedureId` is typically a UUID.

```rust
struct Context {
    id: ProcedureId,
    // other fields ...
}
```
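As a hypothetical illustration of the trait above (the `CreateTableState` enum and the step bodies are invented for this sketch and are not part of the RFC), a multi-step procedure could be structured like this:

```rust
// Hypothetical sketch: a two-step procedure whose steps are idempotent.
// `CreateTableState` and the step contents are invented for illustration.
enum CreateTableState {
    Prepare,
    EngineCreateTable,
}

struct CreateTableProcedure {
    state: CreateTableState,
    // table name, schema, engine handle, ...
}

impl Procedure for CreateTableProcedure {
    fn execute(&mut self, _ctx: &Context) -> Result<Status> {
        match self.state {
            CreateTableState::Prepare => {
                // Step 1: check whether the table already exists (safe to replay).
                self.state = CreateTableState::EngineCreateTable;
                // Ask the manager to persist the new state before the next step.
                Ok(Status::Executing { persist: true })
            }
            CreateTableState::EngineCreateTable => {
                // Step 2: create the table in the table engine, then finish.
                Ok(Status::Done)
            }
        }
    }

    fn dump(&self) -> Result<String> {
        // Serialize `state` (and any other fields) so the procedure can be restored.
        todo!()
    }

    fn rollback(&self) -> Result<()> {
        // Best-effort cleanup of partially created resources.
        todo!()
    }
}
```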
The `ProcedureManager` calls `Procedure::dump()` to serialize the internal state of the procedure and writes to the `ProcedureStore`. The `Status` has a field `persist` to tell the `ProcedureManager` whether it needs persistence.

## Sub-procedures
A procedure may need to create some sub-procedures to process its subtasks. For example, creating a distributed table with multiple regions (partitions) needs to set up the regions in each node, thus the parent procedure should instantiate a sub-procedure for each region. The `ProcedureManager` makes sure that the parent procedure does not proceed till all sub-procedures are successfully finished.

The procedure can submit sub-procedures to the `ProcedureManager` by returning `Status::Suspended`. It needs to assign a procedure id to each procedure manually so it can track the status of the sub-procedures.
```rust
struct ProcedureWithId {
    id: ProcedureId,
    procedure: BoxedProcedure,
}
```

## ProcedureStore
We might need to provide two different ProcedureStore implementations:
- In standalone mode, it stores data on the local disk.
- In distributed mode, it stores data on the meta server or the object store service.

These implementations should share the same storage structure. They store each procedure's state in a unique path based on the procedure id:

```
Sample paths:

/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.step
/procedures/{PROCEDURE_ID}/000003.commit
```

`ProcedureStore` behaves like a WAL. Before performing each step, the `ProcedureManager` can write the procedure's current state to the ProcedureStore, which stores the state in the `.step` file. The `000001` in the path is a monotonic increasing sequence of the step. After the procedure is done, the `ProcedureManager` puts a `.commit` file to indicate the procedure is finished (committed).

The `ProcedureManager` can remove the procedure's files once the procedure is done, but it needs to leave the `.commit` as the last file to remove in case of failure during removal.
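The RFC leaves the `ProcedureStore` interface itself unspecified; a minimal sketch of what such a trait could look like (method names and signatures here are assumptions, not the actual design) is:

```rust
// Hypothetical interface sketch; the actual ProcedureStore API is not defined in this RFC.
#[async_trait]
trait ProcedureStore {
    /// Persist the serialized state of one step under
    /// `/procedures/{procedure_id}/{step:06}.step`.
    async fn put_step(&self, procedure_id: ProcedureId, step: u32, state: &str) -> Result<()>;

    /// Write the `.commit` file that marks the procedure as finished.
    async fn commit(&self, procedure_id: ProcedureId) -> Result<()>;

    /// On restart, load every unfinished procedure's latest persisted state.
    async fn load_unfinished(&self) -> Result<Vec<(ProcedureId, String)>>;
}
```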
## ProcedureManager
`ProcedureManager` executes procedures submitted to it.

```rust
trait ProcedureManager {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
}
```

It supports the following operations:
- Register a `ProcedureLoader` by the type name of the `Procedure`.
- Submit a `Procedure` to the manager and execute it.

When `ProcedureManager` starts, it loads procedures from the `ProcedureStore` and restores the procedures by the `ProcedureLoader`. The manager stores the type name from `Procedure::type_name()` with the data from `Procedure::dump()` in the `.step` file and uses the type name to find a `ProcedureLoader` to recover the procedure from its data.

```rust
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
```
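A hypothetical usage sketch of this interface (how the manager instance is obtained, `CreateTableProcedure::new`, `CreateTableProcedure::from_json`, and `ProcedureId::random` are all invented for illustration):

```rust
// Hypothetical usage of the ProcedureManager trait; the helper names are invented.
async fn run(manager: &dyn ProcedureManager) -> Result<()> {
    // Register a loader so dumped `CreateTableProcedure` states can be restored on restart.
    manager.register_loader(
        "CreateTableProcedure",
        Box::new(|data| {
            let procedure = CreateTableProcedure::from_json(data)?;
            Ok(Box::new(procedure) as BoxedProcedure)
        }),
    )?;

    // Submit a new procedure with a freshly assigned id (typically a UUID).
    manager
        .submit(ProcedureWithId {
            id: ProcedureId::random(),
            procedure: Box::new(CreateTableProcedure::new()),
        })
        .await
}
```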
## Rollback
The rollback step is supposed to clean up the resources created during the execute() step. When a procedure has failed, the `ProcedureManager` puts a `rollback` file and calls the `Procedure::rollback()` method.


```text
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.rollback
```

Rollback is complicated to implement so some procedures might not support rollback or only provide a best-efforts approach.

## Locking
The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object such as a table so other procedures are unable to modify the same table while the current one is executing.

# Drawbacks
The `Procedure` framework introduces additional complexity and overhead to our database.
- To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server
- We need to rewrite the logic of creating/dropping/altering a table using the procedure framework

# Alternatives
Another approach is to tolerate failure during execution and allow users to retry the operation until it succeeds. But we still need to:
- Make each step idempotent
- Record the status in some place to check whether we are done
92 docs/rfcs/2023-02-01-table-compaction.md Normal file
@@ -0,0 +1,92 @@
---
Feature Name: "table-compaction"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/930
Date: 2023-02-01
Author: "Lei, HUANG <mrsatangel@gmail.com>"
---

# Table Compaction

---

## Background

GreptimeDB uses an LSM-tree based storage engine that flushes memtables to SSTs for persistence.
But currently it only supports level 0. SST files in level 0 does not guarantee to contain only rows with disjoint time ranges.
That is to say, different SST files in level 0 may contain overlapped timestamps.
The consequence is, in order to retrieve rows in some time range, all files need to be scanned, which brings a lot of IO overhead.

Also, just like other LSMT engines, delete/update to existing primary keys are converted to new rows with delete/update mark and appended to SSTs on flushing.
We need to merge the operations to same primary keys so that we don't have to go through all SST files to find the final state of these primary keys.

## Goal

Implement a compaction framework to:
- maintain SSTs in timestamp order to accelerate queries with timestamp condition;
- merge rows with same primary key;
- purge expired SSTs;
- accommodate other tasks like data rollup/indexing.


## Overview

Table compaction involves following components:
- Compaction scheduler: run compaction tasks, limit the consumed resources;
- Compaction strategy: find the SSTs to compact and determine the output files of compaction.
- Compaction task: read the rows from input SSTs and write to the output files.

## Implementation

### Compaction scheduler

`CompactionScheduler` is an executor that continuously polls and executes compaction request from a task queue.

```rust
#[async_trait]
pub trait CompactionScheduler {
    /// Schedules a compaction task.
    async fn schedule(&self, task: CompactionRequest) -> Result<()>;

    /// Stops compaction scheduler.
    async fn stop(&self) -> Result<()>;
}
```



### Compaction triggering

Currently, we can check whether to compact tables when memtable is flushed to SST.

https://github.com/GreptimeTeam/greptimedb/blob/4015dd80752e1e6aaa3d7cacc3203cb67ed9be6d/src/storage/src/flush.rs#L245


### Compaction strategy

`CompactionStrategy` defines how to pick SSTs in all levels for compaction.

```rust
pub trait CompactionStrategy {
    fn pick(
        &self,
        ctx: CompactionContext,
        levels: &LevelMetas,
    ) -> Result<CompactionTask>;
}
```

The most suitable compaction strategy for time-series scenario would be
a hybrid strategy that combines time window compaction with size-tired compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) does.

We can first group SSTs in level n into buckets according to some predefined time window. Within that window,
SSTs are compacted in a size-tired manner (find SSTs with similar size and compact them to level n+1).
SSTs from different time windows are neven compacted together.
That strategy guarantees SSTs in each level are mainly sorted in timestamp order which boosts queries with
explicit timestamp condition, while size-tired compaction minimizes the impact to foreground writes.
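A minimal sketch of the time-window grouping step described above (the `FileMeta` fields and the bucketing granularity are assumptions for illustration, not the engine's actual types):

```rust
use std::collections::BTreeMap;

// Invented for illustration; the real SST metadata type in the storage engine differs.
pub struct FileMeta {
    pub file_id: String,
    pub start_ts_secs: i64, // earliest timestamp contained in the SST
    pub size_bytes: u64,
}

/// Group level-n SSTs into fixed-duration time windows; files inside one bucket
/// are then candidates for a size-tiered merge into level n + 1.
pub fn group_by_time_window(files: &[FileMeta], window_secs: i64) -> BTreeMap<i64, Vec<&FileMeta>> {
    let mut buckets: BTreeMap<i64, Vec<&FileMeta>> = BTreeMap::new();
    for file in files {
        // Align the file's earliest timestamp down to its window boundary.
        let bucket = file.start_ts_secs.div_euclid(window_secs) * window_secs;
        buckets.entry(bucket).or_default().push(file);
    }
    buckets
}
```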
### Alternatives

Currently, GreptimeDB's storage engine [only supports two levels](https://github.com/GreptimeTeam/greptimedb/blob/43aefc5d74dfa73b7819cae77b7eb546d8534a41/src/storage/src/sst.rs#L32). For level 0, we can start with a simple time-window based leveled compaction, which reads all SSTs in level 0, aligns them to time windows with a fixed duration, and merges them with the SSTs in level 1 within the same time window, ensuring there is only one sorted run in level 1. A sketch of this procedure follows.
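A minimal sketch of that level 0 alternative, assuming a hypothetical `merge_ssts` helper and ignoring files that span more than one window (a real implementation would split them at window boundaries):

```rust
use std::collections::BTreeMap;

/// Hypothetical SST metadata for this sketch.
#[derive(Debug, Clone)]
pub struct SstMeta {
    pub file_id: u64,
    pub min_ts: i64,
    pub max_ts: i64,
}

/// Stand-in for the real merge: reads the inputs, merges rows with the same
/// primary key, and writes a single sorted output file for the window.
fn merge_ssts(_inputs: &[SstMeta], window_start: i64, window_secs: i64) -> SstMeta {
    SstMeta {
        file_id: 0,
        min_ts: window_start,
        max_ts: window_start + window_secs - 1,
    }
}

/// Compact all level 0 SSTs into level 1, keeping exactly one sorted run per
/// fixed-duration time window in level 1.
pub fn compact_level0(
    level0: &[SstMeta],
    level1: &mut BTreeMap<i64, SstMeta>, // keyed by window start
    window_secs: i64,
) {
    // Align level 0 files to their time windows.
    let mut buckets: BTreeMap<i64, Vec<SstMeta>> = BTreeMap::new();
    for sst in level0 {
        let window = sst.min_ts.div_euclid(window_secs) * window_secs;
        buckets.entry(window).or_default().push(sst.clone());
    }
    // Merge each bucket with the existing level 1 file of the same window.
    for (window, mut inputs) in buckets {
        if let Some(existing) = level1.remove(&window) {
            inputs.push(existing);
        }
        let output = merge_ssts(&inputs, window, window_secs);
        level1.insert(window, output);
    }
}
```
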
@@ -1 +0,0 @@
|
||||
nightly-2022-12-20
|
||||
2
rust-toolchain.toml
Normal file
2
rust-toolchain.toml
Normal file
@@ -0,0 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2023-02-26"
|
||||
@@ -10,6 +10,7 @@ common-base = { path = "../common/base" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-time = { path = "../common/time" }
|
||||
datatypes = { path = "../datatypes" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
tonic.workspace = true
|
||||
|
||||
@@ -1,85 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1;
|
||||
|
||||
message Column {
|
||||
string column_name = 1;
|
||||
|
||||
enum SemanticType {
|
||||
TAG = 0;
|
||||
FIELD = 1;
|
||||
TIMESTAMP = 2;
|
||||
}
|
||||
SemanticType semantic_type = 2;
|
||||
|
||||
message Values {
|
||||
repeated int32 i8_values = 1;
|
||||
repeated int32 i16_values = 2;
|
||||
repeated int32 i32_values = 3;
|
||||
repeated int64 i64_values = 4;
|
||||
|
||||
repeated uint32 u8_values = 5;
|
||||
repeated uint32 u16_values = 6;
|
||||
repeated uint32 u32_values = 7;
|
||||
repeated uint64 u64_values = 8;
|
||||
|
||||
repeated float f32_values = 9;
|
||||
repeated double f64_values = 10;
|
||||
|
||||
repeated bool bool_values = 11;
|
||||
repeated bytes binary_values = 12;
|
||||
repeated string string_values = 13;
|
||||
|
||||
repeated int32 date_values = 14;
|
||||
repeated int64 datetime_values = 15;
|
||||
repeated int64 ts_second_values = 16;
|
||||
repeated int64 ts_millisecond_values = 17;
|
||||
repeated int64 ts_microsecond_values = 18;
|
||||
repeated int64 ts_nanosecond_values = 19;
|
||||
}
|
||||
// The array of non-null values in this column.
|
||||
//
|
||||
// For example: suppose there is a column "foo" that contains some int32 values (1, 2, 3, 4, 5, null, 7, 8, 9, null);
|
||||
// column:
|
||||
// column_name: foo
|
||||
// semantic_type: Tag
|
||||
// values: 1, 2, 3, 4, 5, 7, 8, 9
|
||||
// null_masks: 00100000 00000010
|
||||
Values values = 3;
|
||||
|
||||
// Mask maps the positions of null values.
|
||||
// If a bit in null_mask is 1, it indicates that the column value at that position is null.
|
||||
bytes null_mask = 4;
|
||||
|
||||
// Helpful in creating vector from column.
|
||||
ColumnDataType datatype = 5;
|
||||
}
|
||||
|
||||
message ColumnDef {
|
||||
string name = 1;
|
||||
ColumnDataType datatype = 2;
|
||||
bool is_nullable = 3;
|
||||
bytes default_constraint = 4;
|
||||
}
|
||||
|
||||
enum ColumnDataType {
|
||||
BOOLEAN = 0;
|
||||
INT8 = 1;
|
||||
INT16 = 2;
|
||||
INT32 = 3;
|
||||
INT64 = 4;
|
||||
UINT8 = 5;
|
||||
UINT16 = 6;
|
||||
UINT32 = 7;
|
||||
UINT64 = 8;
|
||||
FLOAT32 = 9;
|
||||
FLOAT64 = 10;
|
||||
BINARY = 11;
|
||||
STRING = 12;
|
||||
DATE = 13;
|
||||
DATETIME = 14;
|
||||
TIMESTAMP_SECOND = 15;
|
||||
TIMESTAMP_MILLISECOND = 16;
|
||||
TIMESTAMP_MICROSECOND = 17;
|
||||
TIMESTAMP_NANOSECOND = 18;
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1;
|
||||
|
||||
import "greptime/v1/ddl.proto";
|
||||
import "greptime/v1/column.proto";
|
||||
|
||||
message GreptimeRequest {
|
||||
oneof request {
|
||||
InsertRequest insert = 1;
|
||||
QueryRequest query = 2;
|
||||
DdlRequest ddl = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message QueryRequest {
|
||||
oneof query {
|
||||
string sql = 1;
|
||||
bytes logical_plan = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message InsertRequest {
|
||||
string schema_name = 1;
|
||||
string table_name = 2;
|
||||
|
||||
// Data is represented here.
|
||||
repeated Column columns = 3;
|
||||
|
||||
// The row_count of all columns, which include null and non-null values.
|
||||
//
|
||||
// Note: the row_count of all columns in a InsertRequest must be same.
|
||||
uint32 row_count = 4;
|
||||
|
||||
// The region number of current insert request.
|
||||
uint32 region_number = 5;
|
||||
}
|
||||
|
||||
message FlightDataExt {
|
||||
uint32 affected_rows = 1;
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1;
|
||||
|
||||
import "greptime/v1/column.proto";
|
||||
|
||||
// "Data Definition Language" requests, that create, modify or delete the database structures but not the data.
|
||||
// `DdlRequest` could carry more information than plain SQL, for example, the "table_id" in `CreateTableExpr`.
|
||||
// So create a new DDL expr if you need it.
|
||||
message DdlRequest {
|
||||
oneof expr {
|
||||
CreateDatabaseExpr create_database = 1;
|
||||
CreateTableExpr create_table = 2;
|
||||
AlterExpr alter = 3;
|
||||
DropTableExpr drop_table = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message CreateTableExpr {
|
||||
string catalog_name = 1;
|
||||
string schema_name = 2;
|
||||
string table_name = 3;
|
||||
string desc = 4;
|
||||
repeated ColumnDef column_defs = 5;
|
||||
string time_index = 6;
|
||||
repeated string primary_keys = 7;
|
||||
bool create_if_not_exists = 8;
|
||||
map<string, string> table_options = 9;
|
||||
TableId table_id = 10;
|
||||
repeated uint32 region_ids = 11;
|
||||
}
|
||||
|
||||
message AlterExpr {
|
||||
string catalog_name = 1;
|
||||
string schema_name = 2;
|
||||
string table_name = 3;
|
||||
oneof kind {
|
||||
AddColumns add_columns = 4;
|
||||
DropColumns drop_columns = 5;
|
||||
RenameTable rename_table = 6;
|
||||
}
|
||||
}
|
||||
|
||||
message DropTableExpr {
|
||||
string catalog_name = 1;
|
||||
string schema_name = 2;
|
||||
string table_name = 3;
|
||||
}
|
||||
|
||||
message CreateDatabaseExpr {
|
||||
//TODO(hl): maybe rename to schema_name?
|
||||
string database_name = 1;
|
||||
bool create_if_not_exists = 2;
|
||||
}
|
||||
|
||||
message AddColumns {
|
||||
repeated AddColumn add_columns = 1;
|
||||
}
|
||||
|
||||
message DropColumns {
|
||||
repeated DropColumn drop_columns = 1;
|
||||
}
|
||||
|
||||
message RenameTable {
|
||||
string new_table_name = 1;
|
||||
}
|
||||
|
||||
message AddColumn {
|
||||
ColumnDef column_def = 1;
|
||||
bool is_key = 2;
|
||||
}
|
||||
|
||||
message DropColumn {
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
message TableId {
|
||||
uint32 id = 1;
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1.meta;
|
||||
|
||||
message RequestHeader {
|
||||
uint64 protocol_version = 1;
|
||||
// cluster_id is the ID of the cluster which be sent to.
|
||||
uint64 cluster_id = 2;
|
||||
// member_id is the ID of the sender server.
|
||||
uint64 member_id = 3;
|
||||
}
|
||||
|
||||
message ResponseHeader {
|
||||
uint64 protocol_version = 1;
|
||||
// cluster_id is the ID of the cluster which sent the response.
|
||||
uint64 cluster_id = 2;
|
||||
Error error = 3;
|
||||
}
|
||||
|
||||
message Error {
|
||||
int32 code = 1;
|
||||
string err_msg = 2;
|
||||
}
|
||||
|
||||
message Peer {
|
||||
uint64 id = 1;
|
||||
string addr = 2;
|
||||
}
|
||||
|
||||
message TableName {
|
||||
string catalog_name = 1;
|
||||
string schema_name = 2;
|
||||
string table_name = 3;
|
||||
}
|
||||
|
||||
message TimeInterval {
|
||||
// The unix timestamp in millis of the start of this period.
|
||||
uint64 start_timestamp_millis = 1;
|
||||
// The unix timestamp in millis of the end of this period.
|
||||
uint64 end_timestamp_millis = 2;
|
||||
}
|
||||
|
||||
message KeyValue {
|
||||
// key is the key in bytes. An empty key is not allowed.
|
||||
bytes key = 1;
|
||||
// value is the value held by the key, in bytes.
|
||||
bytes value = 2;
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1.meta;
|
||||
|
||||
import "greptime/v1/meta/common.proto";
|
||||
|
||||
service Heartbeat {
|
||||
// Heartbeat, there may be many contents of the heartbeat, such as:
|
||||
// 1. Metadata to be registered to meta server and discoverable by other nodes.
|
||||
// 2. Some performance metrics, such as Load, CPU usage, etc.
|
||||
// 3. The number of computing tasks being executed.
|
||||
rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}
|
||||
|
||||
// Ask leader's endpoint.
|
||||
rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
|
||||
}
|
||||
|
||||
message HeartbeatRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// Self peer
|
||||
Peer peer = 2;
|
||||
// Leader node
|
||||
bool is_leader = 3;
|
||||
// Actually reported time interval
|
||||
TimeInterval report_interval = 4;
|
||||
// Node stat
|
||||
NodeStat node_stat = 5;
|
||||
// Region stats on this node
|
||||
repeated RegionStat region_stats = 6;
|
||||
// Follower nodes and stats, empty on follower nodes
|
||||
repeated ReplicaStat replica_stats = 7;
|
||||
}
|
||||
|
||||
message NodeStat {
|
||||
// The read capacity units during this period
|
||||
int64 rcus = 1;
|
||||
// The write capacity units during this period
|
||||
int64 wcus = 2;
|
||||
// How many tables on this node
|
||||
int64 table_num = 3;
|
||||
// How many regions on this node
|
||||
int64 region_num = 4;
|
||||
|
||||
double cpu_usage = 5;
|
||||
double load = 6;
|
||||
// Read disk IO on this node
|
||||
double read_io_rate = 7;
|
||||
// Write disk IO on this node
|
||||
double write_io_rate = 8;
|
||||
|
||||
// Others
|
||||
map<string, string> attrs = 100;
|
||||
}
|
||||
|
||||
message RegionStat {
|
||||
uint64 region_id = 1;
|
||||
TableName table_name = 2;
|
||||
// The read capacity units during this period
|
||||
int64 rcus = 3;
|
||||
// The write capacity units during this period
|
||||
int64 wcus = 4;
|
||||
// Approximate bytes of this region
|
||||
int64 approximate_bytes = 5;
|
||||
// Approximate number of rows in this region
|
||||
int64 approximate_rows = 6;
|
||||
|
||||
// Others
|
||||
map<string, string> attrs = 100;
|
||||
}
|
||||
|
||||
message ReplicaStat {
|
||||
Peer peer = 1;
|
||||
bool in_sync = 2;
|
||||
bool is_learner = 3;
|
||||
}
|
||||
|
||||
message HeartbeatResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
repeated bytes payload = 2;
|
||||
}
|
||||
|
||||
message AskLeaderRequest {
|
||||
RequestHeader header = 1;
|
||||
}
|
||||
|
||||
message AskLeaderResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
Peer leader = 2;
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1.meta;
|
||||
|
||||
import "greptime/v1/meta/common.proto";
|
||||
|
||||
service Router {
|
||||
rpc Create(CreateRequest) returns (RouteResponse) {}
|
||||
|
||||
// Fetch routing information for tables. The smallest unit is the complete
|
||||
// routing information(all regions) of a table.
|
||||
//
|
||||
// ```text
|
||||
// table_1
|
||||
// table_name
|
||||
// table_schema
|
||||
// regions
|
||||
// region_1
|
||||
// leader_peer
|
||||
// follower_peer_1, follower_peer_2
|
||||
// region_2
|
||||
// leader_peer
|
||||
// follower_peer_1, follower_peer_2, follower_peer_3
|
||||
// region_xxx
|
||||
// table_2
|
||||
// ...
|
||||
// ```
|
||||
//
|
||||
rpc Route(RouteRequest) returns (RouteResponse) {}
|
||||
|
||||
rpc Delete(DeleteRequest) returns (RouteResponse) {}
|
||||
}
|
||||
|
||||
message CreateRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
TableName table_name = 2;
|
||||
repeated Partition partitions = 3;
|
||||
}
|
||||
|
||||
message RouteRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
repeated TableName table_names = 2;
|
||||
}
|
||||
|
||||
message DeleteRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
TableName table_name = 2;
|
||||
}
|
||||
|
||||
message RouteResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
repeated Peer peers = 2;
|
||||
repeated TableRoute table_routes = 3;
|
||||
}
|
||||
|
||||
message TableRoute {
|
||||
Table table = 1;
|
||||
repeated RegionRoute region_routes = 2;
|
||||
}
|
||||
|
||||
message RegionRoute {
|
||||
Region region = 1;
|
||||
// single leader node for write task
|
||||
uint64 leader_peer_index = 2;
|
||||
// multiple follower nodes for read task
|
||||
repeated uint64 follower_peer_indexes = 3;
|
||||
}
|
||||
|
||||
message Table {
|
||||
uint64 id = 1;
|
||||
TableName table_name = 2;
|
||||
bytes table_schema = 3;
|
||||
}
|
||||
|
||||
message Region {
|
||||
// TODO(LFC): Maybe use message RegionNumber?
|
||||
uint64 id = 1;
|
||||
string name = 2;
|
||||
Partition partition = 3;
|
||||
|
||||
map<string, string> attrs = 100;
|
||||
}
|
||||
|
||||
// PARTITION `region_name` VALUES LESS THAN (value_list)
|
||||
message Partition {
|
||||
repeated bytes column_list = 1;
|
||||
repeated bytes value_list = 2;
|
||||
}
|
||||
|
||||
// This message is only for saving into store.
|
||||
message TableRouteValue {
|
||||
repeated Peer peers = 1;
|
||||
TableRoute table_route = 2;
|
||||
}
|
||||
@@ -1,159 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package greptime.v1.meta;
|
||||
|
||||
import "greptime/v1/meta/common.proto";
|
||||
|
||||
service Store {
|
||||
// Range gets the keys in the range from the key-value store.
|
||||
rpc Range(RangeRequest) returns (RangeResponse);
|
||||
|
||||
// Put puts the given key into the key-value store.
|
||||
rpc Put(PutRequest) returns (PutResponse);
|
||||
|
||||
// BatchPut atomically puts the given keys into the key-value store.
|
||||
rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);
|
||||
|
||||
// CompareAndPut atomically puts the value to the given updated
|
||||
// value if the current value == the expected value.
|
||||
rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);
|
||||
|
||||
// DeleteRange deletes the given range from the key-value store.
|
||||
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);
|
||||
|
||||
// MoveValue atomically renames the key to the given updated key.
|
||||
rpc MoveValue(MoveValueRequest) returns (MoveValueResponse);
|
||||
}
|
||||
|
||||
message RangeRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// key is the first key for the range, If range_end is not given, the
|
||||
// request only looks up key.
|
||||
bytes key = 2;
|
||||
// range_end is the upper bound on the requested range [key, range_end).
|
||||
// If range_end is '\0', the range is all keys >= key.
|
||||
// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
|
||||
// then the range request gets all keys prefixed with key.
|
||||
// If both key and range_end are '\0', then the range request returns all
|
||||
// keys.
|
||||
bytes range_end = 3;
|
||||
// limit is a limit on the number of keys returned for the request. When
|
||||
// limit is set to 0, it is treated as no limit.
|
||||
int64 limit = 4;
|
||||
// keys_only when set returns only the keys and not the values.
|
||||
bool keys_only = 5;
|
||||
}
|
||||
|
||||
message RangeResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
// kvs is the list of key-value pairs matched by the range request.
|
||||
repeated KeyValue kvs = 2;
|
||||
// more indicates if there are more keys to return in the requested range.
|
||||
bool more = 3;
|
||||
}
|
||||
|
||||
message PutRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// key is the key, in bytes, to put into the key-value store.
|
||||
bytes key = 2;
|
||||
// value is the value, in bytes, to associate with the key in the
|
||||
// key-value store.
|
||||
bytes value = 3;
|
||||
// If prev_kv is set, gets the previous key-value pair before changing it.
|
||||
// The previous key-value pair will be returned in the put response.
|
||||
bool prev_kv = 4;
|
||||
}
|
||||
|
||||
message PutResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
// If prev_kv is set in the request, the previous key-value pair will be
|
||||
// returned.
|
||||
KeyValue prev_kv = 2;
|
||||
}
|
||||
|
||||
message BatchPutRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
repeated KeyValue kvs = 2;
|
||||
// If prev_kv is set, gets the previous key-value pairs before changing it.
|
||||
// The previous key-value pairs will be returned in the batch put response.
|
||||
bool prev_kv = 3;
|
||||
}
|
||||
|
||||
message BatchPutResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
// If prev_kv is set in the request, the previous key-value pairs will be
|
||||
// returned.
|
||||
repeated KeyValue prev_kvs = 2;
|
||||
}
|
||||
|
||||
message CompareAndPutRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// key is the key, in bytes, to put into the key-value store.
|
||||
bytes key = 2;
|
||||
// expect is the previous value, in bytes
|
||||
bytes expect = 3;
|
||||
// value is the value, in bytes, to associate with the key in the
|
||||
// key-value store.
|
||||
bytes value = 4;
|
||||
}
|
||||
|
||||
message CompareAndPutResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
bool success = 2;
|
||||
KeyValue prev_kv = 3;
|
||||
}
|
||||
|
||||
message DeleteRangeRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// key is the first key to delete in the range.
|
||||
bytes key = 2;
|
||||
// range_end is the key following the last key to delete for the range
|
||||
// [key, range_end).
|
||||
// If range_end is not given, the range is defined to contain only the key
|
||||
// argument.
|
||||
// If range_end is one bit larger than the given key, then the range is all
|
||||
// the keys with the prefix (the given key).
|
||||
// If range_end is '\0', the range is all keys greater than or equal to the
|
||||
// key argument.
|
||||
bytes range_end = 3;
|
||||
// If prev_kv is set, gets the previous key-value pairs before deleting it.
|
||||
// The previous key-value pairs will be returned in the delete response.
|
||||
bool prev_kv = 4;
|
||||
}
|
||||
|
||||
message DeleteRangeResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
// deleted is the number of keys deleted by the delete range request.
|
||||
int64 deleted = 2;
|
||||
// If prev_kv is set in the request, the previous key-value pairs will be
|
||||
// returned.
|
||||
repeated KeyValue prev_kvs = 3;
|
||||
}
|
||||
|
||||
message MoveValueRequest {
|
||||
RequestHeader header = 1;
|
||||
|
||||
// If from_key dose not exist, return the value of to_key (if it exists).
|
||||
// If from_key exists, move the value of from_key to to_key (i.e. rename),
|
||||
// and return the value.
|
||||
bytes from_key = 2;
|
||||
bytes to_key = 3;
|
||||
}
|
||||
|
||||
message MoveValueResponse {
|
||||
ResponseHeader header = 1;
|
||||
|
||||
// If from_key dose not exist, return the value of to_key (if it exists).
|
||||
// If from_key exists, return the value of from_key.
|
||||
KeyValue kv = 2;
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
// Copyright 2016 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
package prometheus;
|
||||
|
||||
option go_package = "prompb";
|
||||
|
||||
import "prometheus/remote/types.proto";
|
||||
|
||||
message WriteRequest {
|
||||
repeated prometheus.TimeSeries timeseries = 1;
|
||||
// Cortex uses this field to determine the source of the write request.
|
||||
// We reserve it to avoid any compatibility issues.
|
||||
reserved 2;
|
||||
repeated prometheus.MetricMetadata metadata = 3;
|
||||
}
|
||||
|
||||
// ReadRequest represents a remote read request.
|
||||
message ReadRequest {
|
||||
repeated Query queries = 1;
|
||||
|
||||
enum ResponseType {
|
||||
// Server will return a single ReadResponse message with matched series that includes list of raw samples.
|
||||
// It's recommended to use streamed response types instead.
|
||||
//
|
||||
// Response headers:
|
||||
// Content-Type: "application/x-protobuf"
|
||||
// Content-Encoding: "snappy"
|
||||
SAMPLES = 0;
|
||||
// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
|
||||
// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
|
||||
//
|
||||
// Response headers:
|
||||
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
|
||||
// Content-Encoding: ""
|
||||
STREAMED_XOR_CHUNKS = 1;
|
||||
}
|
||||
|
||||
// accepted_response_types allows negotiating the content type of the response.
|
||||
//
|
||||
// Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
|
||||
// implemented by server, error is returned.
|
||||
// For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
|
||||
repeated ResponseType accepted_response_types = 2;
|
||||
}
|
||||
|
||||
// ReadResponse is a response when response_type equals SAMPLES.
|
||||
message ReadResponse {
|
||||
// In same order as the request's queries.
|
||||
repeated QueryResult results = 1;
|
||||
}
|
||||
|
||||
message Query {
|
||||
int64 start_timestamp_ms = 1;
|
||||
int64 end_timestamp_ms = 2;
|
||||
repeated prometheus.LabelMatcher matchers = 3;
|
||||
prometheus.ReadHints hints = 4;
|
||||
}
|
||||
|
||||
message QueryResult {
|
||||
// Samples within a time series must be ordered by time.
|
||||
repeated prometheus.TimeSeries timeseries = 1;
|
||||
}
|
||||
|
||||
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
|
||||
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
|
||||
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
|
||||
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
|
||||
message ChunkedReadResponse {
|
||||
repeated prometheus.ChunkedSeries chunked_series = 1;
|
||||
|
||||
// query_index represents an index of the query from ReadRequest.queries these chunks relates to.
|
||||
int64 query_index = 2;
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
// Copyright 2017 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
package prometheus;
|
||||
|
||||
option go_package = "prompb";
|
||||
|
||||
message MetricMetadata {
|
||||
enum MetricType {
|
||||
UNKNOWN = 0;
|
||||
COUNTER = 1;
|
||||
GAUGE = 2;
|
||||
HISTOGRAM = 3;
|
||||
GAUGEHISTOGRAM = 4;
|
||||
SUMMARY = 5;
|
||||
INFO = 6;
|
||||
STATESET = 7;
|
||||
}
|
||||
|
||||
// Represents the metric type, these match the set from Prometheus.
|
||||
// Refer to model/textparse/interface.go for details.
|
||||
MetricType type = 1;
|
||||
string metric_family_name = 2;
|
||||
string help = 4;
|
||||
string unit = 5;
|
||||
}
|
||||
|
||||
message Sample {
|
||||
double value = 1;
|
||||
// timestamp is in ms format, see model/timestamp/timestamp.go for
|
||||
// conversion from time.Time to Prometheus timestamp.
|
||||
int64 timestamp = 2;
|
||||
}
|
||||
|
||||
message Exemplar {
|
||||
// Optional, can be empty.
|
||||
repeated Label labels = 1;
|
||||
double value = 2;
|
||||
// timestamp is in ms format, see model/timestamp/timestamp.go for
|
||||
// conversion from time.Time to Prometheus timestamp.
|
||||
int64 timestamp = 3;
|
||||
}
|
||||
|
||||
// TimeSeries represents samples and labels for a single time series.
|
||||
message TimeSeries {
|
||||
// For a timeseries to be valid, and for the samples and exemplars
|
||||
// to be ingested by the remote system properly, the labels field is required.
|
||||
repeated Label labels = 1;
|
||||
repeated Sample samples = 2;
|
||||
repeated Exemplar exemplars = 3;
|
||||
}
|
||||
|
||||
message Label {
|
||||
string name = 1;
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
message Labels {
|
||||
repeated Label labels = 1;
|
||||
}
|
||||
|
||||
// Matcher specifies a rule, which can match or set of labels or not.
|
||||
message LabelMatcher {
|
||||
enum Type {
|
||||
EQ = 0;
|
||||
NEQ = 1;
|
||||
RE = 2;
|
||||
NRE = 3;
|
||||
}
|
||||
Type type = 1;
|
||||
string name = 2;
|
||||
string value = 3;
|
||||
}
|
||||
|
||||
message ReadHints {
|
||||
int64 step_ms = 1; // Query step size in milliseconds.
|
||||
string func = 2; // String representation of surrounding function or aggregation.
|
||||
int64 start_ms = 3; // Start time in milliseconds.
|
||||
int64 end_ms = 4; // End time in milliseconds.
|
||||
repeated string grouping = 5; // List of label names used in aggregation.
|
||||
bool by = 6; // Indicate whether it is without or by.
|
||||
int64 range_ms = 7; // Range vector selector range in milliseconds.
|
||||
}
|
||||
|
||||
// Chunk represents a TSDB chunk.
|
||||
// Time range [min, max] is inclusive.
|
||||
message Chunk {
|
||||
int64 min_time_ms = 1;
|
||||
int64 max_time_ms = 2;
|
||||
|
||||
// We require this to match chunkenc.Encoding.
|
||||
enum Encoding {
|
||||
UNKNOWN = 0;
|
||||
XOR = 1;
|
||||
}
|
||||
Encoding type = 3;
|
||||
bytes data = 4;
|
||||
}
|
||||
|
||||
// ChunkedSeries represents single, encoded time series.
|
||||
message ChunkedSeries {
|
||||
// Labels should be sorted.
|
||||
repeated Label labels = 1;
|
||||
// Chunks will be in start time order and may overlap.
|
||||
repeated Chunk chunks = 2;
|
||||
}
|
||||
@@ -97,7 +97,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
|
||||
TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
|
||||
},
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
}
|
||||
});
|
||||
@@ -105,125 +107,121 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
}
|
||||
}
|
||||
|
||||
impl Values {
|
||||
pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
|
||||
match datatype {
|
||||
ColumnDataType::Boolean => Values {
|
||||
bool_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int8 => Values {
|
||||
i8_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int16 => Values {
|
||||
i16_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int32 => Values {
|
||||
i32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64 => Values {
|
||||
i64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint8 => Values {
|
||||
u8_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint16 => Values {
|
||||
u16_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint32 => Values {
|
||||
u32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint64 => Values {
|
||||
u64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float32 => Values {
|
||||
f32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64 => Values {
|
||||
f64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Binary => Values {
|
||||
binary_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String => Values {
|
||||
string_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Date => Values {
|
||||
date_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Datetime => Values {
|
||||
datetime_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampSecond => Values {
|
||||
ts_second_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMillisecond => Values {
|
||||
ts_millisecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond => Values {
|
||||
ts_microsecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampNanosecond => Values {
|
||||
ts_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values {
|
||||
match datatype {
|
||||
ColumnDataType::Boolean => Values {
|
||||
bool_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int8 => Values {
|
||||
i8_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int16 => Values {
|
||||
i16_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int32 => Values {
|
||||
i32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64 => Values {
|
||||
i64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint8 => Values {
|
||||
u8_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint16 => Values {
|
||||
u16_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint32 => Values {
|
||||
u32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Uint64 => Values {
|
||||
u64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float32 => Values {
|
||||
f32_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64 => Values {
|
||||
f64_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Binary => Values {
|
||||
binary_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String => Values {
|
||||
string_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Date => Values {
|
||||
date_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Datetime => Values {
|
||||
datetime_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampSecond => Values {
|
||||
ts_second_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMillisecond => Values {
|
||||
ts_millisecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond => Values {
|
||||
ts_microsecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampNanosecond => Values {
|
||||
ts_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
impl Column {
|
||||
// The type of vals must be same.
|
||||
pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
|
||||
let values = self.values.get_or_insert_with(Values::default);
|
||||
let mut null_mask = BitVec::from_slice(&self.null_mask);
|
||||
let len = vector.len();
|
||||
null_mask.reserve_exact(origin_count + len);
|
||||
null_mask.extend(BitVec::repeat(false, len));
|
||||
// The type of vals must be same.
|
||||
pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
let values = column.values.get_or_insert_with(Values::default);
|
||||
let mut null_mask = BitVec::from_slice(&column.null_mask);
|
||||
let len = vector.len();
|
||||
null_mask.reserve_exact(origin_count + len);
|
||||
null_mask.extend(BitVec::repeat(false, len));
|
||||
|
||||
(0..len).into_iter().for_each(|idx| match vector.get(idx) {
|
||||
Value::Null => null_mask.set(idx + origin_count, true),
|
||||
Value::Boolean(val) => values.bool_values.push(val),
|
||||
Value::UInt8(val) => values.u8_values.push(val.into()),
|
||||
Value::UInt16(val) => values.u16_values.push(val.into()),
|
||||
Value::UInt32(val) => values.u32_values.push(val),
|
||||
Value::UInt64(val) => values.u64_values.push(val),
|
||||
Value::Int8(val) => values.i8_values.push(val.into()),
|
||||
Value::Int16(val) => values.i16_values.push(val.into()),
|
||||
Value::Int32(val) => values.i32_values.push(val),
|
||||
Value::Int64(val) => values.i64_values.push(val),
|
||||
Value::Float32(val) => values.f32_values.push(*val),
|
||||
Value::Float64(val) => values.f64_values.push(*val),
|
||||
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
|
||||
Value::Binary(val) => values.binary_values.push(val.to_vec()),
|
||||
Value::Date(val) => values.date_values.push(val.val()),
|
||||
Value::DateTime(val) => values.datetime_values.push(val.val()),
|
||||
Value::Timestamp(val) => match val.unit() {
|
||||
TimeUnit::Second => values.ts_second_values.push(val.value()),
|
||||
TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
|
||||
TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::List(_) => unreachable!(),
|
||||
});
|
||||
self.null_mask = null_mask.into_vec();
|
||||
}
|
||||
(0..len).for_each(|idx| match vector.get(idx) {
|
||||
Value::Null => null_mask.set(idx + origin_count, true),
|
||||
Value::Boolean(val) => values.bool_values.push(val),
|
||||
Value::UInt8(val) => values.u8_values.push(val.into()),
|
||||
Value::UInt16(val) => values.u16_values.push(val.into()),
|
||||
Value::UInt32(val) => values.u32_values.push(val),
|
||||
Value::UInt64(val) => values.u64_values.push(val),
|
||||
Value::Int8(val) => values.i8_values.push(val.into()),
|
||||
Value::Int16(val) => values.i16_values.push(val.into()),
|
||||
Value::Int32(val) => values.i32_values.push(val),
|
||||
Value::Int64(val) => values.i64_values.push(val),
|
||||
Value::Float32(val) => values.f32_values.push(*val),
|
||||
Value::Float64(val) => values.f64_values.push(*val),
|
||||
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
|
||||
Value::Binary(val) => values.binary_values.push(val.to_vec()),
|
||||
Value::Date(val) => values.date_values.push(val.val()),
|
||||
Value::DateTime(val) => values.datetime_values.push(val.val()),
|
||||
Value::Timestamp(val) => match val.unit() {
|
||||
TimeUnit::Second => values.ts_second_values.push(val.value()),
|
||||
TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
|
||||
TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::List(_) => unreachable!(),
|
||||
});
|
||||
column.null_mask = null_mask.into_vec();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -239,59 +237,59 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_values_with_capacity() {
|
||||
let values = Values::with_capacity(ColumnDataType::Int8, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Int8, 2);
|
||||
let values = values.i8_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Int32, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Int32, 2);
|
||||
let values = values.i32_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Int64, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Int64, 2);
|
||||
let values = values.i64_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Uint8, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Uint8, 2);
|
||||
let values = values.u8_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Uint32, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Uint32, 2);
|
||||
let values = values.u32_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Uint64, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Uint64, 2);
|
||||
let values = values.u64_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Float32, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Float32, 2);
|
||||
let values = values.f32_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Float64, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Float64, 2);
|
||||
let values = values.f64_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Binary, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Binary, 2);
|
||||
let values = values.binary_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Boolean, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Boolean, 2);
|
||||
let values = values.bool_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::String, 2);
|
||||
let values = values_with_capacity(ColumnDataType::String, 2);
|
||||
let values = values.string_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Date, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Date, 2);
|
||||
let values = values.date_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::Datetime, 2);
|
||||
let values = values_with_capacity(ColumnDataType::Datetime, 2);
|
||||
let values = values.datetime_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = Values::with_capacity(ColumnDataType::TimestampMillisecond, 2);
|
||||
let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
|
||||
let values = values.ts_millisecond_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
}
|
||||
@@ -462,28 +460,28 @@ mod tests {
|
||||
};
|
||||
|
||||
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
column.push_vals(3, vector);
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![1, 2, 3],
|
||||
column.values.as_ref().unwrap().ts_nanosecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
|
||||
column.push_vals(3, vector);
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![4, 5, 6],
|
||||
column.values.as_ref().unwrap().ts_millisecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
|
||||
column.push_vals(3, vector);
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![7, 8, 9],
|
||||
column.values.as_ref().unwrap().ts_microsecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
|
||||
column.push_vals(3, vector);
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![10, 11, 12],
|
||||
column.values.as_ref().unwrap().ts_second_values
|
||||
@@ -507,7 +505,7 @@ mod tests {
|
||||
let row_count = 4;
|
||||
|
||||
let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
|
||||
column.push_vals(row_count, vector);
|
||||
push_vals(&mut column, row_count, vector);
|
||||
// Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
|
||||
let bool_values = column.values.unwrap().bool_values;
|
||||
assert_eq!(vec![false, true, true, true, false], bool_values);
|
||||
|
||||
@@ -14,8 +14,13 @@
|
||||
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
pub mod prometheus;
|
||||
pub mod serde;
|
||||
|
||||
pub mod prometheus {
|
||||
pub mod remote {
|
||||
pub use greptime_proto::prometheus::remote::*;
|
||||
}
|
||||
}
|
||||
|
||||
pub mod v1;
|
||||
|
||||
pub use prost::DecodeError;
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub use prost::DecodeError;
|
||||
use prost::Message;
|
||||
|
||||
use crate::v1::meta::TableRouteValue;
|
||||
|
||||
macro_rules! impl_convert_with_bytes {
|
||||
($data_type: ty) => {
|
||||
impl From<$data_type> for Vec<u8> {
|
||||
fn from(entity: $data_type) -> Self {
|
||||
entity.encode_to_vec()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&[u8]> for $data_type {
|
||||
type Error = DecodeError;
|
||||
|
||||
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
|
||||
<$data_type>::decode(value.as_ref())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_convert_with_bytes!(TableRouteValue);
|
||||
@@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(clippy::derive_partial_eq_without_eq)]
|
||||
tonic::include_proto!("greptime.v1");
|
||||
pub mod column_def;
|
||||
|
||||
mod column_def;
|
||||
pub mod meta;
|
||||
pub mod meta {
|
||||
pub use greptime_proto::v1::meta::*;
|
||||
}
|
||||
|
||||
pub use greptime_proto::v1::*;
|
||||
|
||||
@@ -19,21 +19,24 @@ use crate::error::{self, Result};
|
||||
use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::ColumnDef;
|
||||
|
||||
impl ColumnDef {
|
||||
pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;
|
||||
|
||||
let constraint = if self.default_constraint.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
ColumnDefaultConstraint::try_from(self.default_constraint.as_slice())
|
||||
.context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
|
||||
)
|
||||
};
|
||||
let constraint = if column_def.default_constraint.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()).context(
|
||||
error::ConvertColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
},
|
||||
)?,
|
||||
)
|
||||
};
|
||||
|
||||
ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
|
||||
}
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,209 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
tonic::include_proto!("greptime.v1.meta");
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
pub const PROTOCOL_VERSION: u64 = 1;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct PeerDict {
|
||||
peers: HashMap<Peer, usize>,
|
||||
index: usize,
|
||||
}
|
||||
|
||||
impl PeerDict {
|
||||
pub fn get_or_insert(&mut self, peer: Peer) -> usize {
|
||||
let index = self.peers.entry(peer).or_insert_with(|| {
|
||||
let v = self.index;
|
||||
self.index += 1;
|
||||
v
|
||||
});
|
||||
|
||||
*index
|
||||
}
|
||||
|
||||
pub fn into_peers(self) -> Vec<Peer> {
|
||||
let mut array = vec![Peer::default(); self.index];
|
||||
for (p, i) in self.peers {
|
||||
array[i] = p;
|
||||
}
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_hash_xor_eq)]
|
||||
impl Hash for Peer {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.id.hash(state);
|
||||
self.addr.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Peer {}
|
||||
|
||||
impl RequestHeader {
|
||||
#[inline]
|
||||
pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
|
||||
Self {
|
||||
protocol_version: PROTOCOL_VERSION,
|
||||
cluster_id,
|
||||
member_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ResponseHeader {
|
||||
#[inline]
|
||||
pub fn success(cluster_id: u64) -> Self {
|
||||
Self {
|
||||
protocol_version: PROTOCOL_VERSION,
|
||||
cluster_id,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn failed(cluster_id: u64, error: Error) -> Self {
|
||||
Self {
|
||||
protocol_version: PROTOCOL_VERSION,
|
||||
cluster_id,
|
||||
error: Some(error),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn is_not_leader(&self) -> bool {
|
||||
if let Some(error) = &self.error {
|
||||
if error.code == ErrorCode::NotLeader as i32 {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ErrorCode {
|
||||
NoActiveDatanodes = 1,
|
||||
NotLeader = 2,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
#[inline]
|
||||
pub fn no_active_datanodes() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::NoActiveDatanodes as i32,
|
||||
err_msg: "No active datanodes".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn is_not_leader() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::NotLeader as i32,
|
||||
err_msg: "Current server is not leader".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HeartbeatResponse {
|
||||
#[inline]
|
||||
pub fn is_not_leader(&self) -> bool {
|
||||
if let Some(header) = &self.header {
|
||||
return header.is_not_leader();
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! gen_set_header {
|
||||
($req: ty) => {
|
||||
impl $req {
|
||||
#[inline]
|
||||
pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
|
||||
self.header = Some(RequestHeader::new((cluster_id, member_id)));
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
gen_set_header!(HeartbeatRequest);
|
||||
gen_set_header!(RouteRequest);
|
||||
gen_set_header!(CreateRequest);
|
||||
gen_set_header!(RangeRequest);
|
||||
gen_set_header!(DeleteRequest);
|
||||
gen_set_header!(PutRequest);
|
||||
gen_set_header!(BatchPutRequest);
|
||||
gen_set_header!(CompareAndPutRequest);
|
||||
gen_set_header!(DeleteRangeRequest);
|
||||
gen_set_header!(MoveValueRequest);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::vec;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_peer_dict() {
|
||||
let mut dict = PeerDict::default();
|
||||
|
||||
dict.get_or_insert(Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 2,
|
||||
addr: "222".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
});
|
||||
dict.get_or_insert(Peer {
|
||||
id: 2,
|
||||
addr: "222".to_string(),
|
||||
});
|
||||
|
||||
assert_eq!(2, dict.index);
|
||||
assert_eq!(
|
||||
vec![
|
||||
Peer {
|
||||
id: 1,
|
||||
addr: "111".to_string(),
|
||||
},
|
||||
Peer {
|
||||
id: 2,
|
||||
addr: "222".to_string(),
|
||||
}
|
||||
],
|
||||
dict.into_peers()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -18,25 +18,28 @@ common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-runtime = { path = "../common/runtime" }
|
||||
common-telemetry = { path = "../common/telemetry" }
|
||||
common-time = { path = "../common/time" }
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../datatypes" }
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static = "1.4"
|
||||
meta-client = { path = "../meta-client" }
|
||||
parking_lot = "0.12"
|
||||
regex = "1.6"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
session = { path = "../session" }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
storage = { path = "../storage" }
|
||||
table = { path = "../table" }
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
chrono = "0.4"
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
chrono.workspace = true
|
||||
log-store = { path = "../log-store" }
|
||||
mito = { path = "../mito", features = ["test"] }
|
||||
object-store = { path = "../object-store" }
|
||||
storage = { path = "../storage" }
|
||||
tempdir = "0.3"
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -13,14 +13,16 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::prelude::{Snafu, StatusCode};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::RawSchema;
|
||||
use snafu::{Backtrace, ErrorCompat};
|
||||
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
@@ -96,18 +98,15 @@ pub enum Error {
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, backtrace: Backtrace },
|
||||
|
||||
#[snafu(display("Table `{}` not exist", table))]
|
||||
TableNotExist { table: String, backtrace: Backtrace },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
SchemaExists {
|
||||
schema: String,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to register table"))]
|
||||
RegisterTable {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Operation {} not implemented yet", operation))]
|
||||
Unimplemented {
|
||||
operation: String,
|
||||
@@ -142,6 +141,17 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to deregister table, request: {:?}, source: {}",
|
||||
request,
|
||||
source
|
||||
))]
|
||||
DeregisterTable {
|
||||
request: DeregisterTableRequest,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal catalog manager state: {}", msg))]
|
||||
IllegalManagerState { backtrace: Backtrace, msg: String },
|
||||
|
||||
@@ -151,21 +161,17 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
|
||||
table_info,
|
||||
schema,
|
||||
source
|
||||
))]
|
||||
InvalidTableSchema {
|
||||
table_info: String,
|
||||
schema: RawSchema,
|
||||
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
|
||||
SchemaProviderOperation {
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
|
||||
SchemaProviderOperation { source: BoxedError },
|
||||
#[snafu(display("{source}"))]
|
||||
Internal {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
|
||||
SystemCatalogTableScanExec {
|
||||
@@ -178,15 +184,6 @@ pub enum Error {
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
|
||||
Io {
|
||||
backtrace: Backtrace,
|
||||
source: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
|
||||
CatalogStateInconsistent { msg: String, backtrace: Backtrace },
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
|
||||
MetaSrv {
|
||||
#[snafu(backtrace)]
|
||||
@@ -199,11 +196,14 @@ pub enum Error {
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog internal error: {}", source))]
|
||||
Internal {
|
||||
#[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
|
||||
CatalogEntrySerde {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -216,39 +216,40 @@ impl ErrorExt for Error {
|
||||
| Error::TableNotFound { .. }
|
||||
| Error::IllegalManagerState { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
|
||||
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. }
|
||||
| Error::Io { .. } => StatusCode::StorageUnavailable,
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::RegisterTable { .. } | Error::SystemCatalogTypeMismatch { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } => source.status_code(),
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaExists { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. } => source.status_code(),
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::DeregisterTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::MetaSrv { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source } => source.status_code(),
|
||||
Error::InvalidTableSchema { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
|
||||
Error::Internal { source, .. } | Error::SchemaProviderOperation { source } => {
|
||||
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
|
||||
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,10 +24,10 @@ use serde::{Deserialize, Serialize, Serializer};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId, TableVersion};
|
||||
|
||||
const CATALOG_KEY_PREFIX: &str = "__c";
|
||||
const SCHEMA_KEY_PREFIX: &str = "__s";
|
||||
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
|
||||
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
|
||||
pub const CATALOG_KEY_PREFIX: &str = "__c";
|
||||
pub const SCHEMA_KEY_PREFIX: &str = "__s";
|
||||
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
|
||||
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
|
||||
|
||||
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
|
||||
|
||||
@@ -370,4 +370,10 @@ mod tests {
|
||||
let deserialized = TableGlobalValue::parse(serialized).unwrap();
|
||||
assert_eq!(value, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_table_global_value_compatibility() {
|
||||
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
|
||||
TableGlobalValue::parse(s).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ pub mod local;
|
||||
pub mod remote;
|
||||
pub mod schema;
|
||||
pub mod system;
|
||||
pub mod table_source;
|
||||
pub mod tables;
|
||||
|
||||
/// Represent a list of named catalogs
|
||||
@@ -107,7 +108,12 @@ pub trait CatalogManager: CatalogList {
    fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;

    /// Returns the table by catalog, schema and table name.
    fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
    async fn table(
        &self,
        catalog: &str,
        schema: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>>;
}

pub type CatalogManagerRef = Arc<dyn CatalogManager>;
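With this change a table lookup through the manager must be awaited. A minimal caller-side sketch, assuming the async signature above (the catalog, schema and table names are illustrative):

async fn print_table_id(manager: &CatalogManagerRef) -> Result<()> {
    // Look up a table by its fully qualified name; the lookup is now async.
    if let Some(table) = manager.table("greptime", "public", "my_table").await? {
        // TableRef exposes the table metadata, including its id.
        println!("table id: {}", table.table_info().ident.table_id);
    }
    Ok(())
}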
@@ -154,7 +160,7 @@ pub struct RenameTableRequest {
|
||||
pub table_id: TableId,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DeregisterTableRequest {
|
||||
pub catalog: String,
|
||||
pub schema: String,
|
||||
@@ -167,11 +173,6 @@ pub struct RegisterSchemaRequest {
|
||||
pub schema: String,
|
||||
}
|
||||
|
||||
/// Formats table fully-qualified name
|
||||
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
|
||||
format!("{catalog}.{schema}.{table}")
|
||||
}
|
||||
|
||||
pub trait CatalogProviderFactory {
|
||||
fn create(&self, catalog_name: String) -> CatalogProviderRef;
|
||||
}
|
||||
@@ -191,15 +192,18 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
|
||||
let table_name = &req.create_table_request.table_name;
|
||||
let table_id = req.create_table_request.id;
|
||||
|
||||
let table = if let Some(table) = manager.table(catalog_name, schema_name, table_name)? {
|
||||
let table = manager.table(catalog_name, schema_name, table_name).await?;
|
||||
let table = if let Some(table) = table {
|
||||
table
|
||||
} else {
|
||||
let table = engine
|
||||
.create_table(&EngineContext::default(), req.create_table_request.clone())
|
||||
.await
|
||||
.with_context(|_| CreateTableSnafu {
|
||||
table_info: format!(
|
||||
"{catalog_name}.{schema_name}.{table_name}, id: {table_id}",
|
||||
table_info: common_catalog::format_full_table_name(
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
),
|
||||
})?;
|
||||
manager
|
||||
@@ -222,7 +226,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
}

/// The number of regions in the datanode node.
pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
    let mut region_number: u64 = 0;

    for catalog_name in catalog_manager.catalog_names()? {
@@ -242,11 +246,13 @@ pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
            })?;

            for table_name in schema.table_names()? {
                let table = schema
                    .table(&table_name)?
                    .context(error::TableNotFoundSnafu {
                        table_info: &table_name,
                    })?;
                let table =
                    schema
                        .table(&table_name)
                        .await?
                        .context(error::TableNotFoundSnafu {
                            table_info: &table_name,
                        })?;

                let region_numbers = &table.table_info().meta.region_numbers;
                region_number += region_numbers.len() as u64;
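Because the per-table lookup inside is awaited, `region_number` itself becomes async and callers must await it too. An illustrative sketch, assuming only the items shown above:

use common_telemetry::info;

async fn log_region_count(catalog_manager: &CatalogManagerRef) -> Result<()> {
    // Sums meta.region_numbers across every table known to this datanode.
    let regions = region_number(catalog_manager).await?;
    info!("datanode currently serves {} regions", regions);
    Ok(())
}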
@@ -20,6 +20,7 @@ use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
|
||||
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
|
||||
};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_telemetry::{error, info};
|
||||
use datatypes::prelude::ScalarVector;
|
||||
@@ -34,9 +35,9 @@ use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
|
||||
SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
|
||||
TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
|
||||
self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
|
||||
Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
|
||||
SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use crate::system::{
|
||||
@@ -45,10 +46,9 @@ use crate::system::{
|
||||
};
|
||||
use crate::tables::SystemCatalog;
|
||||
use crate::{
|
||||
format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
|
||||
CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
|
||||
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest, SchemaProvider,
|
||||
SchemaProviderRef,
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
|
||||
@@ -252,7 +252,6 @@ impl LocalCatalogManager {
|
||||
schema_name: t.schema_name.clone(),
|
||||
table_name: t.table_name.clone(),
|
||||
table_id: t.table_id,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
|
||||
let option = self
|
||||
@@ -346,7 +345,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
if let Some(existing) = schema.table(&request.table_name)? {
|
||||
if let Some(existing) = schema.table(&request.table_name).await? {
|
||||
if existing.table_info().ident.table_id != request.table_id {
|
||||
error!(
|
||||
"Unexpected table register request: {:?}, existing: {:?}",
|
||||
@@ -419,11 +418,37 @@ impl CatalogManager for LocalCatalogManager {
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
|
||||
UnimplementedSnafu {
|
||||
operation: "deregister table",
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
{
|
||||
let started = *self.init_lock.lock().await;
|
||||
ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
|
||||
}
|
||||
|
||||
{
|
||||
let _ = self.register_lock.lock().await;
|
||||
|
||||
let DeregisterTableRequest {
|
||||
catalog,
|
||||
schema,
|
||||
table_name,
|
||||
} = &request;
|
||||
let table_id = self
|
||||
.catalogs
|
||||
.table(catalog, schema, table_name)
|
||||
.await?
|
||||
.with_context(|| error::TableNotExistSnafu {
|
||||
table: format_full_table_name(catalog, schema, table_name),
|
||||
})?
|
||||
.table_info()
|
||||
.ident
|
||||
.table_id;
|
||||
|
||||
if !self.system.deregister_table(&request, table_id).await? {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
self.catalogs.deregister_table(request).await
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
@@ -481,7 +506,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
.schema(schema)
|
||||
}
|
||||
|
||||
fn table(
|
||||
async fn table(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
@@ -497,7 +522,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name)
|
||||
schema.table(table_name).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -18,15 +18,16 @@ use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_telemetry::error;
|
||||
use snafu::OptionExt;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::schema::SchemaProvider;
|
||||
use crate::{
|
||||
@@ -155,16 +156,20 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
}
|
||||
}
|
||||
|
||||
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
|
||||
let c = self.catalogs.read().unwrap();
|
||||
let catalog = if let Some(c) = c.get(catalog) {
|
||||
async fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = {
|
||||
let c = self.catalogs.read().unwrap();
|
||||
let Some(c) = c.get(catalog) else { return Ok(None) };
|
||||
c.clone()
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
match catalog.schema(schema)? {
|
||||
None => Ok(None),
|
||||
Some(s) => s.table(table_name),
|
||||
Some(s) => s.table(table_name).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -250,6 +255,10 @@ impl CatalogProvider for MemoryCatalogProvider {
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
let mut schemas = self.schemas.write().unwrap();
|
||||
ensure!(
|
||||
!schemas.contains_key(&name),
|
||||
error::SchemaExistsSnafu { schema: &name }
|
||||
);
|
||||
Ok(schemas.insert(name, schema))
|
||||
}
|
||||
|
||||
@@ -279,6 +288,7 @@ impl Default for MemorySchemaProvider {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for MemorySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
@@ -289,7 +299,7 @@ impl SchemaProvider for MemorySchemaProvider {
|
||||
Ok(tables.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.get(name).cloned())
|
||||
}
|
||||
@@ -351,8 +361,8 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_new_memory_catalog_list() {
|
||||
#[tokio::test]
|
||||
async fn test_new_memory_catalog_list() {
|
||||
let catalog_list = new_memory_catalog_list().unwrap();
|
||||
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
|
||||
|
||||
@@ -365,9 +375,9 @@ mod tests {
|
||||
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
|
||||
.unwrap();
|
||||
|
||||
let table = default_schema.table("numbers").unwrap();
|
||||
let table = default_schema.table("numbers").await.unwrap();
|
||||
assert!(table.is_some());
|
||||
assert!(default_schema.table("not_exists").unwrap().is_none());
|
||||
assert!(default_schema.table("not_exists").await.unwrap().is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -415,7 +425,7 @@ mod tests {
|
||||
|
||||
// test new table name exists
|
||||
assert!(provider.table_exist(new_table_name).unwrap());
|
||||
let registered_table = provider.table(new_table_name).unwrap().unwrap();
|
||||
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
|
||||
assert_eq!(
|
||||
registered_table.table_info().ident.table_id,
|
||||
test_table.table_info().ident.table_id
|
||||
@@ -464,6 +474,7 @@ mod tests {
|
||||
|
||||
let registered_table = catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(registered_table.table_info().ident.table_id, table_id);
|
||||
|
||||
@@ -13,16 +13,19 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use async_stream::stream;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use common_telemetry::{debug, info};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use dashmap::DashMap;
|
||||
use futures::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use parking_lot::RwLock;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::TableId;
|
||||
@@ -32,12 +35,13 @@ use table::TableRef;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
|
||||
OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
|
||||
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
|
||||
SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
|
||||
};
|
||||
use crate::helper::{
|
||||
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
|
||||
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
|
||||
CATALOG_KEY_PREFIX,
|
||||
};
|
||||
use crate::remote::{Kv, KvBackendRef};
|
||||
use crate::{
|
||||
@@ -50,10 +54,9 @@ use crate::{
|
||||
pub struct RemoteCatalogManager {
|
||||
node_id: u64,
|
||||
backend: KvBackendRef,
|
||||
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
|
||||
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
|
||||
engine: TableEngineRef,
|
||||
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
|
||||
mutex: Arc<Mutex<()>>,
|
||||
}
|
||||
|
||||
impl RemoteCatalogManager {
|
||||
@@ -64,7 +67,6 @@ impl RemoteCatalogManager {
|
||||
backend,
|
||||
catalogs: Default::default(),
|
||||
system_table_requests: Default::default(),
|
||||
mutex: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,9 +110,13 @@ impl RemoteCatalogManager {
|
||||
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
|
||||
continue;
|
||||
}
|
||||
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
|
||||
.context(InvalidCatalogValueSnafu)?;
|
||||
yield Ok(key)
|
||||
|
||||
let catalog_key = String::from_utf8_lossy(&k);
|
||||
if let Ok(key) = CatalogKey::parse(&catalog_key) {
|
||||
yield Ok(key)
|
||||
} else {
|
||||
error!("Invalid catalog key: {:?}", catalog_key);
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
@@ -324,7 +330,6 @@ impl RemoteCatalogManager {
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
table_id,
|
||||
region_numbers: region_numbers.clone(),
|
||||
};
|
||||
match self
|
||||
.engine
|
||||
@@ -347,21 +352,13 @@ impl RemoteCatalogManager {
|
||||
);
|
||||
|
||||
let meta = &table_info.meta;
|
||||
let schema = meta
|
||||
.schema
|
||||
.clone()
|
||||
.try_into()
|
||||
.context(InvalidTableSchemaSnafu {
|
||||
table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
|
||||
schema: meta.schema.clone(),
|
||||
})?;
|
||||
let req = CreateTableRequest {
|
||||
id: table_id,
|
||||
catalog_name: catalog_name.clone(),
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
desc: None,
|
||||
schema: Arc::new(schema),
|
||||
schema: meta.schema.clone(),
|
||||
region_numbers: region_numbers.clone(),
|
||||
primary_key_indices: meta.primary_key_indices.clone(),
|
||||
create_if_not_exists: true,
|
||||
@@ -390,7 +387,14 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
"Initialized catalogs: {:?}",
|
||||
catalogs.keys().cloned().collect::<Vec<_>>()
|
||||
);
|
||||
self.catalogs.store(Arc::new(catalogs));
|
||||
|
||||
{
|
||||
let self_catalogs = self.catalogs.read();
|
||||
catalogs.into_iter().for_each(|(k, v)| {
|
||||
self_catalogs.insert(k, v);
|
||||
});
|
||||
}
|
||||
|
||||
info!("Max table id allocated: {}", max_table_id);
|
||||
|
||||
let mut system_table_requests = self.system_table_requests.lock().await;
|
||||
@@ -431,11 +435,18 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
|
||||
UnimplementedSnafu {
|
||||
operation: "deregister table",
|
||||
}
|
||||
.fail()
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
let schema = self
|
||||
.schema(catalog_name, schema_name)?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
let result = schema.deregister_table(&request.table_name)?;
|
||||
Ok(result.is_none())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
@@ -470,7 +481,7 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
.schema(schema)
|
||||
}
|
||||
|
||||
fn table(
|
||||
async fn table(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
@@ -485,7 +496,7 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name)
|
||||
schema.table(table_name).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -501,12 +512,10 @@ impl CatalogList for RemoteCatalogManager {
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
let key = self.build_catalog_key(&name).to_string();
|
||||
let backend = self.backend.clone();
|
||||
let mutex = self.mutex.clone();
|
||||
let catalogs = self.catalogs.clone();
|
||||
|
||||
std::thread::spawn(|| {
|
||||
common_runtime::block_on_write(async move {
|
||||
let _guard = mutex.lock().await;
|
||||
backend
|
||||
.set(
|
||||
key.as_bytes(),
|
||||
@@ -515,11 +524,10 @@ impl CatalogList for RemoteCatalogManager {
|
||||
.context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
let prev_catalogs = catalogs.load();
|
||||
let mut new_catalogs = HashMap::with_capacity(prev_catalogs.len() + 1);
|
||||
new_catalogs.clone_from(&prev_catalogs);
|
||||
let prev = new_catalogs.insert(name, catalog);
|
||||
catalogs.store(Arc::new(new_catalogs));
|
||||
|
||||
let catalogs = catalogs.read();
|
||||
let prev = catalogs.insert(name, catalog.clone());
|
||||
|
||||
Ok(prev)
|
||||
})
|
||||
})
|
||||
@@ -529,12 +537,65 @@ impl CatalogList for RemoteCatalogManager {
|
||||
|
||||
/// List all catalogs from metasrv
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.catalogs.load().keys().cloned().collect::<Vec<_>>())
|
||||
let catalogs = self.catalogs.read();
|
||||
Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
|
||||
}
|
||||
|
||||
/// Read catalog info of given name from metasrv.
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
Ok(self.catalogs.load().get(name).cloned())
|
||||
{
|
||||
let catalogs = self.catalogs.read();
|
||||
let catalog = catalogs.get(name);
|
||||
|
||||
if let Some(catalog) = catalog {
|
||||
return Ok(Some(catalog.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
let catalogs = self.catalogs.write();
|
||||
|
||||
let catalog = catalogs.get(name);
|
||||
if let Some(catalog) = catalog {
|
||||
return Ok(Some(catalog.clone()));
|
||||
}
|
||||
|
||||
// It's for lack of incremental catalog syncing between datanode and meta. Here we fetch catalog
|
||||
// from meta on demand. This can be removed when incremental catalog syncing is done in datanode.
|
||||
|
||||
let backend = self.backend.clone();
|
||||
|
||||
let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
|
||||
common_runtime::block_on_read(async move {
|
||||
let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
|
||||
let mut catalogs = HashSet::new();
|
||||
|
||||
while let Some(catalog) = stream.next().await {
|
||||
if let Ok(catalog) = catalog {
|
||||
let catalog_key = String::from_utf8_lossy(&catalog.0);
|
||||
|
||||
if let Ok(key) = CatalogKey::parse(&catalog_key) {
|
||||
catalogs.insert(key.catalog_name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
catalogs
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap();
|
||||
|
||||
catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());
|
||||
|
||||
for catalog in catalogs_from_meta {
|
||||
catalogs
|
||||
.entry(catalog.clone())
|
||||
.or_insert(self.new_catalog_provider(&catalog));
|
||||
}
|
||||
|
||||
let catalog = catalogs.get(name);
|
||||
|
||||
Ok(catalog.as_deref().cloned())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -694,6 +755,7 @@ impl RemoteSchemaProvider {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for RemoteSchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
@@ -703,7 +765,7 @@ impl SchemaProvider for RemoteSchemaProvider {
|
||||
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
Ok(self.tables.load().get(name).cloned())
|
||||
}
|
||||
|
||||
|
||||
@@ -15,11 +15,13 @@
use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use table::TableRef;

use crate::error::Result;

/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
    /// Returns the schema provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
@@ -29,7 +31,7 @@ pub trait SchemaProvider: Sync + Send {
    fn table_names(&self) -> Result<Vec<String>>;

    /// Retrieves a specific table from the schema by name, provided it exists.
    fn table(&self, name: &str) -> Result<Option<TableRef>>;
    async fn table(&self, name: &str) -> Result<Option<TableRef>>;

    /// If supported by the implementation, adds a new table to this schema.
    /// If a table of the same name existed before, it returns "Table already exists" error.
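Lookups through a `SchemaProvider` now also go through `.await`; implementors annotate their impl blocks with `#[async_trait]`, as the memory and remote providers in this change do. A small caller-side sketch (the table name is illustrative):

// Check whether a named table exists in a schema.
async fn has_table(schema: &dyn SchemaProvider, name: &str) -> Result<bool> {
    Ok(schema.table(name).await?.is_some())
}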
@@ -25,29 +25,29 @@ use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_telemetry::debug;
|
||||
use common_time::util;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVector};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
|
||||
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
|
||||
use table::requests::{
|
||||
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{
|
||||
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
|
||||
OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
|
||||
};
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
pub const ENTRY_TYPE_INDEX: usize = 0;
|
||||
pub const KEY_INDEX: usize = 1;
|
||||
pub const VALUE_INDEX: usize = 3;
|
||||
|
||||
pub struct SystemCatalogTable {
|
||||
table_info: TableInfoRef,
|
||||
pub table: TableRef,
|
||||
}
|
||||
pub struct SystemCatalogTable(TableRef);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for SystemCatalogTable {
|
||||
@@ -56,25 +56,29 @@ impl Table for SystemCatalogTable {
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.table_info.meta.schema.clone()
|
||||
self.0.schema()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
projection: Option<&Vec<usize>>,
|
||||
filters: &[Expr],
|
||||
limit: Option<usize>,
|
||||
) -> table::Result<PhysicalPlanRef> {
|
||||
panic!("System catalog table does not support scan!")
|
||||
self.0.scan(projection, filters, limit).await
|
||||
}
|
||||
|
||||
/// Insert values into table.
|
||||
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
|
||||
self.table.insert(request).await
|
||||
self.0.insert(request).await
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
self.table_info.clone()
|
||||
self.0.table_info()
|
||||
}
|
||||
|
||||
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
|
||||
self.0.delete(request).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,9 +89,8 @@ impl SystemCatalogTable {
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let schema = Arc::new(build_system_catalog_schema());
|
||||
let schema = build_system_catalog_schema();
|
||||
let ctx = EngineContext::default();
|
||||
|
||||
if let Some(table) = engine
|
||||
@@ -95,10 +98,7 @@ impl SystemCatalogTable {
|
||||
.await
|
||||
.context(OpenSystemCatalogSnafu)?
|
||||
{
|
||||
Ok(Self {
|
||||
table_info: table.table_info(),
|
||||
table,
|
||||
})
|
||||
Ok(Self(table))
|
||||
} else {
|
||||
// system catalog table is not yet created, try to create
|
||||
let request = CreateTableRequest {
|
||||
@@ -107,19 +107,18 @@ impl SystemCatalogTable {
                schema_name: INFORMATION_SCHEMA_NAME.to_string(),
                table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
                desc: Some("System catalog table".to_string()),
                schema: schema.clone(),
                schema,
                region_numbers: vec![0],
                primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
                create_if_not_exists: true,
                table_options: HashMap::new(),
                table_options: TableOptions::default(),
            };

            let table = engine
                .create_table(&ctx, request)
                .await
                .context(CreateSystemCatalogSnafu)?;
            let table_info = table.table_info();
            Ok(Self { table, table_info })
            Ok(Self(table))
        }
    }
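The create request now carries a plain `RawSchema` and a typed `TableOptions` instead of an `Arc<Schema>` and a bare `HashMap`. A hedged sketch of building such a schema for an ordinary table; the `ConcreteDataType` constructors and `with_time_index` call are assumptions about the datatypes API, not taken from this diff:

// Illustrative only: a RawSchema with one tag, one value and one time index column.
fn example_raw_schema() -> RawSchema {
    RawSchema::new(vec![
        ColumnSchema::new("host".to_string(), ConcreteDataType::string_datatype(), true),
        ColumnSchema::new("cpu".to_string(), ConcreteDataType::float64_datatype(), true),
        ColumnSchema::new(
            "ts".to_string(),
            ConcreteDataType::timestamp_millisecond_datatype(),
            false,
        )
        .with_time_index(true),
    ])
}

Such a value can then be placed directly in a CreateTableRequest's `schema` field, with `table_options: TableOptions::default()`, as the system catalog request above does.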
@@ -128,7 +127,6 @@ impl SystemCatalogTable {
|
||||
let full_projection = None;
|
||||
let ctx = SessionContext::new();
|
||||
let scan = self
|
||||
.table
|
||||
.scan(full_projection, &[], None)
|
||||
.await
|
||||
.context(error::SystemCatalogTableScanSnafu)?;
|
||||
@@ -147,7 +145,7 @@ impl SystemCatalogTable {
|
||||
/// - value: JSON-encoded value of entry's metadata.
|
||||
/// - gmt_created: create time of this metadata.
|
||||
/// - gmt_modified: last updated time of this metadata.
|
||||
fn build_system_catalog_schema() -> Schema {
|
||||
fn build_system_catalog_schema() -> RawSchema {
|
||||
let cols = vec![
|
||||
ColumnSchema::new(
|
||||
"entry_type".to_string(),
|
||||
@@ -182,8 +180,7 @@ fn build_system_catalog_schema() -> Schema {
|
||||
),
|
||||
];
|
||||
|
||||
// The schema of this table must be valid.
|
||||
SchemaBuilder::try_from(cols).unwrap().build().unwrap()
|
||||
RawSchema::new(cols)
|
||||
}
|
||||
|
||||
/// Formats key string for table entry in system catalog
|
||||
@@ -208,6 +205,34 @@ pub fn build_table_insert_request(
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn build_table_deletion_request(
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> DeleteRequest {
|
||||
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
|
||||
DeleteRequest {
|
||||
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
|
||||
let mut m = HashMap::with_capacity(3);
|
||||
m.insert(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
|
||||
);
|
||||
m.insert(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as _,
|
||||
);
|
||||
// Timestamp in key part is intentionally left to 0
|
||||
m.insert(
|
||||
"timestamp".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
|
||||
);
|
||||
m
|
||||
}
|
||||
|
||||
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
|
||||
let full_schema_name = format!("{catalog_name}.{schema_name}");
|
||||
build_insert_request(
|
||||
@@ -220,22 +245,10 @@ pub fn build_schema_insert_request(catalog_name: String, schema_name: String) ->
|
||||
}
|
||||
|
||||
pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
|
||||
let primary_key_columns = build_primary_key_columns(entry_type, key);
|
||||
|
||||
let mut columns_values = HashMap::with_capacity(6);
|
||||
columns_values.insert(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
|
||||
);
|
||||
|
||||
columns_values.insert(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as _,
|
||||
);
|
||||
|
||||
// Timestamp in key part is intentionally left to 0
|
||||
columns_values.insert(
|
||||
"timestamp".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
|
||||
);
|
||||
columns_values.extend(primary_key_columns.into_iter());
|
||||
|
||||
columns_values.insert(
|
||||
"value".to_string(),
|
||||
@@ -245,12 +258,12 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
|
||||
let now = util::current_time_millis();
|
||||
columns_values.insert(
|
||||
"gmt_created".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
|
||||
columns_values.insert(
|
||||
"gmt_modified".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
|
||||
InsertRequest {
|
||||
@@ -258,6 +271,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
columns_values,
|
||||
region_number: 0, // system catalog table has only one region
|
||||
}
|
||||
}
|
||||
|
||||
@@ -380,15 +394,18 @@ pub struct TableEntryValue {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use datatypes::value::Value;
|
||||
use log_store::NoopLogStore;
|
||||
use mito::config::EngineConfig;
|
||||
use mito::engine::MitoEngine;
|
||||
use object_store::ObjectStore;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::metadata::TableType;
|
||||
use table::metadata::TableType::Base;
|
||||
use tempdir::TempDir;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -463,19 +480,21 @@ mod tests {
|
||||
}
|
||||
|
||||
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
|
||||
let dir = TempDir::new("system-table-test").unwrap();
|
||||
let dir = create_temp_dir("system-table-test");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let accessor = object_store::backend::fs::Builder::default()
|
||||
let accessor = object_store::services::Fs::default()
|
||||
.root(&store_dir)
|
||||
.build()
|
||||
.unwrap();
|
||||
let object_store = ObjectStore::new(accessor);
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let table_engine = Arc::new(MitoEngine::new(
|
||||
EngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
noop_compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
@@ -500,4 +519,53 @@ mod tests {
|
||||
assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
|
||||
assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_system_catalog_table_records() {
|
||||
let (_, table_engine) = prepare_table_engine().await;
|
||||
let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();
|
||||
|
||||
let table_insertion = build_table_insert_request(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
DEFAULT_SCHEMA_NAME.to_string(),
|
||||
"my_table".to_string(),
|
||||
1,
|
||||
);
|
||||
let result = catalog_table.insert(table_insertion).await.unwrap();
|
||||
assert_eq!(result, 1);
|
||||
|
||||
let records = catalog_table.records().await.unwrap();
|
||||
let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
|
||||
assert_eq!(batches.len(), 1);
|
||||
let batch = batches.remove(0);
|
||||
assert_eq!(batch.num_rows(), 1);
|
||||
|
||||
let row = batch.rows().next().unwrap();
|
||||
let Value::UInt8(entry_type) = row[0] else { unreachable!() };
|
||||
let Value::Binary(key) = row[1].clone() else { unreachable!() };
|
||||
let Value::Binary(value) = row[3].clone() else { unreachable!() };
|
||||
let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
|
||||
let expected = Entry::Table(TableEntry {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "my_table".to_string(),
|
||||
table_id: 1,
|
||||
});
|
||||
assert_eq!(entry, expected);
|
||||
|
||||
let table_deletion = build_table_deletion_request(
|
||||
&DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "my_table".to_string(),
|
||||
},
|
||||
1,
|
||||
);
|
||||
let result = catalog_table.delete(table_deletion).await.unwrap();
|
||||
assert_eq!(result, 1);
|
||||
|
||||
let records = catalog_table.records().await.unwrap();
|
||||
let batches = RecordBatches::try_collect(records).await.unwrap().take();
|
||||
assert_eq!(batches.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
src/catalog/src/table_source.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::format_full_table_name;
|
||||
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::provider_as_source;
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
|
||||
};
|
||||
use crate::CatalogListRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
catalog_list: CatalogListRef,
|
||||
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
|
||||
disallow_cross_schema_query: bool,
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
}
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
pub fn new(
|
||||
catalog_list: CatalogListRef,
|
||||
disallow_cross_schema_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_list,
|
||||
disallow_cross_schema_query,
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog(),
|
||||
default_schema: query_ctx.current_schema(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resolve_table_ref<'a>(
|
||||
&'a self,
|
||||
table_ref: TableReference<'a>,
|
||||
) -> Result<ResolvedTableReference<'a>> {
|
||||
if self.disallow_cross_schema_query {
|
||||
match &table_ref {
|
||||
TableReference::Bare { .. } => (),
|
||||
TableReference::Partial { schema, .. } => {
|
||||
ensure!(
|
||||
schema.as_ref() == self.default_schema,
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: &self.default_catalog,
|
||||
schema: schema.as_ref(),
|
||||
}
|
||||
);
|
||||
}
|
||||
TableReference::Full {
|
||||
catalog, schema, ..
|
||||
} => {
|
||||
ensure!(
|
||||
catalog.as_ref() == self.default_catalog
|
||||
&& schema.as_ref() == self.default_schema,
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: catalog.as_ref(),
|
||||
schema: schema.as_ref()
|
||||
}
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(table_ref.resolve(&self.default_catalog, &self.default_schema))
|
||||
}
|
||||
|
||||
pub async fn resolve_table(
|
||||
&mut self,
|
||||
table_ref: OwnedTableReference,
|
||||
) -> Result<Arc<dyn TableSource>> {
|
||||
let table_ref = table_ref.as_table_reference();
|
||||
let table_ref = self.resolve_table_ref(table_ref)?;
|
||||
|
||||
let resolved_name = table_ref.to_string();
|
||||
if let Some(table) = self.resolved_tables.get(&resolved_name) {
|
||||
return Ok(table.clone());
|
||||
}
|
||||
|
||||
let catalog_name = table_ref.catalog.as_ref();
|
||||
let schema_name = table_ref.schema.as_ref();
|
||||
let table_name = table_ref.table.as_ref();
|
||||
|
||||
let catalog = self
|
||||
.catalog_list
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
let table = schema
|
||||
.table(table_name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?;
|
||||
|
||||
let table = DfTableProviderAdapter::new(table);
|
||||
let table = provider_as_source(Arc::new(table));
|
||||
self.resolved_tables.insert(resolved_name, table.clone());
|
||||
Ok(table)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::local::MemoryCatalogManager;
|
||||
|
||||
#[test]
|
||||
fn test_validate_table_ref() {
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
|
||||
let table_provider =
|
||||
DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
|
||||
|
||||
let table_ref = TableReference::Bare {
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("wrong_schema"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_err());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("wrong_catalog"),
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
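The provider validates the session's catalog and schema before any table is resolved. A brief sketch of that behavior, mirroring the test above (table and schema names are illustrative):

fn resolve_bare_name() {
    let query_ctx = &QueryContext::with("greptime", "public");
    let provider =
        DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);

    // A bare name resolves against the session defaults.
    let resolved = provider
        .resolve_table_ref(TableReference::Bare {
            table: Cow::Borrowed("metrics"),
        })
        .unwrap();
    assert_eq!(resolved.catalog.as_ref(), "greptime");
    assert_eq!(resolved.schema.as_ref(), "public");

    // A reference into another schema is rejected with QueryAccessDenied.
    let denied = provider.resolve_table_ref(TableReference::Partial {
        schema: Cow::Borrowed("other_schema"),
        table: Cow::Borrowed("metrics"),
    });
    assert!(denied.is_err());
}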
@@ -20,6 +20,7 @@ use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use async_stream::stream;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::logical_plan::Expr;
|
||||
@@ -38,9 +39,14 @@ use table::metadata::{TableId, TableInfoRef};
|
||||
use table::table::scan::SimpleTableScan;
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{Error, InsertCatalogRecordSnafu};
|
||||
use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
|
||||
use crate::{CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef};
|
||||
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
|
||||
use crate::system::{
|
||||
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
|
||||
SystemCatalogTable,
|
||||
};
|
||||
use crate::{
|
||||
CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// Tables holds all tables created by user.
|
||||
pub struct Tables {
|
||||
@@ -157,16 +163,10 @@ fn tables_to_record_batch(
|
||||
|
||||
for table_name in table_names {
|
||||
// Safety: All these vectors are string type.
|
||||
catalog_vec
|
||||
.push_value_ref(ValueRef::String(catalog_name))
|
||||
.unwrap();
|
||||
schema_vec
|
||||
.push_value_ref(ValueRef::String(schema_name))
|
||||
.unwrap();
|
||||
table_name_vec
|
||||
.push_value_ref(ValueRef::String(&table_name))
|
||||
.unwrap();
|
||||
engine_vec.push_value_ref(ValueRef::String(engine)).unwrap();
|
||||
catalog_vec.push_value_ref(ValueRef::String(catalog_name));
|
||||
schema_vec.push_value_ref(ValueRef::String(schema_name));
|
||||
table_name_vec.push_value_ref(ValueRef::String(&table_name));
|
||||
engine_vec.push_value_ref(ValueRef::String(engine));
|
||||
}
|
||||
|
||||
vec![
|
||||
@@ -201,6 +201,7 @@ pub struct InformationSchema {
|
||||
pub system: Arc<SystemCatalogTable>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for InformationSchema {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
@@ -213,7 +214,7 @@ impl SchemaProvider for InformationSchema {
|
||||
])
|
||||
}
|
||||
|
||||
fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
|
||||
if name.eq_ignore_ascii_case("tables") {
|
||||
Ok(Some(self.tables.clone()))
|
||||
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
|
||||
@@ -279,6 +280,21 @@ impl SystemCatalog {
|
||||
.context(InsertCatalogRecordSnafu)
|
||||
}
|
||||
|
||||
pub(crate) async fn deregister_table(
|
||||
&self,
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> CatalogResult<bool> {
|
||||
self.information_schema
|
||||
.system
|
||||
.delete(build_table_deletion_request(request, table_id))
|
||||
.await
|
||||
.map(|x| x == 1)
|
||||
.with_context(|_| error::DeregisterTableSnafu {
|
||||
request: request.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn register_schema(
|
||||
&self,
|
||||
catalog: String,
|
||||
|
||||
@@ -71,6 +71,7 @@ mod tests {
|
||||
|
||||
let registered_table = catalog_manager
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(registered_table.table_info().ident.table_id, table_id);
|
||||
@@ -158,6 +159,7 @@ mod tests {
|
||||
let table = guard.as_ref().unwrap();
|
||||
let table_registered = catalog_manager
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
|
||||
@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
|
||||
let table_id = TableId::from_str(
|
||||
request
|
||||
.table_options
|
||||
.extra_options
|
||||
.get("table_id")
|
||||
.unwrap_or(&default_table_id),
|
||||
)
|
||||
|
||||
@@ -28,7 +28,7 @@ mod tests {
|
||||
};
|
||||
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use datatypes::schema::Schema;
|
||||
use datatypes::schema::RawSchema;
|
||||
use futures_util::StreamExt;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::requests::CreateTableRequest;
|
||||
@@ -116,7 +116,7 @@ mod tests {
|
||||
let schema_name = "nonexistent_schema".to_string();
|
||||
let table_name = "fail_table".to_string();
|
||||
// this schema has no effect
|
||||
let table_schema = Arc::new(Schema::new(vec![]));
|
||||
let table_schema = RawSchema::new(vec![]);
|
||||
let table = table_engine
|
||||
.create_table(
|
||||
&EngineContext {},
|
||||
@@ -126,7 +126,7 @@ mod tests {
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
desc: None,
|
||||
schema: table_schema.clone(),
|
||||
schema: table_schema,
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
@@ -176,7 +176,7 @@ mod tests {
|
||||
let table_name = "test_table".to_string();
|
||||
let table_id = 1;
|
||||
// this schema has no effect
|
||||
let table_schema = Arc::new(Schema::new(vec![]));
|
||||
let table_schema = RawSchema::new(vec![]);
|
||||
let table = table_engine
|
||||
.create_table(
|
||||
&EngineContext {},
|
||||
@@ -186,7 +186,7 @@ mod tests {
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
desc: None,
|
||||
schema: table_schema.clone(),
|
||||
schema: table_schema,
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: "".to_string(),
|
||||
desc: None,
|
||||
schema: Arc::new(Schema::new(vec![])),
|
||||
schema: RawSchema::new(vec![]),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
|
||||
@@ -9,6 +9,7 @@ api = { path = "../api" }
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
common-base = { path = "../common/base" }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
@@ -31,12 +32,8 @@ substrait = { path = "../common/substrait" }
|
||||
tokio.workspace = true
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
|
||||
[dev-dependencies.prost_09]
|
||||
package = "prost"
|
||||
version = "0.9"
|
||||
prost.workspace = true
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.2"
|
||||
version = "0.4"
|
||||
|
||||
@@ -14,11 +14,12 @@
|
||||
|
||||
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
|
||||
use client::{Client, Database};
|
||||
use prost_09::Message;
|
||||
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
|
||||
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
|
||||
use substrait_proto::protobuf::rel::RelType;
|
||||
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use prost::Message;
|
||||
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
|
||||
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
|
||||
use substrait_proto::proto::rel::RelType;
|
||||
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
|
||||
use tracing::{event, Level};
|
||||
|
||||
fn main() {
|
||||
@@ -65,13 +66,12 @@ async fn run() {
|
||||
region_ids: vec![0],
|
||||
};
|
||||
|
||||
let db = Database::new("create table", client.clone());
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let result = db.create(create_table_expr).await.unwrap();
|
||||
event!(Level::INFO, "create table result: {:#?}", result);
|
||||
|
||||
let logical = mock_logical_plan();
|
||||
event!(Level::INFO, "plan size: {:#?}", logical.len());
|
||||
let db = Database::new("greptime", client);
|
||||
let result = db.logical_plan(logical).await.unwrap();
|
||||
|
||||
event!(Level::INFO, "result: {:#?}", result);
|
||||
@@ -89,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
|
||||
let read_type = ReadType::NamedTable(named_table);
|
||||
|
||||
let read_rel = ReadRel {
|
||||
common: None,
|
||||
base_schema: None,
|
||||
filter: None,
|
||||
projection: None,
|
||||
advanced_extension: None,
|
||||
read_type: Some(read_type),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut buf = vec![];
|
||||
|
||||
@@ -14,12 +14,13 @@
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use api::v1::{
|
||||
AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
|
||||
QueryRequest,
|
||||
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
|
||||
InsertRequest, QueryRequest, RequestHeader,
|
||||
};
|
||||
use arrow_flight::{FlightData, Ticket};
|
||||
use common_error::prelude::*;
|
||||
@@ -34,83 +35,106 @@ use crate::{error, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Database {
|
||||
name: String,
|
||||
// The "catalog" and "schema" to be used in processing the requests at the server side.
|
||||
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
|
||||
// They will be carried in the request header.
|
||||
catalog: String,
|
||||
schema: String,
|
||||
|
||||
client: Client,
|
||||
ctx: FlightContext,
|
||||
}
|
||||
|
||||
impl Database {
|
||||
pub fn new(name: impl Into<String>, client: Client) -> Self {
|
||||
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
name: name.into(),
|
||||
catalog: catalog.into(),
|
||||
schema: schema.into(),
|
||||
client,
|
||||
ctx: FlightContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
pub fn catalog(&self) -> &String {
|
||||
&self.catalog
|
||||
}
|
||||
|
||||
pub fn set_catalog(&mut self, catalog: impl Into<String>) {
|
||||
self.catalog = catalog.into();
|
||||
}
|
||||
|
||||
pub fn schema(&self) -> &String {
|
||||
&self.schema
|
||||
}
|
||||
|
||||
pub fn set_schema(&mut self, schema: impl Into<String>) {
|
||||
self.schema = schema.into();
|
||||
}
|
||||
|
||||
pub fn set_auth(&mut self, auth: AuthScheme) {
|
||||
self.ctx.auth_header = Some(AuthHeader {
|
||||
auth_scheme: Some(auth),
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Insert(request)),
|
||||
})
|
||||
.await
|
||||
self.do_get(Request::Insert(request)).await
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.to_string())),
|
||||
})),
|
||||
})
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.to_string())),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
})),
|
||||
})
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
})),
|
||||
})
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
})),
|
||||
})
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
|
||||
self.do_get(GreptimeRequest {
|
||||
request: Some(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
})),
|
||||
})
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_get(&self, request: GreptimeRequest) -> Result<Output> {
|
||||
async fn do_get(&self, request: Request) -> Result<Output> {
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
}),
|
||||
request: Some(request),
|
||||
};
|
||||
let request = Ticket {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
|
||||
let mut client = self.client.make_client()?;
|
||||
|
||||
// TODO(LFC): Streaming get flight data.
|
||||
let flight_data: Vec<FlightData> = client
|
||||
.mut_inner()
|
||||
.do_get(Ticket {
|
||||
ticket: request.encode_to_vec(),
|
||||
})
|
||||
.do_get(request)
|
||||
.and_then(|response| response.into_inner().try_collect())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
@@ -157,12 +181,18 @@ fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct FlightContext {
|
||||
auth_header: Option<AuthHeader>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::Column;
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::{AuthHeader, Basic, Column};
|
||||
use common_grpc::select::{null_mask, values};
|
||||
use common_grpc_expr::column_to_vector;
|
||||
use datatypes::prelude::{Vector, VectorRef};
|
||||
@@ -172,6 +202,8 @@ mod tests {
|
||||
UInt32Vector, UInt64Vector, UInt8Vector,
|
||||
};
|
||||
|
||||
use crate::database::FlightContext;
|
||||
|
||||
#[test]
|
||||
fn test_column_to_vector() {
|
||||
let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));
|
||||
@@ -255,4 +287,26 @@ mod tests {
|
||||
datatype: wrapper.datatype() as i32,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flight_ctx() {
|
||||
let mut ctx = FlightContext::default();
|
||||
assert!(ctx.auth_header.is_none());
|
||||
|
||||
let basic = AuthScheme::Basic(Basic {
|
||||
username: "u".to_string(),
|
||||
password: "p".to_string(),
|
||||
});
|
||||
|
||||
ctx.auth_header = Some(AuthHeader {
|
||||
auth_scheme: Some(basic),
|
||||
});
|
||||
|
||||
assert!(matches!(
|
||||
ctx.auth_header,
|
||||
Some(AuthHeader {
|
||||
auth_scheme: Some(AuthScheme::Basic(_)),
|
||||
})
|
||||
))
|
||||
}
|
||||
}
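The reworked client API above replaces the single database name with a catalog/schema pair that is only carried as a hint in the request header, and folds the GreptimeRequest wrapping into do_get(). A minimal usage sketch, assuming a reachable gRPC endpoint; the address below is illustrative and not part of the change:

use api::v1::auth_header::AuthScheme;
use api::v1::Basic;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

async fn demo() {
    // The endpoint address is an assumption for illustration only.
    let client = Client::with_urls(["127.0.0.1:4001"]);

    // Catalog and schema are context hints carried in the RequestHeader, much like
    // the database chosen by a MySQL "USE" statement.
    let mut db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

    // Optional: basic auth is stored in the FlightContext and attached to every request.
    db.set_auth(AuthScheme::Basic(Basic {
        username: "u".to_string(),
        password: "p".to_string(),
    }));

    // do_get() wraps this into a GreptimeRequest { header, request } Flight ticket.
    let _output = db.sql("SELECT 1").await.unwrap();
}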
|
||||
|
||||
@@ -18,6 +18,7 @@ mod error;
|
||||
pub mod load_balance;
|
||||
|
||||
pub use api;
|
||||
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
|
||||
pub use self::client::Client;
|
||||
pub use self::database::Database;
|
||||
|
||||
@@ -9,27 +9,46 @@ default-run = "greptime"
|
||||
name = "greptime"
|
||||
path = "src/bin/greptime.rs"
|
||||
|
||||
[features]
|
||||
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
|
||||
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
catalog = { path = "../catalog" }
|
||||
clap = { version = "3.1", features = ["derive"] }
|
||||
client = { path = "../client" }
|
||||
common-base = { path = "../common/base" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-telemetry = { path = "../common/telemetry", features = [
|
||||
"deadlock_detection",
|
||||
] }
|
||||
datanode = { path = "../datanode" }
|
||||
either = "1.8"
|
||||
frontend = { path = "../frontend" }
|
||||
futures.workspace = true
|
||||
meta-client = { path = "../meta-client" }
|
||||
meta-srv = { path = "../meta-srv" }
|
||||
nu-ansi-term = "0.46"
|
||||
partition = { path = "../partition" }
|
||||
query = { path = "../query" }
|
||||
rustyline = "10.1"
|
||||
serde.workspace = true
|
||||
servers = { path = "../servers" }
|
||||
session = { path = "../session" }
|
||||
snafu.workspace = true
|
||||
tokio = { version = "1.18", features = ["full"] }
|
||||
substrait = { path = "../common/substrait" }
|
||||
tikv-jemalloc-ctl = { version = "0.5", optional = true }
|
||||
tikv-jemallocator = { version = "0.5", optional = true }
|
||||
tokio.workspace = true
|
||||
toml = "0.5"
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
rexpect = "0.5"
|
||||
serde.workspace = true
|
||||
tempdir = "0.3"
|
||||
|
||||
[build-dependencies]
|
||||
build-data = "0.1.3"
|
||||
|
||||
@@ -16,7 +16,7 @@ use std::fmt;
|
||||
|
||||
use clap::Parser;
|
||||
use cmd::error::Result;
|
||||
use cmd::{datanode, frontend, metasrv, standalone};
|
||||
use cmd::{cli, datanode, frontend, metasrv, standalone};
|
||||
use common_telemetry::logging::{error, info};
|
||||
|
||||
#[derive(Parser)]
|
||||
@@ -46,6 +46,8 @@ enum SubCommand {
|
||||
Metasrv(metasrv::Command),
|
||||
#[clap(name = "standalone")]
|
||||
Standalone(standalone::Command),
|
||||
#[clap(name = "cli")]
|
||||
Cli(cli::Command),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
@@ -55,6 +57,7 @@ impl SubCommand {
|
||||
SubCommand::Frontend(cmd) => cmd.run().await,
|
||||
SubCommand::Metasrv(cmd) => cmd.run().await,
|
||||
SubCommand::Standalone(cmd) => cmd.run().await,
|
||||
SubCommand::Cli(cmd) => cmd.run().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -66,6 +69,7 @@ impl fmt::Display for SubCommand {
|
||||
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
|
||||
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
|
||||
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
|
||||
SubCommand::Cli(_) => write!(f, "greptime-cli"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -83,6 +87,10 @@ fn print_version() -> &'static str {
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(feature = "mem-prof")]
|
||||
#[global_allocator]
|
||||
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let cmd = Command::parse();
|
||||
|
||||
src/cmd/src/cli.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod cmd;
|
||||
mod helper;
|
||||
mod repl;
|
||||
|
||||
use clap::Parser;
|
||||
use repl::Repl;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
cmd: SubCommand,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.cmd.run().await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
Attach(AttachCommand),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
match self {
|
||||
SubCommand::Attach(cmd) => cmd.run().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub(crate) struct AttachCommand {
|
||||
#[clap(long)]
|
||||
pub(crate) grpc_addr: String,
|
||||
#[clap(long)]
|
||||
pub(crate) meta_addr: Option<String>,
|
||||
#[clap(long, action)]
|
||||
pub(crate) disable_helper: bool,
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
let mut repl = Repl::try_new(&self).await?;
|
||||
repl.run().await
|
||||
}
|
||||
}
|
||||
src/cmd/src/cli/cmd.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, InvalidReplCommandSnafu, Result};
|
||||
|
||||
/// Represents the parsed command from the user (which may be over many lines)
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub(crate) enum ReplCommand {
|
||||
Help,
|
||||
UseDatabase { db_name: String },
|
||||
Sql { sql: String },
|
||||
Exit,
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for ReplCommand {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(input: &str) -> Result<Self> {
|
||||
let input = input.trim();
|
||||
if input.is_empty() {
|
||||
return InvalidReplCommandSnafu {
|
||||
reason: "No command specified".to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
// If the line ends with ';', it must be treated as a complete input.
|
||||
// However, the opposite is not true.
|
||||
let input_is_completed = input.ends_with(';');
|
||||
|
||||
let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
|
||||
let lowercase = input.to_lowercase();
|
||||
match lowercase.as_str() {
|
||||
"help" => Ok(Self::Help),
|
||||
"exit" | "quit" => Ok(Self::Exit),
|
||||
_ => match input.split_once(' ') {
|
||||
Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
|
||||
Ok(Self::UseDatabase {
|
||||
db_name: database.trim().to_string(),
|
||||
})
|
||||
}
|
||||
// Any valid SQL must contain at least one whitespace character.
|
||||
Some(_) if input_is_completed => Ok(Self::Sql {
|
||||
sql: input.to_string(),
|
||||
}),
|
||||
_ => InvalidReplCommandSnafu {
|
||||
reason: format!("unknown command '{input}', maybe input is not completed"),
|
||||
}
|
||||
.fail(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReplCommand {
|
||||
pub fn help() -> &'static str {
|
||||
r#"
|
||||
Available commands (case insensitive):
|
||||
- 'help': print this help
|
||||
- 'exit' or 'quit': exit the REPL
|
||||
- 'use <your database name>': switch to another database/schema context
|
||||
- Other typed-in text will be treated as SQL.
|
||||
You can enter new lines while typing; just remember to end the statement with ';'.
|
||||
"#
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::error::Error::InvalidReplCommand;
|
||||
|
||||
#[test]
|
||||
fn test_from_str() {
|
||||
fn test_ok(s: &str, expected: ReplCommand) {
|
||||
let actual: ReplCommand = s.try_into().unwrap();
|
||||
assert_eq!(expected, actual, "'{}'", s);
|
||||
}
|
||||
|
||||
fn test_err(s: &str) {
|
||||
let result: Result<ReplCommand> = s.try_into();
|
||||
assert!(matches!(result, Err(InvalidReplCommand { .. })))
|
||||
}
|
||||
|
||||
test_err("");
|
||||
test_err(" ");
|
||||
test_err("\t");
|
||||
|
||||
test_ok("help", ReplCommand::Help);
|
||||
test_ok("help", ReplCommand::Help);
|
||||
test_ok(" help", ReplCommand::Help);
|
||||
test_ok(" help ", ReplCommand::Help);
|
||||
test_ok(" HELP ", ReplCommand::Help);
|
||||
test_ok(" Help; ", ReplCommand::Help);
|
||||
test_ok(" help ; ", ReplCommand::Help);
|
||||
|
||||
test_ok("exit", ReplCommand::Exit);
|
||||
test_ok("exit;", ReplCommand::Exit);
|
||||
test_ok("exit ;", ReplCommand::Exit);
|
||||
test_ok("EXIT", ReplCommand::Exit);
|
||||
|
||||
test_ok("quit", ReplCommand::Exit);
|
||||
test_ok("quit;", ReplCommand::Exit);
|
||||
test_ok("quit ;", ReplCommand::Exit);
|
||||
test_ok("QUIT", ReplCommand::Exit);
|
||||
|
||||
test_ok(
|
||||
"use Foo",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "Foo".to_string(),
|
||||
},
|
||||
);
|
||||
test_ok(
|
||||
" use Foo ; ",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "Foo".to_string(),
|
||||
},
|
||||
);
|
||||
// ensure that the database name's original casing is preserved
|
||||
test_ok(
|
||||
" use FOO ; ",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "FOO".to_string(),
|
||||
},
|
||||
);
|
||||
|
||||
// ensure that we aren't messing with capitalization
|
||||
test_ok(
|
||||
"SELECT * from foo;",
|
||||
ReplCommand::Sql {
|
||||
sql: "SELECT * from foo".to_string(),
|
||||
},
|
||||
);
|
||||
// Input lines (that don't belong to any of the cases above) must end with ';' to be treated as valid SQL.
|
||||
test_err("insert blah");
|
||||
test_ok(
|
||||
"insert blah;",
|
||||
ReplCommand::Sql {
|
||||
sql: "insert blah".to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
src/cmd/src/cli/helper.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use rustyline::completion::Completer;
|
||||
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
|
||||
use rustyline::hint::{Hinter, HistoryHinter};
|
||||
use rustyline::validate::{ValidationContext, ValidationResult, Validator};
|
||||
|
||||
use crate::cli::cmd::ReplCommand;
|
||||
|
||||
pub(crate) struct RustylineHelper {
|
||||
hinter: HistoryHinter,
|
||||
highlighter: MatchingBracketHighlighter,
|
||||
}
|
||||
|
||||
impl Default for RustylineHelper {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
hinter: HistoryHinter {},
|
||||
highlighter: MatchingBracketHighlighter::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl rustyline::Helper for RustylineHelper {}
|
||||
|
||||
impl Validator for RustylineHelper {
|
||||
fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
|
||||
let input = ctx.input();
|
||||
match ReplCommand::try_from(input) {
|
||||
Ok(_) => Ok(ValidationResult::Valid(None)),
|
||||
Err(e) => {
|
||||
if input.trim_end().ends_with(';') {
|
||||
// If the line ends with ';', it HAS to be a valid command.
|
||||
Ok(ValidationResult::Invalid(Some(e.to_string())))
|
||||
} else {
|
||||
Ok(ValidationResult::Incomplete)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
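The validator defers entirely to ReplCommand parsing: a parse failure on input that does not yet end with ';' is reported as Incomplete, which is what lets a SQL statement be typed across several lines. A rough sketch of the expectations this builds on, illustrative only and mirroring the parsing rules in cmd.rs:

fn validator_expectations_sketch() {
    use crate::cli::cmd::ReplCommand;

    // Not a keyword and no trailing ';': parsing fails, so validate() returns
    // ValidationResult::Incomplete and rustyline keeps reading more lines.
    assert!(ReplCommand::try_from("SELECT * FROM t").is_err());

    // With the trailing ';' the same text parses as a Sql command, so validate()
    // returns ValidationResult::Valid(None).
    assert!(matches!(
        ReplCommand::try_from("SELECT * FROM t;"),
        Ok(ReplCommand::Sql { .. })
    ));

    // Keywords are accepted with or without the ';'.
    assert!(matches!(ReplCommand::try_from("quit"), Ok(ReplCommand::Exit)));
}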
|
||||
|
||||
impl Hinter for RustylineHelper {
|
||||
type Hint = String;
|
||||
|
||||
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
|
||||
self.hinter.hint(line, pos, ctx)
|
||||
}
|
||||
}
|
||||
|
||||
impl Highlighter for RustylineHelper {
|
||||
fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
|
||||
self.highlighter.highlight(line, pos)
|
||||
}
|
||||
|
||||
fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
|
||||
&'s self,
|
||||
prompt: &'p str,
|
||||
default: bool,
|
||||
) -> Cow<'b, str> {
|
||||
self.highlighter.highlight_prompt(prompt, default)
|
||||
}
|
||||
|
||||
fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
|
||||
use nu_ansi_term::Style;
|
||||
Cow::Owned(Style::new().dimmed().paint(hint).to_string())
|
||||
}
|
||||
|
||||
fn highlight_candidate<'c>(
|
||||
&self,
|
||||
candidate: &'c str,
|
||||
completion: rustyline::CompletionType,
|
||||
) -> Cow<'c, str> {
|
||||
self.highlighter.highlight_candidate(candidate, completion)
|
||||
}
|
||||
|
||||
fn highlight_char(&self, line: &str, pos: usize) -> bool {
|
||||
self.highlighter.highlight_char(line, pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl Completer for RustylineHelper {
|
||||
type Candidate = String;
|
||||
|
||||
fn complete(
|
||||
&self,
|
||||
line: &str,
|
||||
pos: usize,
|
||||
ctx: &rustyline::Context<'_>,
|
||||
) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
|
||||
// If there is a hint, use it as the auto-completion when the user hits `tab`
|
||||
if let Some(hint) = self.hinter.hint(line, pos, ctx) {
|
||||
Ok((pos, vec![hint]))
|
||||
} else {
|
||||
Ok((0, vec![]))
|
||||
}
|
||||
}
|
||||
}
|
||||
src/cmd/src/cli/repl.rs (new file, 267 lines)
@@ -0,0 +1,267 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use catalog::remote::MetaKvBackend;
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging;
|
||||
use either::Either;
|
||||
use frontend::catalog::FrontendCatalogManager;
|
||||
use frontend::datanode::DatanodeClients;
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
use partition::route::TableRoutes;
|
||||
use query::datafusion::DatafusionQueryEngine;
|
||||
use query::logical_optimizer::LogicalOptimizer;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::QueryEngine;
|
||||
use rustyline::error::ReadlineError;
|
||||
use rustyline::Editor;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ErrorCompat, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
|
||||
use crate::cli::cmd::ReplCommand;
|
||||
use crate::cli::helper::RustylineHelper;
|
||||
use crate::cli::AttachCommand;
|
||||
use crate::error::{
|
||||
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
|
||||
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
|
||||
SubstraitEncodeLogicalPlanSnafu,
|
||||
};
|
||||
|
||||
/// Captures the state of the repl, gathers commands and executes them one by one
|
||||
pub(crate) struct Repl {
|
||||
/// Rustyline editor for interacting with the user on the command line
|
||||
rl: Editor<RustylineHelper>,
|
||||
|
||||
/// Current prompt
|
||||
prompt: String,
|
||||
|
||||
/// Client for interacting with GreptimeDB
|
||||
database: Database,
|
||||
|
||||
query_engine: Option<DatafusionQueryEngine>,
|
||||
}
|
||||
|
||||
#[allow(clippy::print_stdout)]
|
||||
impl Repl {
|
||||
fn print_help(&self) {
|
||||
println!("{}", ReplCommand::help())
|
||||
}
|
||||
|
||||
pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
|
||||
let mut rl = Editor::new().context(ReplCreationSnafu)?;
|
||||
|
||||
if !cmd.disable_helper {
|
||||
rl.set_helper(Some(RustylineHelper::default()));
|
||||
|
||||
let history_file = history_file();
|
||||
if let Err(e) = rl.load_history(&history_file) {
|
||||
logging::debug!(
|
||||
"failed to load history file on {}, error: {e}",
|
||||
history_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let client = Client::with_urls([&cmd.grpc_addr]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
|
||||
let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
|
||||
create_query_engine(meta_addr).await.map(Some)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
rl,
|
||||
prompt: "> ".to_string(),
|
||||
database,
|
||||
query_engine,
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse the next command
|
||||
fn next_command(&mut self) -> Result<ReplCommand> {
|
||||
match self.rl.readline(&self.prompt) {
|
||||
Ok(ref line) => {
|
||||
let request = line.trim();
|
||||
|
||||
self.rl.add_history_entry(request.to_string());
|
||||
|
||||
request.try_into()
|
||||
}
|
||||
Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
|
||||
// Some sort of real underlying error
|
||||
Err(e) => Err(e).context(ReadlineSnafu),
|
||||
}
|
||||
}
|
||||
|
||||
/// Read Evaluate Print Loop (interactive command line) for GreptimeDB
|
||||
///
|
||||
/// Inspired by / based on repl.rs from InfluxDB IOX
|
||||
pub(crate) async fn run(&mut self) -> Result<()> {
|
||||
println!("Ready for commands. (Hint: try 'help')");
|
||||
|
||||
loop {
|
||||
match self.next_command()? {
|
||||
ReplCommand::Help => {
|
||||
self.print_help();
|
||||
}
|
||||
ReplCommand::UseDatabase { db_name } => {
|
||||
if self.execute_sql(format!("USE {db_name}")).await {
|
||||
println!("Using {db_name}");
|
||||
self.database.set_schema(&db_name);
|
||||
self.prompt = format!("[{db_name}] > ");
|
||||
}
|
||||
}
|
||||
ReplCommand::Sql { sql } => {
|
||||
self.execute_sql(sql).await;
|
||||
}
|
||||
ReplCommand::Exit => {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_sql(&self, sql: String) -> bool {
|
||||
self.do_execute_sql(sql)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let status_code = e.status_code();
|
||||
let root_cause = e.iter_chain().last().unwrap();
|
||||
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
|
||||
})
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
async fn do_execute_sql(&self, sql: String) -> Result<()> {
|
||||
let start = Instant::now();
|
||||
|
||||
let output = if let Some(query_engine) = &self.query_engine {
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql)
|
||||
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
|
||||
|
||||
let query_ctx = Arc::new(QueryContext::with(
|
||||
self.database.catalog(),
|
||||
self.database.schema(),
|
||||
));
|
||||
let LogicalPlan::DfPlan(plan) = query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
.await
|
||||
.and_then(|x| query_engine.optimize(&x))
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let plan = DFLogicalSubstraitConvertor {}
|
||||
.encode(plan)
|
||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||
|
||||
self.database.logical_plan(plan.to_vec()).await
|
||||
} else {
|
||||
self.database.sql(&sql).await
|
||||
}
|
||||
.context(RequestDatabaseSnafu { sql: &sql })?;
|
||||
|
||||
let either = match output {
|
||||
Output::Stream(s) => {
|
||||
let x = RecordBatches::try_collect(s)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?;
|
||||
Either::Left(x)
|
||||
}
|
||||
Output::RecordBatches(x) => Either::Left(x),
|
||||
Output::AffectedRows(rows) => Either::Right(rows),
|
||||
};
|
||||
|
||||
let end = Instant::now();
|
||||
|
||||
match either {
|
||||
Either::Left(recordbatches) => {
|
||||
let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
|
||||
if total_rows > 0 {
|
||||
println!(
|
||||
"{}",
|
||||
recordbatches
|
||||
.pretty_print()
|
||||
.context(PrettyPrintRecordBatchesSnafu)?
|
||||
);
|
||||
}
|
||||
println!("Total Rows: {total_rows}")
|
||||
}
|
||||
Either::Right(rows) => println!("Affected Rows: {rows}"),
|
||||
};
|
||||
|
||||
println!("Cost {} ms", (end - start).as_millis());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Repl {
|
||||
fn drop(&mut self) {
|
||||
if self.rl.helper().is_some() {
|
||||
let history_file = history_file();
|
||||
if let Err(e) = self.rl.save_history(&history_file) {
|
||||
logging::debug!(
|
||||
"failed to save history file on {}, error: {e}",
|
||||
history_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
|
||||
fn history_file() -> PathBuf {
|
||||
let mut buf = match std::env::var("HOME") {
|
||||
Ok(home) => PathBuf::from(home),
|
||||
Err(_) => PathBuf::new(),
|
||||
};
|
||||
buf.push(".greptimedb_cli_history");
|
||||
buf
|
||||
}
|
||||
|
||||
async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
let mut meta_client = MetaClientBuilder::default().enable_store().build();
|
||||
meta_client
|
||||
.start([meta_addr])
|
||||
.await
|
||||
.context(StartMetaClientSnafu)?;
|
||||
let meta_client = Arc::new(meta_client);
|
||||
|
||||
let backend = Arc::new(MetaKvBackend {
|
||||
client: meta_client.clone(),
|
||||
});
|
||||
|
||||
let table_routes = Arc::new(TableRoutes::new(meta_client));
|
||||
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
|
||||
|
||||
let datanode_clients = Arc::new(DatanodeClients::default());
|
||||
|
||||
let catalog_list = Arc::new(FrontendCatalogManager::new(
|
||||
backend,
|
||||
partition_manager,
|
||||
datanode_clients,
|
||||
));
|
||||
|
||||
Ok(DatafusionQueryEngine::new(catalog_list, Default::default()))
|
||||
}
|
||||
@@ -14,8 +14,10 @@
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging;
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
|
||||
use meta_client::MetaClientOpts;
|
||||
use datanode::datanode::{
|
||||
Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
|
||||
};
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -54,6 +56,8 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_hostname: Option<String>,
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
metasrv_addr: Option<String>,
|
||||
@@ -63,6 +67,8 @@ struct StartCommand {
|
||||
data_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
wal_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
procedure_dir: Option<String>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -94,6 +100,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
if let Some(addr) = cmd.rpc_addr {
|
||||
opts.rpc_addr = addr;
|
||||
}
|
||||
|
||||
if cmd.rpc_hostname.is_some() {
|
||||
opts.rpc_hostname = cmd.rpc_hostname;
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.mysql_addr {
|
||||
opts.mysql_addr = addr;
|
||||
}
|
||||
@@ -103,8 +114,8 @@ impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
}
|
||||
|
||||
if let Some(meta_addr) = cmd.metasrv_addr {
|
||||
opts.meta_client_opts
|
||||
.get_or_insert_with(MetaClientOpts::default)
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = meta_addr
|
||||
.split(',')
|
||||
.map(&str::trim)
|
||||
@@ -121,12 +132,17 @@ impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
}
|
||||
|
||||
if let Some(data_dir) = cmd.data_dir {
|
||||
opts.storage = ObjectStoreConfig::File { data_dir };
|
||||
opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
|
||||
}
|
||||
|
||||
if let Some(wal_dir) = cmd.wal_dir {
|
||||
opts.wal.dir = wal_dir;
|
||||
}
|
||||
|
||||
if let Some(procedure_dir) = cmd.procedure_dir {
|
||||
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
|
||||
}
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
@@ -134,45 +150,96 @@ impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use datanode::datanode::ObjectStoreConfig;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
node_id = 42
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
rpc_runtime_size = 8
|
||||
mysql_addr = "127.0.0.1:4406"
|
||||
mysql_runtime_size = 2
|
||||
|
||||
[meta_client_options]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
type = "File"
|
||||
data_dir = "/tmp/greptimedb/data/"
|
||||
|
||||
[compaction]
|
||||
max_inflight_tasks = 4
|
||||
max_files_in_level0 = 8
|
||||
max_purge_tasks = 32
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let cmd = StartCommand {
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/datanode.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let options: DatanodeOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
|
||||
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
|
||||
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
|
||||
assert_eq!(4, options.mysql_runtime_size);
|
||||
let MetaClientOpts {
|
||||
assert_eq!(2, options.mysql_runtime_size);
|
||||
assert_eq!(Some(42), options.node_id);
|
||||
|
||||
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
|
||||
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
|
||||
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
|
||||
assert!(!options.wal.sync_write);
|
||||
|
||||
let MetaClientOptions {
|
||||
metasrv_addrs: metasrv_addr,
|
||||
timeout_millis,
|
||||
connect_timeout_millis,
|
||||
tcp_nodelay,
|
||||
} = options.meta_client_opts.unwrap();
|
||||
} = options.meta_client_options.unwrap();
|
||||
|
||||
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
|
||||
assert_eq!(5000, connect_timeout_millis);
|
||||
assert_eq!(3000, timeout_millis);
|
||||
assert!(!tcp_nodelay);
|
||||
assert!(tcp_nodelay);
|
||||
|
||||
match options.storage {
|
||||
ObjectStoreConfig::File { data_dir } => {
|
||||
ObjectStoreConfig::File(FileConfig { data_dir }) => {
|
||||
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
|
||||
}
|
||||
ObjectStoreConfig::S3 { .. } => unreachable!(),
|
||||
ObjectStoreConfig::Oss { .. } => unreachable!(),
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
CompactionConfig {
|
||||
max_inflight_tasks: 4,
|
||||
max_files_in_level0: 8,
|
||||
max_purge_tasks: 32,
|
||||
},
|
||||
options.compaction
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -206,32 +273,4 @@ mod tests {
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_config() {
|
||||
let dn_opts = DatanodeOptions::try_from(StartCommand {
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/datanode.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
..Default::default()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
|
||||
assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
|
||||
assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
|
||||
assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
|
||||
assert!(!dn_opts.wal.sync_write);
|
||||
assert_eq!(Some(42), dn_opts.node_id);
|
||||
let MetaClientOpts {
|
||||
metasrv_addrs: metasrv_addr,
|
||||
timeout_millis,
|
||||
connect_timeout_millis,
|
||||
tcp_nodelay,
|
||||
} = dn_opts.meta_client_opts.unwrap();
|
||||
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
|
||||
assert_eq!(3000, timeout_millis);
|
||||
assert_eq!(5000, connect_timeout_millis);
|
||||
assert!(!tcp_nodelay);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use rustyline::error::ReadlineError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -61,6 +62,72 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: servers::auth::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
|
||||
UnsupportedSelectorType {
|
||||
selector_type: String,
|
||||
#[snafu(backtrace)]
|
||||
source: meta_srv::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid REPL command: {reason}"))]
|
||||
InvalidReplCommand { reason: String },
|
||||
|
||||
#[snafu(display("Cannot create REPL: {}", source))]
|
||||
ReplCreation {
|
||||
source: ReadlineError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Error reading command: {}", source))]
|
||||
Readline {
|
||||
source: ReadlineError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
|
||||
RequestDatabase {
|
||||
sql: String,
|
||||
#[snafu(backtrace)]
|
||||
source: client::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
|
||||
CollectRecordBatches {
|
||||
#[snafu(backtrace)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
|
||||
PrettyPrintRecordBatches {
|
||||
#[snafu(backtrace)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start Meta client, source: {}", source))]
|
||||
StartMetaClient {
|
||||
#[snafu(backtrace)]
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
|
||||
ParseSql {
|
||||
sql: String,
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to plan statement, source: {}", source))]
|
||||
PlanStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
|
||||
SubstraitEncodeLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
source: substrait::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -71,11 +138,24 @@ impl ErrorExt for Error {
|
||||
Error::StartDatanode { source } => source.status_code(),
|
||||
Error::StartFrontend { source } => source.status_code(),
|
||||
Error::StartMetaServer { source } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::StartMetaClient { source } => source.status_code(),
|
||||
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
@@ -22,8 +23,8 @@ use frontend::instance::Instance;
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::Plugins;
|
||||
use meta_client::MetaClientOpts;
|
||||
use frontend::prom::PromOptions;
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::auth::UserProviderRef;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -67,6 +68,8 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
prom_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
opentsdb_addr: Option<String>,
|
||||
@@ -91,10 +94,9 @@ impl StartCommand {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let opts: FrontendOptions = self.try_into()?;
|
||||
|
||||
let mut instance = Instance::try_new_distributed(&opts)
|
||||
let instance = Instance::try_new_distributed(&opts, plugins.clone())
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
instance.set_plugins(plugins.clone());
|
||||
|
||||
let mut frontend = Frontend::new(opts, instance, plugins);
|
||||
frontend.start().await.context(error::StartFrontendSnafu)
|
||||
@@ -142,6 +144,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = cmd.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr });
|
||||
}
|
||||
if let Some(addr) = cmd.postgres_addr {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
@@ -159,8 +164,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable });
|
||||
}
|
||||
if let Some(metasrv_addr) = cmd.metasrv_addr {
|
||||
opts.meta_client_opts
|
||||
.get_or_insert_with(MetaClientOpts::default)
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = metasrv_addr
|
||||
.split(',')
|
||||
.map(&str::trim)
|
||||
@@ -174,8 +179,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use servers::auth::{Identity, Password, UserProviderRef};
|
||||
|
||||
use super::*;
|
||||
@@ -185,6 +192,7 @@ mod tests {
|
||||
let command = StartCommand {
|
||||
http_addr: Some("127.0.0.1:1234".to_string()),
|
||||
grpc_addr: None,
|
||||
prom_addr: Some("127.0.0.1:4444".to_string()),
|
||||
mysql_addr: Some("127.0.0.1:5678".to_string()),
|
||||
postgres_addr: Some("127.0.0.1:5432".to_string()),
|
||||
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
|
||||
@@ -208,6 +216,7 @@ mod tests {
|
||||
opts.opentsdb_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:4321"
|
||||
);
|
||||
assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
|
||||
|
||||
let default_opts = FrontendOptions::default();
|
||||
assert_eq!(
|
||||
@@ -232,17 +241,25 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/frontend.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
@@ -268,6 +285,7 @@ mod tests {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
@@ -287,7 +305,7 @@ mod tests {
|
||||
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.auth(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
pub mod cli;
|
||||
pub mod datanode;
|
||||
pub mod error;
|
||||
pub mod frontend;
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use meta_srv::bootstrap;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
@@ -56,6 +56,10 @@ struct StartCommand {
|
||||
store_addr: Option<String>,
|
||||
#[clap(short, long)]
|
||||
config_file: Option<String>,
|
||||
#[clap(short, long)]
|
||||
selector: Option<String>,
|
||||
#[clap(long)]
|
||||
use_memory_store: bool,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -91,6 +95,17 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
|
||||
if let Some(addr) = cmd.store_addr {
|
||||
opts.store_addr = addr;
|
||||
}
|
||||
if let Some(selector_type) = &cmd.selector {
|
||||
opts.selector = selector_type[..]
|
||||
.try_into()
|
||||
.context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
|
||||
info!("Using {} selector", selector_type);
|
||||
}
|
||||
|
||||
if cmd.use_memory_store {
|
||||
warn!("Using memory store for Meta. Make sure you are in running tests.");
|
||||
opts.use_memory_store = true;
|
||||
}
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
@@ -98,6 +113,11 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use meta_srv::selector::SelectorType;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
@@ -107,28 +127,42 @@ mod tests {
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
use_memory_store: false,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
|
||||
assert_eq!(SelectorType::LoadBased, options.selector);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
server_addr = "127.0.0.1:3002"
|
||||
store_addr = "127.0.0.1:2379"
|
||||
datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
use_memory_store = false
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let cmd = StartCommand {
|
||||
bind_addr: None,
|
||||
server_addr: None,
|
||||
store_addr: None,
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/metasrv.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
selector: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
use_memory_store: false,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
|
||||
assert_eq!(15, options.datanode_lease_secs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,8 +15,11 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_telemetry::info;
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
|
||||
use datanode::datanode::{
|
||||
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
|
||||
};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::grpc::GrpcOptions;
|
||||
@@ -25,8 +28,8 @@ use frontend::instance::Instance as FeInstance;
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::prom::PromOptions;
|
||||
use frontend::prometheus::PrometheusOptions;
|
||||
use frontend::Plugins;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -65,6 +68,8 @@ impl SubCommand {
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct StandaloneOptions {
|
||||
pub mode: Mode,
|
||||
pub enable_memory_catalog: bool,
|
||||
pub http_options: Option<HttpOptions>,
|
||||
pub grpc_options: Option<GrpcOptions>,
|
||||
pub mysql_options: Option<MysqlOptions>,
|
||||
@@ -72,15 +77,18 @@ pub struct StandaloneOptions {
|
||||
pub opentsdb_options: Option<OpentsdbOptions>,
|
||||
pub influxdb_options: Option<InfluxdbOptions>,
|
||||
pub prometheus_options: Option<PrometheusOptions>,
|
||||
pub mode: Mode,
|
||||
pub prom_options: Option<PromOptions>,
|
||||
pub wal: WalConfig,
|
||||
pub storage: ObjectStoreConfig,
|
||||
pub enable_memory_catalog: bool,
|
||||
pub compaction: CompactionConfig,
|
||||
pub procedure: Option<ProcedureConfig>,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
mode: Mode::Standalone,
|
||||
enable_memory_catalog: false,
|
||||
http_options: Some(HttpOptions::default()),
|
||||
grpc_options: Some(GrpcOptions::default()),
|
||||
mysql_options: Some(MysqlOptions::default()),
|
||||
@@ -88,10 +96,11 @@ impl Default for StandaloneOptions {
|
||||
opentsdb_options: Some(OpentsdbOptions::default()),
|
||||
influxdb_options: Some(InfluxdbOptions::default()),
|
||||
prometheus_options: Some(PrometheusOptions::default()),
|
||||
mode: Mode::Standalone,
|
||||
prom_options: Some(PromOptions::default()),
|
||||
wal: WalConfig::default(),
|
||||
storage: ObjectStoreConfig::default(),
|
||||
enable_memory_catalog: false,
|
||||
compaction: CompactionConfig::default(),
|
||||
procedure: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -99,6 +108,7 @@ impl Default for StandaloneOptions {
|
||||
impl StandaloneOptions {
|
||||
fn frontend_options(self) -> FrontendOptions {
|
||||
FrontendOptions {
|
||||
mode: self.mode,
|
||||
http_options: self.http_options,
|
||||
grpc_options: self.grpc_options,
|
||||
mysql_options: self.mysql_options,
|
||||
@@ -106,16 +116,18 @@ impl StandaloneOptions {
|
||||
opentsdb_options: self.opentsdb_options,
|
||||
influxdb_options: self.influxdb_options,
|
||||
prometheus_options: self.prometheus_options,
|
||||
mode: self.mode,
|
||||
meta_client_opts: None,
|
||||
prom_options: self.prom_options,
|
||||
meta_client_options: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn datanode_options(self) -> DatanodeOptions {
|
||||
DatanodeOptions {
|
||||
enable_memory_catalog: self.enable_memory_catalog,
|
||||
wal: self.wal,
|
||||
storage: self.storage,
|
||||
enable_memory_catalog: self.enable_memory_catalog,
|
||||
compaction: self.compaction,
|
||||
procedure: self.procedure,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -130,6 +142,8 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
prom_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
opentsdb_addr: Option<String>,
|
||||
@@ -242,6 +256,11 @@ impl TryFrom<StartCommand> for FrontendOptions {
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr })
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.postgres_addr {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
@@ -290,6 +309,7 @@ mod tests {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: Some(format!(
|
||||
@@ -323,6 +343,10 @@ mod tests {
|
||||
fe_opts.mysql_options.as_ref().unwrap().addr
|
||||
);
|
||||
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
|
||||
assert_eq!(
|
||||
None,
|
||||
fe_opts.mysql_options.as_ref().unwrap().reject_no_database
|
||||
);
|
||||
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
|
||||
}
|
||||
|
||||
@@ -331,6 +355,7 @@ mod tests {
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
@@ -350,8 +375,15 @@ mod tests {
|
||||
assert!(provider.is_some());
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.auth(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_toml() {
|
||||
let opts = StandaloneOptions::default();
|
||||
let toml_string = toml::to_string(&opts).unwrap();
|
||||
let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,9 +29,9 @@ mod tests {
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
use tempdir::TempDir;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Result;
|
||||
@@ -62,7 +62,7 @@ mod tests {
|
||||
host: "greptime.test".to_string(),
|
||||
};
|
||||
|
||||
let dir = TempDir::new("test_from_file").unwrap();
|
||||
let dir = create_temp_dir("test_from_file");
|
||||
let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
|
||||
|
||||
let s = toml::to_string(&config).unwrap();
|
||||
|
||||
src/cmd/tests/cli.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
mod tests {
|
||||
use std::path::PathBuf;
|
||||
use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use rexpect::session::PtyReplSession;
|
||||
|
||||
struct Repl {
|
||||
repl: PtyReplSession,
|
||||
}
|
||||
|
||||
impl Repl {
|
||||
fn send_line(&mut self, line: &str) {
|
||||
self.repl.send_line(line).unwrap();
|
||||
|
||||
// read a line to consume the prompt
|
||||
self.read_line();
|
||||
}
|
||||
|
||||
fn read_line(&mut self) -> String {
|
||||
self.repl.read_line().unwrap()
|
||||
}
|
||||
|
||||
fn read_expect(&mut self, expect: &str) {
|
||||
assert_eq!(self.read_line(), expect);
|
||||
}
|
||||
|
||||
fn read_contains(&mut self, pat: &str) {
|
||||
assert!(self.read_line().contains(pat));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_repl() {
|
||||
let data_dir = create_temp_dir("data");
|
||||
let wal_dir = create_temp_dir("wal");
|
||||
|
||||
let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
bin_path.push("../../target/debug");
|
||||
let bin_path = bin_path.to_str().unwrap();
|
||||
|
||||
let mut datanode = Command::new("./greptime")
|
||||
.current_dir(bin_path)
|
||||
.args([
|
||||
"datanode",
|
||||
"start",
|
||||
"--rpc-addr=0.0.0.0:4321",
|
||||
"--node-id=1",
|
||||
&format!("--data-dir={}", data_dir.path().display()),
|
||||
&format!("--wal-dir={}", wal_dir.path().display()),
|
||||
])
|
||||
.stdout(Stdio::null())
|
||||
.spawn()
|
||||
.unwrap();
|
||||
|
||||
// wait for the Datanode to actually start up
|
||||
std::thread::sleep(Duration::from_secs(3));
|
||||
|
||||
let mut repl_cmd = Command::new("./greptime");
|
||||
repl_cmd.current_dir(bin_path).args([
|
||||
"--log-level=off",
|
||||
"cli",
|
||||
"attach",
|
||||
"--grpc-addr=0.0.0.0:4321",
|
||||
// history commands can sneak into stdout and mess up our tests, so disable it
|
||||
"--disable-helper",
|
||||
]);
|
||||
let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
|
||||
let repl = PtyReplSession {
|
||||
prompt: "> ".to_string(),
|
||||
pty_session,
|
||||
quit_command: None,
|
||||
echo_on: false,
|
||||
};
|
||||
let repl = &mut Repl { repl };
|
||||
repl.read_expect("Ready for commands. (Hint: try 'help')");
|
||||
|
||||
test_create_database(repl);
|
||||
|
||||
test_use_database(repl);
|
||||
|
||||
test_create_table(repl);
|
||||
|
||||
test_insert(repl);
|
||||
|
||||
test_select(repl);
|
||||
|
||||
datanode.kill().unwrap();
|
||||
datanode.wait().unwrap();
|
||||
}
|
||||
|
||||
fn test_create_database(repl: &mut Repl) {
|
||||
repl.send_line("CREATE DATABASE db;");
|
||||
repl.read_expect("Affected Rows: 1");
|
||||
repl.read_contains("Cost");
|
||||
}
|
||||
|
||||
fn test_use_database(repl: &mut Repl) {
|
||||
repl.send_line("USE db");
|
||||
repl.read_expect("Total Rows: 0");
|
||||
repl.read_contains("Cost");
|
||||
repl.read_expect("Using db");
|
||||
}
|
||||
|
||||
fn test_create_table(repl: &mut Repl) {
|
||||
repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
|
||||
repl.read_expect("Affected Rows: 0");
|
||||
repl.read_contains("Cost");
|
||||
}
|
||||
|
||||
fn test_insert(repl: &mut Repl) {
|
||||
repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
|
||||
repl.read_expect("Affected Rows: 1");
|
||||
repl.read_contains("Cost");
|
||||
}
|
||||
|
||||
fn test_select(repl: &mut Repl) {
|
||||
repl.send_line("SELECT * FROM t;");
|
||||
|
||||
repl.read_expect("+-------+-------------------------+");
|
||||
repl.read_expect("| x | ts |");
|
||||
repl.read_expect("+-------+-------------------------+");
|
||||
repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
|
||||
repl.read_expect("+-------+-------------------------+");
|
||||
repl.read_expect("Total Rows: 1");
|
||||
|
||||
repl.read_contains("Cost");
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true

[dependencies]
anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { path = "../error" }
@@ -20,6 +20,12 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Bytes(bytes::Bytes);

impl From<Bytes> for bytes::Bytes {
fn from(value: Bytes) -> Self {
value.0
}
}

impl From<bytes::Bytes> for Bytes {
fn from(bytes: bytes::Bytes) -> Bytes {
Bytes(bytes)
@@ -19,3 +19,5 @@ pub mod bytes;
pub mod readable_size;

pub use bit_vec::BitVec;

pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;
@@ -16,6 +16,5 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
chrono = "0.4"
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }
chrono.workspace = true
tokio.workspace = true
@@ -14,3 +14,9 @@

pub mod consts;
pub mod error;

/// Formats table fully-qualified name
#[inline]
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{catalog}.{schema}.{table}")
}
@@ -5,5 +5,5 @@ edition.workspace = true
license.workspace = true

[dependencies]
strum = "0.24.1"
snafu = { version = "0.7", features = ["backtraces"] }
strum = { version = "0.24", features = ["std", "derive"] }
@@ -88,6 +88,45 @@ impl crate::snafu::ErrorCompat for BoxedError {
}
}

/// Error type with plain error message
#[derive(Debug)]
pub struct PlainError {
msg: String,
status_code: StatusCode,
}

impl PlainError {
pub fn new(msg: String, status_code: StatusCode) -> Self {
Self { msg, status_code }
}
}

impl std::fmt::Display for PlainError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.msg)
}
}

impl std::error::Error for PlainError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}

impl crate::ext::ErrorExt for PlainError {
fn status_code(&self) -> crate::status_code::StatusCode {
self.status_code
}

fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
None
}

fn as_any(&self) -> &dyn std::any::Any {
self as _
}
}
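For orientation only, here is a minimal usage sketch of the new PlainError added in this hunk. The `plain_error` module path is an assumption (the diff only shows the type living next to BoxedError in common-error); the prelude import is taken from elsewhere in this change set.

```rust
// Hypothetical usage sketch; the `common_error::plain_error` path is assumed.
use common_error::plain_error::PlainError;
use common_error::prelude::{ErrorExt, StatusCode};

fn demo() {
    // An ad-hoc error carrying just a message and an explicit status code.
    let err = PlainError::new("table is read-only".to_string(), StatusCode::Unsupported);
    assert_eq!(err.status_code(), StatusCode::Unsupported);
    // Display simply echoes the message, as the impl above shows.
    assert_eq!(err.to_string(), "table is read-only");
}
```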

#[cfg(test)]
mod tests {
use std::error::Error;
@@ -77,6 +77,8 @@ pub enum StatusCode {
AuthHeaderNotFound = 7003,
/// Invalid http authorization header
InvalidAuthHeader = 7004,
/// Illegal request to connect catalog-schema
AccessDenied = 7005,
// ====== End of auth related status code =====
}

@@ -84,6 +86,34 @@ impl StatusCode {
pub fn is_success(code: u32) -> bool {
Self::Success as u32 == code
}

pub fn is_retryable(&self) -> bool {
match self {
StatusCode::StorageUnavailable
| StatusCode::RuntimeResourcesExhausted
| StatusCode::Internal => true,

StatusCode::Success
| StatusCode::Unknown
| StatusCode::Unsupported
| StatusCode::Unexpected
| StatusCode::InvalidArguments
| StatusCode::InvalidSyntax
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery
| StatusCode::TableAlreadyExists
| StatusCode::TableNotFound
| StatusCode::TableColumnNotFound
| StatusCode::TableColumnExists
| StatusCode::DatabaseNotFound
| StatusCode::UserNotFound
| StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound
| StatusCode::InvalidAuthHeader
| StatusCode::AccessDenied => false,
}
}
}

impl fmt::Display for StatusCode {
@@ -10,6 +10,7 @@ proc-macro = true
[dependencies]
quote = "1.0"
syn = "1.0"
proc-macro2 = "1.0"

[dev-dependencies]
arc-swap = "1.0"
@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod range_fn;

use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use range_fn::process_range_fn;
use syn::parse::Parser;
use syn::spanned::Spanned;
use syn::{parse_macro_input, DeriveInput, ItemStruct};
@@ -83,3 +86,31 @@ pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStre
}
.into()
}

/// Attribute macro to convert an arithimetic function to a range function. The annotated function
/// should accept servaral arrays as input and return a single value as output. This procedure
/// macro can works on any number of input parameters. Return type can be either primitive type
/// or wrapped in `Option`.
///
/// # Example
/// Take `count_over_time()` in PromQL as an example:
/// ```rust, ignore
/// /// The count of all values in the specified interval.
/// #[range_fn(
///     name = "CountOverTime",
///     ret = "Float64Array",
///     display_name = "prom_count_over_time"
/// )]
/// pub fn count_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
///     values.len() as f64
/// }
/// ```
///
/// # Arguments
/// - `name`: The name of the generated [ScalarUDF] struct.
/// - `ret`: The return type of the generated UDF function.
/// - `display_name`: The display name of the generated UDF function.
#[proc_macro_attribute]
pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
process_range_fn(args, input)
}

src/common/function-macro/src/range_fn.rs (new file, 230 lines)
@@ -0,0 +1,230 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::Comma;
use syn::{
parse_macro_input, Attribute, AttributeArgs, FnArg, Ident, ItemFn, Meta, MetaNameValue,
NestedMeta, Signature, Type, TypeReference, Visibility,
};

/// Internal util macro to early return on error.
macro_rules! ok {
($item:expr) => {
match $item {
Ok(item) => item,
Err(e) => return e.into_compile_error().into(),
}
};
}

pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
let arg_map = ok!(extract_arg_map(arg_pairs));

// decompose the fn block
let compute_fn = parse_macro_input!(input as ItemFn);
let ItemFn {
attrs,
vis,
sig,
block,
} = compute_fn;

// extract fn arg list
let Signature {
inputs,
ident: fn_name,
..
} = &sig;
let arg_types = ok!(extract_input_types(inputs));

// build the struct and its impl block
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
ok!(get_ident(&arg_map, "display_name", arg_span)),
);
let calc_fn_code = build_calc_fn(
ok!(get_ident(&arg_map, "name", arg_span)),
arg_types,
fn_name.clone(),
ok!(get_ident(&arg_map, "ret", arg_span)),
);
// preserve this fn, but remove its `pub` modifier
let input_fn_code: TokenStream = quote! {
#sig { #block }
}
.into();

let mut result = TokenStream::new();
result.extend(struct_code);
result.extend(calc_fn_code);
result.extend(input_fn_code);
result
}

/// Extract a String <-> Ident map from the attribute args.
fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
args.into_iter()
.map(|meta| {
if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
let name = path.get_ident().unwrap().to_string();
let ident = match lit {
syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
_ => Err(syn::Error::new(
lit.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
)),
}?;
Ok((name, ident))
} else {
Err(syn::Error::new(
meta.span(),
"Unexpected attribute format. Expected `name = \"value\"`",
))
}
})
.collect::<Result<HashMap<String, Ident>, syn::Error>>()
}

/// Helper function to get an Ident from the previous arg map.
fn get_ident(map: &HashMap<String, Ident>, key: &str, span: Span) -> Result<Ident, syn::Error> {
map.get(key)
.cloned()
.ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
}

/// Extract the argument list from the annotated function.
fn extract_input_types(inputs: &Punctuated<FnArg, Comma>) -> Result<Vec<Type>, syn::Error> {
inputs
.iter()
.map(|arg| match arg {
FnArg::Receiver(receiver) => Err(syn::Error::new(receiver.span(), "expected bool")),
FnArg::Typed(pat_type) => Ok(*pat_type.ty.clone()),
})
.collect()
}

fn build_struct(
attrs: Vec<Attribute>,
vis: Visibility,
name: Ident,
display_name_ident: Ident,
) -> TokenStream {
let display_name = display_name_ident.to_string();
quote! {
#(#attrs)*
#[derive(Debug)]
#vis struct #name {}

impl #name {
pub const fn name() -> &'static str {
#display_name
}

pub fn scalar_udf() -> ScalarUDF {
ScalarUDF {
name: Self::name().to_string(),
signature: Signature::new(
TypeSignature::Exact(Self::input_type()),
Volatility::Immutable,
),
return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
fun: Arc::new(Self::calc),
}
}

// TODO(ruihang): this should be parameterized
// time index column and value column
fn input_type() -> Vec<DataType> {
vec![
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
RangeArray::convert_data_type(DataType::Float64),
]
}

// TODO(ruihang): this should be parameterized
fn return_type() -> DataType {
DataType::Float64
}
}
}
.into()
}

fn build_calc_fn(
name: Ident,
param_types: Vec<Type>,
fn_name: Ident,
ret_type: Ident,
) -> TokenStream {
let param_names = param_types
.iter()
.enumerate()
.map(|(i, ty)| Ident::new(&format!("param_{}", i), ty.span()))
.collect::<Vec<_>>();
let unref_param_types = param_types
.iter()
.map(|ty| {
if let Type::Reference(TypeReference { elem, .. }) = ty {
elem.as_ref().clone()
} else {
ty.clone()
}
})
.collect::<Vec<_>>();
let num_params = param_types.len();
let param_numbers = (0..num_params).collect::<Vec<_>>();
let range_array_names = param_names
.iter()
.map(|name| Ident::new(&format!("{}_range_array", name), name.span()))
.collect::<Vec<_>>();
let first_range_array_name = range_array_names.first().unwrap().clone();

quote! {
impl #name {
fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), #num_params);

#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.data().clone().into())?; )*

// TODO(ruihang): add ensure!()

let mut result_array = Vec::new();
for index in 0..#first_range_array_name.len(){
#( let #param_names = #range_array_names.get(index).unwrap().as_any().downcast_ref::<#unref_param_types>().unwrap().clone(); )*

// TODO(ruihang): add ensure!() to check length

let result = #fn_name(#( &#param_names, )*);
result_array.push(result);
}

let result = ColumnarValue::Array(Arc::new(#ret_type::from_iter(result_array)));
Ok(result)
}
}
}
.into()
}
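As a reading aid, here is a rough sketch of the API the macro emits for the count_over_time example from the doc comment earlier. It paraphrases the build_struct output above and is not the literal expansion.

```rust
// Paraphrased shape of the expansion for the `count_over_time` example
// (illustrative only; the real output is produced by build_struct/build_calc_fn).
#[derive(Debug)]
pub struct CountOverTime {}

impl CountOverTime {
    pub const fn name() -> &'static str {
        "prom_count_over_time"
    }
    // scalar_udf() wires name(), input_type(), return_type() and calc() into a
    // DataFusion ScalarUDF; calc() re-wraps each input as a RangeArray, slices
    // out every row range, and feeds it to the original count_over_time fn.
}
```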

@@ -20,6 +20,7 @@ use static_assertions::{assert_fields, assert_impl_all};
struct Foo {}

#[test]
#[allow(clippy::extra_unused_type_parameters)]
fn test_derive() {
Foo::default();
assert_fields!(Foo: input_types);
@@ -19,7 +19,7 @@ num-traits = "0.2"
once_cell = "1.10"
paste = "1.0"
snafu.workspace = true
statrs = "0.15"
statrs = "0.16"

[dev-dependencies]
ron = "0.7"
@@ -14,9 +14,9 @@

use std::sync::Arc;

use common_query::error::{ExecuteFunctionSnafu, FromScalarValueSnafu};
use common_query::error::FromScalarValueSnafu;
use common_query::prelude::{
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf, ScalarValue,
ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf,
};
use datatypes::error::Error as DataTypeError;
use datatypes::prelude::*;
@@ -54,16 +54,8 @@ pub fn create_udf(func: FunctionRef) -> ScalarUdf {
.collect();

let result = func_cloned.eval(func_ctx, &args.context(FromScalarValueSnafu)?);

let udf = if len.is_some() {
result.map(ColumnarValue::Vector)?
} else {
ScalarValue::try_from_array(&result?.to_arrow_array(), 0)
.map(ColumnarValue::Scalar)
.context(ExecuteFunctionSnafu)?
};

Ok(udf)
let udf_result = result.map(ColumnarValue::Vector)?;
Ok(udf_result)
});

ScalarUdf::new(func.name(), &func.signature(), &return_type, &fun)
@@ -12,33 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::alter_expr::Kind;
use api::v1::{AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use api::v1::{column_def, AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
use table::requests::{
AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest, TableOptions,
};

use crate::error::{
ColumnNotFoundSnafu, CreateSchemaSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
MissingTimestampColumnSnafu, Result,
ColumnNotFoundSnafu, InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu,
Result, UnrecognizedTableOptionSnafu,
};

/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
let catalog_name = if expr.catalog_name.is_empty() {
None
} else {
Some(expr.catalog_name)
};
let schema_name = if expr.schema_name.is_empty() {
None
} else {
Some(expr.schema_name)
};
let catalog_name = expr.catalog_name;
let schema_name = expr.schema_name;
let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
match kind {
Kind::AddColumns(add_columns) => {
@@ -50,12 +42,11 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
field: "column_def",
})?;

let schema =
column_def
.try_as_column_schema()
.context(InvalidColumnDefSnafu {
column: &column_def.name,
})?;
let schema = column_def::try_as_column_schema(&column_def).context(
InvalidColumnDefSnafu {
column: &column_def.name,
},
)?;
Ok(AddColumnRequest {
column_schema: schema,
is_key: ac.is_key,
@@ -101,13 +92,12 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
}
}

pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
let column_schemas = expr
.column_defs
.iter()
.map(|x| {
x.try_as_column_schema()
.context(InvalidColumnDefSnafu { column: &x.name })
column_def::try_as_column_schema(x).context(InvalidColumnDefSnafu { column: &x.name })
})
.collect::<Result<Vec<ColumnSchema>>>()?;

@@ -131,12 +121,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
})
.collect::<Vec<_>>();

Ok(Arc::new(
SchemaBuilder::try_from(column_schemas)
.context(CreateSchemaSnafu)?
.build()
.context(CreateSchemaSnafu)?,
))
Ok(RawSchema::new(column_schemas))
}

pub fn create_expr_to_request(
@@ -148,8 +133,11 @@ pub fn create_expr_to_request(
.primary_keys
.iter()
.map(|key| {
// We do a linear search here.
schema
.column_index_by_name(key)
.column_schemas
.iter()
.position(|column_schema| column_schema.name == *key)
.context(ColumnNotFoundSnafu {
column_name: key,
table_name: &expr.table_name,
@@ -177,6 +165,8 @@ pub fn create_expr_to_request(
expr.region_ids
};

let table_options =
TableOptions::try_from(&expr.table_options).context(UnrecognizedTableOptionSnafu)?;
Ok(CreateTableRequest {
id: table_id,
catalog_name,
@@ -187,7 +177,7 @@ pub fn create_expr_to_request(
region_numbers: region_ids,
primary_key_indices,
create_if_not_exists: expr.create_if_not_exists,
table_options: expr.table_options,
table_options,
})
}

@@ -219,8 +209,8 @@ mod tests {
};

let alter_request = alter_expr_to_request(expr).unwrap();
assert_eq!(None, alter_request.catalog_name);
assert_eq!(None, alter_request.schema_name);
assert_eq!(alter_request.catalog_name, "");
assert_eq!(alter_request.schema_name, "");
assert_eq!("monitor".to_string(), alter_request.table_name);
let add_column = match alter_request.alter_kind {
AlterKind::AddColumns { mut columns } => columns.pop().unwrap(),
@@ -250,8 +240,8 @@ mod tests {
};

let alter_request = alter_expr_to_request(expr).unwrap();
assert_eq!(Some("test_catalog".to_string()), alter_request.catalog_name);
assert_eq!(Some("test_schema".to_string()), alter_request.schema_name);
assert_eq!(alter_request.catalog_name, "test_catalog");
assert_eq!(alter_request.schema_name, "test_schema");
assert_eq!("monitor".to_string(), alter_request.table_name);

let mut drop_names = match alter_request.alter_kind {
@@ -40,12 +40,6 @@ pub enum Error {
source: api::error::Error,
},

#[snafu(display("Failed to create schema when creating table, source: {}", source))]
CreateSchema {
#[snafu(backtrace)]
source: datatypes::error::Error,
},

#[snafu(display(
"Duplicated timestamp column in gRPC requests, exists {}, duplicated: {}",
exists,
@@ -90,6 +84,12 @@ pub enum Error {
#[snafu(backtrace)]
source: api::error::Error,
},

#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
source: table::error::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -102,14 +102,15 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::ColumnDataType { .. } => StatusCode::Internal,
Error::CreateSchema { .. }
| Error::DuplicatedTimestampColumn { .. }
| Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
Error::DuplicatedTimestampColumn { .. } | Error::MissingTimestampColumn { .. } => {
StatusCode::InvalidArguments
}
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
Error::CreateVector { .. } => StatusCode::InvalidArguments,
Error::MissingField { .. } => StatusCode::InvalidArguments,
Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
Error::InvalidColumnDef { source, .. } => source.status_code(),
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
@@ -21,17 +21,17 @@ use api::v1::{
InsertRequest as GrpcInsertRequest,
};
use common_base::BitVec;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_time::timestamp::Timestamp;
use common_time::{Date, DateTime};
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::schema::SchemaRef;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::MutableVector;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequest};
use table::requests::InsertRequest;

use crate::error::{
ColumnDataTypeSnafu, CreateVectorSnafu, DuplicatedTimestampColumnSnafu, IllegalInsertDataSnafu,
@@ -81,20 +81,6 @@ pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option
}
}

/// Build a alter table rqeusts that adding new columns.
#[inline]
pub fn build_alter_table_request(
table_name: &str,
columns: Vec<AddColumnRequest>,
) -> AlterTableRequest {
AlterTableRequest {
catalog_name: None,
schema_name: None,
table_name: table_name.to_string(),
alter_kind: AlterKind::AddColumns { columns },
}
}

pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
let wrapper = ColumnDataTypeWrapper::try_new(column.datatype).context(ColumnDataTypeSnafu)?;
let column_datatype = wrapper.datatype();
@@ -111,9 +97,7 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {

for i in 0..rows {
if let Some(true) = nulls_iter.next() {
vector
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)?;
vector.push_null();
} else {
let value_ref = values_iter
.next()
@@ -124,16 +108,12 @@ pub fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
),
})?;
vector
.push_value_ref(value_ref)
.try_push_value_ref(value_ref)
.context(CreateVectorSnafu)?;
}
}
} else {
(0..rows).try_for_each(|_| {
vector
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)
})?;
(0..rows).for_each(|_| vector.push_null());
}
Ok(vector.to_vector())
}
@@ -281,9 +261,11 @@ pub fn build_create_expr_from_insertion(
Ok(expr)
}

pub fn to_table_insert_request(request: GrpcInsertRequest) -> Result<InsertRequest> {
let catalog_name = DEFAULT_CATALOG_NAME;
let schema_name = &request.schema_name;
pub fn to_table_insert_request(
catalog_name: &str,
schema_name: &str,
request: GrpcInsertRequest,
) -> Result<InsertRequest> {
let table_name = &request.table_name;
let row_count = request.row_count as usize;

@@ -319,6 +301,7 @@ pub fn to_table_insert_request(request: GrpcInsertRequest) -> Result<InsertReque
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
columns_values,
region_number: request.region_number,
})
}

@@ -336,7 +319,7 @@ fn add_values_to_builder(

values.iter().try_for_each(|value| {
builder
.push_value_ref(value.as_value_ref())
.try_push_value_ref(value.as_value_ref())
.context(CreateVectorSnafu)
})?;
} else {
@@ -349,12 +332,10 @@ fn add_values_to_builder(
let mut idx_of_values = 0;
for idx in 0..row_count {
match is_null(&null_mask, idx) {
Some(true) => builder
.push_value_ref(ValueRef::Null)
.context(CreateVectorSnafu)?,
Some(true) => builder.push_null(),
_ => {
builder
.push_value_ref(values[idx_of_values].as_value_ref())
.try_push_value_ref(values[idx_of_values].as_value_ref())
.context(CreateVectorSnafu)?;
idx_of_values += 1
}
@@ -434,13 +415,29 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
.into_iter()
.map(|v| Value::Date(v.into()))
.collect(),
ConcreteDataType::Timestamp(_) => values
ConcreteDataType::Timestamp(TimestampType::Second(_)) => values
.ts_second_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_second(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => values
.ts_millisecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
.collect(),
ConcreteDataType::Null(_) => unreachable!(),
ConcreteDataType::List(_) => unreachable!(),
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => values
.ts_microsecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_microsecond(v)))
.collect(),
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => values
.ts_nanosecond_values
.into_iter()
.map(|v| Value::Timestamp(Timestamp::new_nanosecond(v)))
.collect(),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
}
}

@@ -452,6 +449,7 @@ fn is_null(null_mask: &BitVec, idx: usize) -> Option<bool> {
mod tests {
use std::any::Any;
use std::sync::Arc;
use std::{assert_eq, unimplemented, vec};

use api::helper::ColumnDataTypeWrapper;
use api::v1::column::{self, SemanticType, Values};
@@ -462,6 +460,7 @@ mod tests {
use common_time::timestamp::Timestamp;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::types::{TimestampMillisecondType, TimestampSecondType, TimestampType};
use datatypes::value::Value;
use snafu::ResultExt;
use table::error::Result as TableResult;
@@ -617,13 +616,12 @@ mod tests {
fn test_to_table_insert_request() {
let (columns, row_count) = mock_insert_batch();
let request = GrpcInsertRequest {
schema_name: "public".to_string(),
table_name: "demo".to_string(),
columns,
row_count,
region_number: 0,
};
let insert_req = to_table_insert_request(request).unwrap();
let insert_req = to_table_insert_request("greptime", "public", request).unwrap();

assert_eq!("greptime", insert_req.catalog_name);
assert_eq!("public", insert_req.schema_name);
@@ -666,6 +664,39 @@ mod tests {
);
}

#[test]
fn test_convert_timestamp_values() {
// second
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Second(TimestampSecondType)),
Values {
ts_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_second(1_i64)),
Value::Timestamp(Timestamp::new_second(2_i64)),
Value::Timestamp(Timestamp::new_second(3_i64)),
];
assert_eq!(expect, actual);

// millisecond
let actual = convert_values(
&ConcreteDataType::Timestamp(TimestampType::Millisecond(TimestampMillisecondType)),
Values {
ts_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Timestamp(Timestamp::new_millisecond(1_i64)),
Value::Timestamp(Timestamp::new_millisecond(2_i64)),
Value::Timestamp(Timestamp::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}

#[test]
fn test_is_null() {
let null_mask = BitVec::from_slice(&[0b0000_0001, 0b0000_1000]);
@@ -17,6 +17,4 @@ pub mod error;
pub mod insert;

pub use alter::{alter_expr_to_request, create_expr_to_request, create_table_schema};
pub use insert::{
build_alter_table_request, build_create_expr_from_insertion, column_to_vector, find_new_columns,
};
pub use insert::{build_create_expr_from_insertion, column_to_vector, find_new_columns};
@@ -16,7 +16,7 @@ common-runtime = { path = "../runtime" }
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../../datatypes" }
flatbuffers = "22"
flatbuffers = "23.1"
futures = "0.3"
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
@@ -18,18 +18,20 @@ use std::time::Duration;

use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use snafu::ResultExt;
use tonic::transport::{Channel as InnerChannel, Endpoint, Uri};
use snafu::{OptionExt, ResultExt};
use tonic::transport::{
Certificate, Channel as InnerChannel, ClientTlsConfig, Endpoint, Identity, Uri,
};
use tower::make::MakeConnection;

use crate::error;
use crate::error::Result;
use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsConfigSnafu, Result};

const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;

#[derive(Clone, Debug)]
pub struct ChannelManager {
config: ChannelConfig,
client_tls_config: Option<ClientTlsConfig>,
pool: Arc<Pool>,
}

@@ -52,7 +54,37 @@ impl ChannelManager {
recycle_channel_in_loop(cloned_pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
});

Self { config, pool }
Self {
config,
client_tls_config: None,
pool,
}
}

pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
let mut cm = Self::with_config(config.clone());

// setup tls
let path_config = config.client_tls.context(InvalidTlsConfigSnafu {
msg: "no config input",
})?;

let server_root_ca_cert = std::fs::read_to_string(path_config.server_ca_cert_path)
.context(InvalidConfigFilePathSnafu)?;
let server_root_ca_cert = Certificate::from_pem(server_root_ca_cert);
let client_cert = std::fs::read_to_string(path_config.client_cert_path)
.context(InvalidConfigFilePathSnafu)?;
let client_key = std::fs::read_to_string(path_config.client_key_path)
.context(InvalidConfigFilePathSnafu)?;
let client_identity = Identity::from_pem(client_cert, client_key);

cm.client_tls_config = Some(
ClientTlsConfig::new()
.ca_certificate(server_root_ca_cert)
.identity(client_identity),
);

Ok(cm)
}
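For context, a small sketch of how this new mutual-TLS path might be configured from the caller's side. The module paths in the imports and the certificate file locations are assumptions; only ChannelConfig, ClientTlsOption, client_tls_config and with_tls_config come from this diff.

```rust
// Hypothetical wiring; import paths and PEM file locations are placeholders.
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};

fn tls_channel_manager() -> common_grpc::error::Result<ChannelManager> {
    let config = ChannelConfig::new().client_tls_config(ClientTlsOption {
        server_ca_cert_path: "/etc/greptimedb/ca.pem".to_string(),
        client_cert_path: "/etc/greptimedb/client.pem".to_string(),
        client_key_path: "/etc/greptimedb/client-key.pem".to_string(),
    });
    // with_tls_config reads the three certificate files and attaches the
    // resulting ClientTlsConfig to every endpoint this manager builds.
    ChannelManager::with_tls_config(config)
}
```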

pub fn config(&self) -> &ChannelConfig {
@@ -119,8 +151,7 @@ impl ChannelManager {
}

fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
let mut endpoint =
Endpoint::new(format!("http://{addr}")).context(error::CreateChannelSnafu)?;
let mut endpoint = Endpoint::new(format!("http://{addr}")).context(CreateChannelSnafu)?;

if let Some(dur) = self.config.timeout {
endpoint = endpoint.timeout(dur);
@@ -152,6 +183,12 @@ impl ChannelManager {
if let Some(enabled) = self.config.http2_adaptive_window {
endpoint = endpoint.http2_adaptive_window(enabled);
}
if let Some(tls_config) = &self.client_tls_config {
endpoint = endpoint
.tls_config(tls_config.clone())
.context(CreateChannelSnafu)?;
}

endpoint = endpoint
.tcp_keepalive(self.config.tcp_keepalive)
.tcp_nodelay(self.config.tcp_nodelay);
@@ -160,6 +197,13 @@ impl ChannelManager {
}
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ClientTlsOption {
pub server_ca_cert_path: String,
pub client_cert_path: String,
pub client_key_path: String,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChannelConfig {
pub timeout: Option<Duration>,
@@ -174,6 +218,7 @@ pub struct ChannelConfig {
pub http2_adaptive_window: Option<bool>,
pub tcp_keepalive: Option<Duration>,
pub tcp_nodelay: bool,
pub client_tls: Option<ClientTlsOption>,
}

impl Default for ChannelConfig {
@@ -191,6 +236,7 @@ impl Default for ChannelConfig {
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
}
}
}
@@ -307,6 +353,16 @@ impl ChannelConfig {
..self
}
}

/// Set the value of tls client auth.
///
/// Disabled by default.
pub fn client_tls_config(self, client_tls_option: ClientTlsOption) -> Self {
Self {
client_tls: Some(client_tls_option),
..self
}
}
}

#[derive(Debug)]
@@ -401,7 +457,11 @@ mod tests {
async fn test_access_count() {
let pool = Arc::new(Pool::default());
let config = ChannelConfig::new();
let mgr = Arc::new(ChannelManager { pool, config });
let mgr = Arc::new(ChannelManager {
pool,
config,
client_tls_config: None,
});
let addr = "test_uri";

let mut joins = Vec::with_capacity(10);
@@ -443,6 +503,7 @@ mod tests {
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
},
default_cfg
);
@@ -459,7 +520,12 @@ mod tests {
.http2_keep_alive_while_idle(true)
.http2_adaptive_window(true)
.tcp_keepalive(Duration::from_secs(2))
.tcp_nodelay(false);
.tcp_nodelay(false)
.client_tls_config(ClientTlsOption {
server_ca_cert_path: "some_server_path".to_string(),
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
});

assert_eq!(
ChannelConfig {
@@ -475,6 +541,11 @@ mod tests {
http2_adaptive_window: Some(true),
tcp_keepalive: Some(Duration::from_secs(2)),
tcp_nodelay: false,
client_tls: Some(ClientTlsOption {
server_ca_cert_path: "some_server_path".to_string(),
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
}),
},
cfg
);
@@ -496,7 +567,11 @@ mod tests {
.http2_adaptive_window(true)
.tcp_keepalive(Duration::from_secs(2))
.tcp_nodelay(true);
let mgr = ChannelManager { pool, config };
let mgr = ChannelManager {
pool,
config,
client_tls_config: None,
};

let res = mgr.build_endpoint("test_addr");

@@ -512,7 +587,11 @@ mod tests {
let pool = Arc::new(pool);

let config = ChannelConfig::new();
let mgr = ChannelManager { pool, config };
let mgr = ChannelManager {
pool,
config,
client_tls_config: None,
};

let addr = "test_addr";
let res = mgr.get(addr);
@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::io;

use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Backtrace, ErrorCompat, Snafu};
@@ -22,6 +23,15 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid client tls config, {}", msg))]
InvalidTlsConfig { msg: String },

#[snafu(display("Invalid config file path, {}", source))]
InvalidConfigFilePath {
source: io::Error,
backtrace: Backtrace,
},

#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },

@@ -81,7 +91,9 @@ pub enum Error {
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::MissingField { .. }
Error::InvalidTlsConfig { .. }
| Error::InvalidConfigFilePath { .. }
| Error::MissingField { .. }
| Error::TypeMismatch { .. }
| Error::InvalidFlightData { .. } => StatusCode::InvalidArguments,
@@ -15,9 +15,10 @@
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::FlightDataExt;
use arrow_flight::utils::{flight_data_from_arrow_batch, flight_data_to_arrow_batch};
use api::v1::{AffectedRows, FlightMetadata};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, IpcMessage, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::arrow;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
@@ -39,38 +40,58 @@ pub enum FlightMessage {
AffectedRows(usize),
}

#[derive(Default)]
pub struct FlightEncoder {
write_options: writer::IpcWriteOptions,
data_gen: writer::IpcDataGenerator,
dictionary_tracker: writer::DictionaryTracker,
}

impl Default for FlightEncoder {
fn default() -> Self {
Self {
write_options: writer::IpcWriteOptions::default(),
data_gen: writer::IpcDataGenerator::default(),
dictionary_tracker: writer::DictionaryTracker::new(false),
}
}
}

impl FlightEncoder {
pub fn encode(&self, flight_message: FlightMessage) -> FlightData {
pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
match flight_message {
FlightMessage::Schema(schema) => {
SchemaAsIpc::new(schema.arrow_schema(), &self.write_options).into()
}
FlightMessage::Recordbatch(recordbatch) => {
let (flight_dictionaries, flight_batch) = flight_data_from_arrow_batch(
recordbatch.df_record_batch(),
&self.write_options,
);
let (encoded_dictionaries, encoded_batch) = self
.data_gen
.encoded_batch(
recordbatch.df_record_batch(),
&mut self.dictionary_tracker,
&self.write_options,
)
.expect("DictionaryTracker configured above to not fail on replacement");

// TODO(LFC): Handle dictionary as FlightData here, when we supported Arrow's Dictionary DataType.
// Currently we don't have a datatype corresponding to Arrow's Dictionary DataType,
// so there won't be any "dictionaries" here. Assert to be sure about it, and
// perform a "testing guard" in case we forgot to handle the possible "dictionaries"
// here in the future.
debug_assert_eq!(flight_dictionaries.len(), 0);
debug_assert_eq!(encoded_dictionaries.len(), 0);

flight_batch
encoded_batch.into()
}
FlightMessage::AffectedRows(rows) => {
let ext_data = FlightDataExt {
affected_rows: rows as _,
let metadata = FlightMetadata {
affected_rows: Some(AffectedRows { value: rows as _ }),
}
.encode_to_vec();
FlightData::new(None, IpcMessage(build_none_flight_msg()), vec![], ext_data)
FlightData::new(
None,
IpcMessage(build_none_flight_msg().into()),
metadata,
vec![],
)
}
}
}
@@ -83,7 +104,8 @@ pub struct FlightDecoder {

impl FlightDecoder {
pub fn try_decode(&mut self, flight_data: FlightData) -> Result<FlightMessage> {
let message = root_as_message(flight_data.data_header.as_slice()).map_err(|e| {
let bytes = flight_data.data_header.slice(..);
let message = root_as_message(&bytes).map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
}
@@ -91,9 +113,15 @@ impl FlightDecoder {
})?;
match message.header_type() {
MessageHeader::NONE => {
let ext_data = FlightDataExt::decode(flight_data.data_body.as_slice())
let metadata = FlightMetadata::decode(flight_data.app_metadata)
.context(DecodeFlightDataSnafu)?;
Ok(FlightMessage::AffectedRows(ext_data.affected_rows as _))
if let Some(AffectedRows { value }) = metadata.affected_rows {
return Ok(FlightMessage::AffectedRows(value as _));
}
InvalidFlightDataSnafu {
reason: "Expecting FlightMetadata have some meaningful content.",
}
.fail()
}
MessageHeader::Schema => {
let arrow_schema = ArrowSchema::try_from(&flight_data).map_err(|e| {
@@ -170,7 +198,7 @@ pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<
}
}

fn build_none_flight_msg() -> Vec<u8> {
fn build_none_flight_msg() -> Bytes {
let mut builder = FlatBufferBuilder::new();

let mut message = arrow::ipc::MessageBuilder::new(&mut builder);
@@ -181,7 +209,7 @@ fn build_none_flight_msg() -> Vec<u8> {
let data = message.finish();
builder.finish(data, None);

builder.finished_data().to_vec()
builder.finished_data().into()
}

#[cfg(test)]
@@ -67,7 +67,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
return Ok(vals);
},
)+
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
}
}};
}
Some files were not shown because too many files have changed in this diff.