mirror of https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-03 20:02:54 +00:00

Compare commits
116 Commits
v0.1.0-alp ... v0.1.0-alp
| Author | SHA1 | Date |
|---|---|---|
|  | c1a9f84c7f |  |
|  | be897efd01 |  |
|  | c06e04afbb |  |
|  | e77a7f253c |  |
|  | 7d6f4cd88b |  |
|  | 83ac6598b6 |  |
|  | 4c925e0079 |  |
|  | c6128ec0a4 |  |
|  | 7c34b009ec |  |
|  | 70edd4d55b |  |
|  | 6beea73590 |  |
|  | c0d3533d10 |  |
|  | 9989a8c192 |  |
|  | 19dd8b1246 |  |
|  | 1e9918ddf9 |  |
|  | 4ce62f850b |  |
|  | 83d57f9111 |  |
|  | 803b7f0633 |  |
|  | 37ca5ba380 |  |
|  | c1d32bdf2b |  |
|  | 83509f31f4 |  |
|  | 926022e14c |  |
|  | 2f2609d8c6 |  |
|  | ecadbc1435 |  |
|  | afac885c10 |  |
|  | 5d62e193bd |  |
|  | 7d77913e88 |  |
|  | 3f45a0d337 |  |
|  | a1e97c990f |  |
|  | 4ae63b7089 |  |
|  | b0925d94ed |  |
|  | fc9276c79d |  |
|  | 184ca78a4d |  |
|  | ebbf1e43b5 |  |
|  | 54fe81dad9 |  |
|  | af935671b2 |  |
|  | 74adb077bc |  |
|  | 54c7a8be02 |  |
|  | ea5146762a |  |
|  | 788b5362a1 |  |
|  | 028a69e349 |  |
|  | 9a30ba00c4 |  |
|  | 8149932bad |  |
|  | 89e4084af4 |  |
|  | 39df25a8f6 |  |
|  | b2ad0e972b |  |
|  | 18e6740ac9 |  |
|  | a7dc86ffe5 |  |
|  | 71482b38d7 |  |
|  | dc9b5339bf |  |
|  | 5e05c8f884 |  |
|  | aafc26c788 |  |
|  | 64243e3a7d |  |
|  | 36a13dafb7 |  |
|  | 637837ae44 |  |
|  | ae8afd3711 |  |
|  | 3db8f95169 |  |
|  | 43aefc5d74 |  |
|  | b33937f48e |  |
|  | 9bc4c0d9c7 |  |
|  | 302d7ec41b |  |
|  | cc46194f29 |  |
|  | 5dfc24e4f6 |  |
|  | 4987136850 |  |
|  | 6960739b3d |  |
|  | 49d83abc0c |  |
|  | ecb71f81be |  |
|  | 6f5639fccd |  |
|  | 1e9d09099e |  |
|  | daad38360f |  |
|  | bae0243959 |  |
|  | d162fbb598 |  |
|  | 0959c1d16b |  |
|  | e428a84446 |  |
|  | 58c37f588d |  |
|  | d195a22f40 |  |
|  | 6775c5be87 |  |
|  | 5e89f1ba4e |  |
|  | 2664436194 |  |
|  | b91c77b862 |  |
|  | 4015dd8075 |  |
|  | b39dbcbda9 |  |
|  | 0e8411c2ff |  |
|  | a9b42b436d |  |
|  | 9428e70971 |  |
|  | 32d51947a4 |  |
|  | 5fb417ec7c |  |
|  | 90fcaa8487 |  |
|  | c609b193a1 |  |
|  | 1305924423 |  |
|  | ea753b9ac0 |  |
|  | 72f05a3137 |  |
|  | 9e58311ecd |  |
|  | 2679faf911 |  |
|  | fa54870197 |  |
|  | 3988770266 |  |
|  | 777a3182c5 |  |
|  | 5b675f54a8 |  |
|  | 627d444723 |  |
|  | d1730a9577 |  |
|  | ca7ed67dc5 |  |
|  | 072e5f78b4 |  |
|  | 8f5ecefc90 |  |
|  | afd9866709 |  |
|  | 89d5306740 |  |
|  | 50cc0e9b51 |  |
|  | 7762873842 |  |
|  | 4aa24f0639 |  |
|  | f1b95e25a1 |  |
|  | 041cd422a1 |  |
|  | f907a93b97 |  |
|  | a6eb213adf |  |
|  | 5fcad7a175 |  |
|  | 0566f812d3 |  |
|  | 334fd26bc5 |  |
|  | 8ffc078f88 |  |
@@ -1,2 +1,5 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

[alias]
sqlness = "run --bin sqlness-runner --"
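With this alias in place, `cargo sqlness` is shorthand for `cargo run --bin sqlness-runner --`; this is the invocation the Makefile and the CI workflow further below switch to.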
@@ -2,3 +2,9 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key

# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
70 .github/workflows/coverage.yml vendored
@@ -1,70 +0,0 @@
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - "main"
      - "develop"
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: Code coverage

env:
  RUST_TOOLCHAIN: nightly-2022-12-20

jobs:
  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: KyleMayes/install-llvm-action@v1
        with:
          version: "14.0"
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Collect coverage data
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
        env:
          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./lcov.info
          flags: rust
          fail_ci_if_error: true
          verbose: true
78 .github/workflows/develop.yml vendored
@@ -7,6 +7,7 @@ on:
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - develop
@@ -110,6 +111,41 @@ jobs:
          # GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          # UNITTEST_LOG_DIR: "__unittest_logs"

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run etcd
        run: |
          ETCD_VER=v3.5.7
          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
          mkdir -p /tmp/etcd-download
          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz

          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
          nohup etcd >/tmp/etcd.log 2>&1 &
      - name: Run sqlness
        run: cargo sqlness && ls /tmp
      - name: Upload sqlness logs
        uses: actions/upload-artifact@v3
        with:
          name: sqlness-logs
          path: /tmp/greptime-*.log
          retention-days: 3

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
@@ -147,3 +183,45 @@ jobs:
        uses: Swatinem/rust-cache@v2
      - name: Run cargo clippy
        run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: KyleMayes/install-llvm-action@v1
        with:
          version: "14.0"
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Collect coverage data
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
        env:
          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./lcov.info
          flags: rust
          fail_ci_if_error: true
          verbose: true
55 .github/workflows/docs.yml vendored Normal file
@@ -0,0 +1,55 @@
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - develop
      - main
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: CI

# To pass the required status check, see:
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'
39 .github/workflows/release.yml vendored
@@ -18,6 +18,8 @@ env:
  # In the future, we can change SCHEDULED_PERIOD to nightly.
  SCHEDULED_PERIOD: weekly

  CARGO_PROFILE: weekly

jobs:
  build:
    name: Build binary
@@ -26,10 +28,10 @@ jobs:
      # The file format is greptime-<os>-<arch>
      include:
        - arch: x86_64-unknown-linux-gnu
          os: ubuntu-latest-16-cores
          os: ubuntu-2004-16-cores
          file: greptime-linux-amd64
        - arch: aarch64-unknown-linux-gnu
          os: ubuntu-latest-16-cores
          os: ubuntu-2004-16-cores
          file: greptime-linux-arm64
        - arch: aarch64-apple-darwin
          os: macos-latest
@@ -38,6 +40,7 @@ jobs:
          os: macos-latest
          file: greptime-darwin-amd64
    runs-on: ${{ matrix.os }}
    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
@@ -67,6 +70,25 @@ jobs:
        run: |
          brew install protobuf

      - name: Install etcd for linux
        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
        run: |
          ETCD_VER=v3.5.7
          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
          mkdir -p /tmp/etcd-download
          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz

          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
          nohup etcd >/tmp/etcd.log 2>&1 &

      - name: Install etcd for macos
        if: contains(matrix.arch, 'darwin')
        run: |
          brew install etcd
          brew services start etcd

      - name: Install dependencies for linux
        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
        run: |
@@ -82,13 +104,16 @@ jobs:
      - name: Output package versions
        run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

      - name: Run tests
        run: make unit-test integration-test sqlness-test

      - name: Run cargo build
        run: cargo build ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
        run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}

      - name: Calculate checksum and rename binary
        shell: bash
        run: |
          cd target/${{ matrix.arch }}/release
          cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
          chmod +x greptime
          tar -zcvf ${{ matrix.file }}.tgz greptime
          echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
@@ -97,17 +122,18 @@ jobs:
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.file }}
          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.tgz
          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz

      - name: Upload checksum of artifacts
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.file }}.sha256sum
          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.sha256sum
          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

  release:
    name: Release artifacts
    needs: [build]
    runs-on: ubuntu-latest
    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
@@ -150,6 +176,7 @@ jobs:
    name: Build docker image
    needs: [build]
    runs-on: ubuntu-latest
    if: github.repository == 'GreptimeTeam/greptimedb'
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
.pre-commit-config.yaml
@@ -5,11 +5,11 @@ repos:
      - id: conventional-pre-commit
        stages: [commit-msg]

  - repo: https://github.com/DevinR528/cargo-sort
    rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
    hooks:
      - id: cargo-sort
        args: ["--workspace"]
  # - repo: https://github.com/DevinR528/cargo-sort
  #   rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
  #   hooks:
  #     - id: cargo-sort
  #       args: ["--workspace"]

  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
1241 Cargo.lock generated
File diff suppressed because it is too large.
18 Cargo.toml
@@ -12,6 +12,7 @@ members = [
    "src/common/function-macro",
    "src/common/grpc",
    "src/common/grpc-expr",
    "src/common/procedure",
    "src/common/query",
    "src/common/recordbatch",
    "src/common/runtime",
@@ -26,6 +27,7 @@ members = [
    "src/meta-srv",
    "src/mito",
    "src/object-store",
    "src/partition",
    "src/promql",
    "src/query",
    "src/script",
@@ -50,6 +52,7 @@ arrow-flight = "29.0"
arrow-schema = { version = "29.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(LFC): Use released Datafusion when it officially depends on Arrow 29.0
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
@@ -58,12 +61,25 @@ datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git",
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
futures = "0.3"
futures-util = "0.3"
parquet = "29.0"
paste = "1.0"
prost = "0.11"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.28"
tokio = { version = "1", features = ["full"] }
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
tonic = "0.8"
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

[profile.release]
debug = true

[profile.weekly]
inherits = "release"
strip = true
lto = "thin"
debug = false
incremental = false
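Since the `weekly` profile inherits from `release`, a build selects it with `cargo build --profile weekly`, and Cargo places its output under `target/<target-triple>/weekly/` instead of `.../release/`; this is why the release workflow above swaps `release` for `${{ env.CARGO_PROFILE }}` in its build command and artifact paths.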
2 Makefile
@@ -35,7 +35,7 @@ integration-test: ## Run integation test.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
	cargo run --bin sqlness-runner
	cargo sqlness

.PHONY: check
check: ## Cargo check all the targets.
README.md
@@ -153,6 +153,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [Developer
  Guide](https://docs.greptime.com/developer-guide/overview.html)

### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

### SDK

- [GreptimeDB Java
@@ -169,7 +172,7 @@ For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam

## Community

Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
ask for help by filling an issue with a detailed description of what you were trying to do
and what went wrong. If you have any questions or if you would like to get involved in our
community, please check out:
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -32,7 +32,6 @@ use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

const DATABASE_NAME: &str = "greptime";
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
@@ -100,7 +99,6 @@ async fn write_data(
        let record_batch = record_batch.unwrap();
        let (columns, row_count) = convert_record_batch(record_batch);
        let request = InsertRequest {
            schema_name: "public".to_string(),
            table_name: TABLE_NAME.to_string(),
            region_number: 0,
            columns,
@@ -424,7 +422,7 @@ fn main() {
        .unwrap()
        .block_on(async {
            let client = Client::with_urls(vec![&args.endpoint]);
            let db = Database::new(DATABASE_NAME, client);
            let db = Database::with_client(client);

            if !args.skip_write {
                do_write(&args, &db).await;
config/datanode.example.toml
@@ -1,12 +1,20 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_hostname = '127.0.0.1'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
enable_memory_catalog = false

[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
read_batch_size = 128
sync_write = false

[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
config/metasrv.example.toml
@@ -2,3 +2,5 @@ bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
datanode_lease_secs = 15
# selector: 'LeaseBased', 'LoadBased'
selector = 'LeaseBased'
config/standalone.example.toml
@@ -1,12 +1,20 @@
node_id = 0
mode = 'standalone'
wal_dir = '/tmp/greptimedb/wal/'
enable_memory_catalog = false

[http_options]
addr = '127.0.0.1:4000'
timeout = "30s"

[wal]
dir = "/tmp/greptimedb/wal"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '50GB'
read_batch_size = 128
sync_write = false

[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
151 docs/rfcs/2023-01-03-procedure-framework.md Normal file
@@ -0,0 +1,151 @@
---
Feature Name: "procedure-framework"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/286
Date: 2023-01-03
Author: "Yingwen <realevenyag@gmail.com>"
---

Procedure Framework
----------------------

# Summary
A framework for executing operations in a fault-tolerant manner.

# Motivation
Some operations in GreptimeDB require multiple steps to implement. For example, creating a table needs to:
1. Check whether the table exists
2. Create the table in the table engine
    1. Create a region for the table in the storage engine
    2. Persist the metadata of the table to the table manifest
3. Add the table to the catalog manager

If the node dies or restarts in the middle of creating a table, it could leave the system in an inconsistent state. The procedure framework, inspired by [Apache HBase's ProcedureV2 framework](https://github.com/apache/hbase/blob/bfc9fc9605de638785435e404430a9408b99a8d0/src/main/asciidoc/_chapters/pv2.adoc) and [Apache Accumulo's FATE framework](https://accumulo.apache.org/docs/2.x/administration/fate), aims to provide a unified way to implement multi-step operations that is tolerant to failure.

# Details
## Overview
The procedure framework consists of the following primary components:
- A `Procedure` represents an operation or a set of operations to be performed step-by-step.
- `ProcedureManager` is the runtime that runs `Procedure`s. It executes the submitted procedures, stores their states in the `ProcedureStore`, and restores procedures from the `ProcedureStore` when the database restarts.
- `ProcedureStore` is a storage layer for persisting the procedure state.


## Procedures
The `ProcedureManager` keeps calling `Procedure::execute()` until the `Procedure` is done, so the operation of the `Procedure` should be [idempotent](https://developer.mozilla.org/en-US/docs/Glossary/Idempotent): it needs to be able to undo or replay a partial execution of itself.

```rust
trait Procedure {
    fn execute(&mut self, ctx: &Context) -> Result<Status>;

    fn dump(&self) -> Result<String>;

    fn rollback(&self) -> Result<()>;

    // other methods...
}
```

The `Status` is an enum that has the following variants:
```rust
enum Status {
    Executing {
        persist: bool,
    },
    Suspended {
        subprocedures: Vec<ProcedureWithId>,
        persist: bool,
    },
    Done,
}
```

A call to `execute()` can result in the following possibilities:
- `Ok(Status::Done)`: we are done
- `Ok(Status::Executing { .. })`: there are remaining steps to do
- `Ok(Status::Suspended { subprocedures, .. })`: execution is suspended and can be resumed later, after the sub-procedures are done
- `Err(e)`: an error occurs during execution and the procedure is unable to proceed anymore

Users need to assign a unique `ProcedureId` to the procedure, and the procedure can get this id via the `Context`. The `ProcedureId` is typically a UUID.

```rust
struct Context {
    id: ProcedureId,
    // other fields ...
}
```

The `ProcedureManager` calls `Procedure::dump()` to serialize the internal state of the procedure and writes it to the `ProcedureStore`. The `Status` has a field `persist` to tell the `ProcedureManager` whether it needs persistence.
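To make the contract concrete, here is a minimal sketch of a procedure that tracks its current step so a replay after a crash can skip completed work. `CreateTableProcedure`, its `step` counter, and the hand-rolled JSON in `dump()` are illustrative assumptions, not part of this RFC.

```rust
// Illustrative only: a step-counting procedure built on the sketched trait.
struct CreateTableProcedure {
    table_name: String,
    // Index of the next step; persisted via dump() so a restart resumes here.
    step: u32,
}

impl Procedure for CreateTableProcedure {
    fn execute(&mut self, _ctx: &Context) -> Result<Status> {
        match self.step {
            0 => { /* check whether the table exists (safe to repeat) */ }
            1 => { /* create the table in the table engine */ }
            2 => { /* register the table in the catalog manager */ }
            _ => return Ok(Status::Done),
        }
        self.step += 1;
        // Ask the manager to persist the new state before the next step runs.
        Ok(Status::Executing { persist: true })
    }

    fn dump(&self) -> Result<String> {
        // Serialize just enough state to resume after a restart.
        Ok(format!(
            r#"{{"table_name":"{}","step":{}}}"#,
            self.table_name, self.step
        ))
    }

    fn rollback(&self) -> Result<()> {
        // Best-effort cleanup of whatever the finished steps created.
        Ok(())
    }
}
```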
## Sub-procedures
A procedure may need to create some sub-procedures to process its subtasks. For example, creating a distributed table with multiple regions (partitions) needs to set up the regions on each node, so the parent procedure should instantiate a sub-procedure for each region. The `ProcedureManager` makes sure that the parent procedure does not proceed until all sub-procedures have finished successfully.

The procedure can submit sub-procedures to the `ProcedureManager` by returning `Status::Suspended`. It needs to assign a procedure id to each sub-procedure manually so it can track their status; a sketch follows the definition below.
```rust
struct ProcedureWithId {
    id: ProcedureId,
    procedure: BoxedProcedure,
}
```
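A sketch of that hand-off, continuing the illustrative example above; `CreateRegionProcedure` and the UUID-based id assignment are assumptions (the RFC only says the id is typically a UUID).

```rust
// Inside a parent procedure's execute(): create one sub-procedure per region
// and suspend until the manager reports that all of them are done.
fn spawn_region_subprocedures(region_ids: &[u32]) -> Status {
    let subprocedures = region_ids
        .iter()
        .map(|region_id| ProcedureWithId {
            // The parent assigns and remembers each child's id to track it.
            id: ProcedureId::new_v4(),
            procedure: Box::new(CreateRegionProcedure::new(*region_id)),
        })
        .collect();
    Status::Suspended { subprocedures, persist: true }
}
```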
## ProcedureStore
We might need to provide two different `ProcedureStore` implementations:
- In standalone mode, it stores data on the local disk.
- In distributed mode, it stores data on the meta server or the object store service.

These implementations should share the same storage structure. They store each procedure's state in a unique path based on the procedure id:

```
Sample paths:

/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.step
/procedures/{PROCEDURE_ID}/000003.commit
```

`ProcedureStore` behaves like a WAL. Before performing each step, the `ProcedureManager` can write the procedure's current state to the `ProcedureStore`, which stores the state in a `.step` file. The `000001` in the path is a monotonically increasing sequence number of the step. After the procedure is done, the `ProcedureManager` puts a `.commit` file to indicate the procedure is finished (committed).

The `ProcedureManager` can remove the procedure's files once the procedure is done, but it needs to leave the `.commit` file as the last one to remove, in case of failure during removal.
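A minimal sketch of this commit protocol for the standalone (local-disk) case; the `LocalStore` type and its method names are illustrative, not the RFC's API.

```rust
use std::fs;
use std::path::PathBuf;

// Illustrative local-disk store following the layout shown above.
struct LocalStore {
    root: PathBuf, // e.g. "/procedures"
}

impl LocalStore {
    // Persist one step under a monotonically increasing sequence number.
    fn write_step(&self, procedure_id: &str, seq: u32, state: &str) -> std::io::Result<()> {
        let dir = self.root.join(procedure_id);
        fs::create_dir_all(&dir)?;
        fs::write(dir.join(format!("{seq:06}.step")), state)
    }

    // An empty marker file whose presence means "committed".
    fn commit(&self, procedure_id: &str, seq: u32) -> std::io::Result<()> {
        fs::write(self.root.join(procedure_id).join(format!("{seq:06}.commit")), b"")
    }
}
```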
## ProcedureManager
`ProcedureManager` executes procedures submitted to it.

```rust
trait ProcedureManager {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
}
```

It supports the following operations:
- Register a `ProcedureLoader` by the type name of the `Procedure`.
- Submit a `Procedure` to the manager and execute it.

When the `ProcedureManager` starts, it loads procedures from the `ProcedureStore` and restores them via their `ProcedureLoader`s. The manager stores the type name from `Procedure::type_name()` together with the data from `Procedure::dump()` in the `.step` file, and uses the type name to find a `ProcedureLoader` that recovers the procedure from its data.

```rust
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
```
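A sketch of wiring a loader for the earlier `CreateTableProcedure` example; the `from_dumped_state` helper is an assumed function that parses whatever `dump()` produced.

```rust
// Illustrative registration: the manager looks this loader up by type name
// when it finds a dumped CreateTableProcedure in the ProcedureStore.
manager.register_loader(
    "CreateTableProcedure",
    Box::new(|data: &str| {
        let procedure = CreateTableProcedure::from_dumped_state(data)?; // assumed helper
        Ok(Box::new(procedure) as BoxedProcedure)
    }),
)?;
```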
## Rollback
The rollback step is supposed to clean up the resources created during the `execute()` step. When a procedure has failed, the `ProcedureManager` puts a `.rollback` file and calls the `Procedure::rollback()` method.

```text
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.rollback
```

Rollback is complicated to implement, so some procedures might not support rollback or may only provide a best-effort approach.

## Locking
The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object such as a table, so that other procedures are unable to modify the same table while the current one is executing.

# Drawbacks
The procedure framework introduces additional complexity and overhead to our database:
- To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server.
- We need to rewrite the logic of creating/dropping/altering a table using the procedure framework.

# Alternatives
Another approach is to tolerate failure during execution and allow users to retry the operation until it succeeds. But we would still need to:
- Make each step idempotent
- Record the status somewhere to check whether we are done
92 docs/rfcs/2023-02-01-table-compaction.md Normal file
@@ -0,0 +1,92 @@
---
Feature Name: "table-compaction"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/930
Date: 2023-02-01
Author: "Lei, HUANG <mrsatangel@gmail.com>"
---

# Table Compaction

---

## Background

GreptimeDB uses an LSM-tree based storage engine that flushes memtables to SSTs for persistence.
But currently it only supports level 0, and SST files in level 0 are not guaranteed to contain only rows with disjoint time ranges.
That is to say, different SST files in level 0 may contain overlapping timestamps.
The consequence is that, in order to retrieve rows in some time range, all files need to be scanned, which brings a lot of IO overhead.

Also, just like in other LSMT engines, deletes/updates to existing primary keys are converted to new rows with a delete/update mark and appended to SSTs on flushing.
We need to merge the operations on the same primary keys so that we don't have to go through all SST files to find the final state of these primary keys.

## Goal

Implement a compaction framework to:
- maintain SSTs in timestamp order to accelerate queries with timestamp conditions;
- merge rows with the same primary key;
- purge expired SSTs;
- accommodate other tasks like data rollup/indexing.


## Overview

Table compaction involves the following components:
- Compaction scheduler: runs compaction tasks and limits the consumed resources;
- Compaction strategy: finds the SSTs to compact and determines the output files of compaction;
- Compaction task: reads the rows from input SSTs and writes them to the output files.

## Implementation

### Compaction scheduler

`CompactionScheduler` is an executor that continuously polls and executes compaction requests from a task queue.

```rust
#[async_trait]
pub trait CompactionScheduler {
    /// Schedules a compaction task.
    async fn schedule(&self, task: CompactionRequest) -> Result<()>;

    /// Stops the compaction scheduler.
    async fn stop(&self) -> Result<()>;
}
```


### Compaction triggering

Currently, we can check whether to compact tables when a memtable is flushed to an SST.

https://github.com/GreptimeTeam/greptimedb/blob/4015dd80752e1e6aaa3d7cacc3203cb67ed9be6d/src/storage/src/flush.rs#L245


### Compaction strategy

`CompactionStrategy` defines how to pick SSTs in all levels for compaction.

```rust
pub trait CompactionStrategy {
    fn pick(
        &self,
        ctx: CompactionContext,
        levels: &LevelMetas,
    ) -> Result<CompactionTask>;
}
```

The most suitable compaction strategy for the time-series scenario would be a hybrid strategy that combines time-window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.

We can first group SSTs in level n into buckets according to some predefined time window. Within that window, SSTs are compacted in a size-tiered manner (find SSTs with similar sizes and compact them to level n+1).
SSTs from different time windows are never compacted together.
That strategy guarantees SSTs in each level are mainly sorted in timestamp order, which boosts queries with an explicit timestamp condition, while size-tiered compaction minimizes the impact on foreground writes.
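As a rough illustration of the bucketing step, the sketch below groups SST metadata into fixed time windows; `FileMeta`, its fields, and the window length are assumptions for the example rather than the engine's real types.

```rust
use std::collections::BTreeMap;

// Illustrative SST metadata; the real engine tracks more than this.
struct FileMeta {
    file_id: u64,
    start_ts_secs: i64, // smallest timestamp contained in the file
    size_bytes: u64,
}

// Group files into fixed-duration windows keyed by the window's start time.
// Files within one window become candidates for size-tiered compaction;
// files in different windows are never compacted together.
fn bucket_by_time_window(files: &[FileMeta], window_secs: i64) -> BTreeMap<i64, Vec<&FileMeta>> {
    let mut buckets: BTreeMap<i64, Vec<&FileMeta>> = BTreeMap::new();
    for file in files {
        let window_start = file.start_ts_secs.div_euclid(window_secs) * window_secs;
        buckets.entry(window_start).or_default().push(file);
    }
    buckets
}
```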
### Alternatives

Currently, GreptimeDB's storage engine [only supports two levels](https://github.com/GreptimeTeam/greptimedb/blob/43aefc5d74dfa73b7819cae77b7eb546d8534a41/src/storage/src/sst.rs#L32).
For level 0, we can start with a simple time-window based leveled compaction, which reads all SSTs in level 0, aligns them to time windows with a fixed duration, and merges them with the SSTs in level 1 within the same time window, to ensure there is only one sorted run in level 1.
rust-toolchain
@@ -1 +0,0 @@
nightly-2022-12-20
2 rust-toolchain.toml Normal file
@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly-2022-12-20"
src/api/Cargo.toml
@@ -10,9 +10,10 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
prost = "0.11"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3e6349be127b65a8b42a38cda9d527ec423ca77d" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
tonic.workspace = true

[build-dependencies]
tonic-build = "0.8"
src/api/build.rs
@@ -1,33 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;

fn main() {
    let default_out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
    tonic_build::configure()
        .file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
        .compile(
            &[
                "greptime/v1/greptime.proto",
                "greptime/v1/meta/common.proto",
                "greptime/v1/meta/heartbeat.proto",
                "greptime/v1/meta/route.proto",
                "greptime/v1/meta/store.proto",
                "prometheus/remote/remote.proto",
            ],
            &["."],
        )
        .expect("compile proto");
}
greptime/v1/column.proto
@@ -1,85 +0,0 @@
syntax = "proto3";

package greptime.v1;

message Column {
  string column_name = 1;

  enum SemanticType {
    TAG = 0;
    FIELD = 1;
    TIMESTAMP = 2;
  }
  SemanticType semantic_type = 2;

  message Values {
    repeated int32 i8_values = 1;
    repeated int32 i16_values = 2;
    repeated int32 i32_values = 3;
    repeated int64 i64_values = 4;

    repeated uint32 u8_values = 5;
    repeated uint32 u16_values = 6;
    repeated uint32 u32_values = 7;
    repeated uint64 u64_values = 8;

    repeated float f32_values = 9;
    repeated double f64_values = 10;

    repeated bool bool_values = 11;
    repeated bytes binary_values = 12;
    repeated string string_values = 13;

    repeated int32 date_values = 14;
    repeated int64 datetime_values = 15;
    repeated int64 ts_second_values = 16;
    repeated int64 ts_millisecond_values = 17;
    repeated int64 ts_microsecond_values = 18;
    repeated int64 ts_nanosecond_values = 19;
  }
  // The array of non-null values in this column.
  //
  // For example: suppose there is a column "foo" that contains some int32 values (1, 2, 3, 4, 5, null, 7, 8, 9, null);
  // column:
  //   column_name: foo
  //   semantic_type: Tag
  //   values: 1, 2, 3, 4, 5, 7, 8, 9
  //   null_masks: 00100000 00000010
  Values values = 3;

  // Mask maps the positions of null values.
  // If a bit in null_mask is 1, it indicates that the column value at that position is null.
  bytes null_mask = 4;

  // Helpful in creating vector from column.
  ColumnDataType datatype = 5;
}

message ColumnDef {
  string name = 1;
  ColumnDataType datatype = 2;
  bool is_nullable = 3;
  bytes default_constraint = 4;
}

enum ColumnDataType {
  BOOLEAN = 0;
  INT8 = 1;
  INT16 = 2;
  INT32 = 3;
  INT64 = 4;
  UINT8 = 5;
  UINT16 = 6;
  UINT32 = 7;
  UINT64 = 8;
  FLOAT32 = 9;
  FLOAT64 = 10;
  BINARY = 11;
  STRING = 12;
  DATE = 13;
  DATETIME = 14;
  TIMESTAMP_SECOND = 15;
  TIMESTAMP_MILLISECOND = 16;
  TIMESTAMP_MICROSECOND = 17;
  TIMESTAMP_NANOSECOND = 18;
}
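The `null_mask` comment above implies a least-significant-bit-first bitmap: index 5 sets bit 5 of the first byte, giving `00100000`, and index 9 sets bit 1 of the second byte, giving `00000010`. A small decoding sketch consistent with that example (not taken from the repository):

```rust
// Returns true when the value at `idx` (0-based row position) is null,
// using LSB-first bit order within each byte, as in the example above.
fn is_null(null_mask: &[u8], idx: usize) -> bool {
    let byte = idx / 8;
    byte < null_mask.len() && (null_mask[byte] >> (idx % 8)) & 1 == 1
}

// With the comment's example mask [0b0010_0000, 0b0000_0010]:
// is_null(mask, 5) == true and is_null(mask, 9) == true; all others are false.
```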
greptime/v1/common.proto
@@ -1,13 +0,0 @@
syntax = "proto3";

package greptime.v1;

message RequestHeader {
  string tenant = 1;
}

message ResultHeader {
  uint32 version = 1;
  uint32 code = 2;
  string err_msg = 3;
}
greptime/v1/database.proto
@@ -1,56 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/ddl.proto";
import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

message DatabaseRequest {
  string name = 1;
  repeated ObjectExpr exprs = 2;
}

message DatabaseResponse {
  repeated ObjectResult results = 1;
}

message ObjectExpr {
  oneof request {
    InsertRequest insert = 1;
    QueryRequest query = 2;
    DdlRequest ddl = 3;
  }
}

message QueryRequest {
  oneof query {
    string sql = 1;
    bytes logical_plan = 2;
  }
}

message InsertRequest {
  string schema_name = 1;
  string table_name = 2;

  // Data is represented here.
  repeated Column columns = 3;

  // The row_count of all columns, which include null and non-null values.
  //
  // Note: the row_count of all columns in a InsertRequest must be same.
  uint32 row_count = 4;

  // The region number of current insert request.
  uint32 region_number = 5;
}

message ObjectResult {
  ResultHeader header = 1;
  repeated bytes flight_data = 2;
}

message FlightDataExt {
  uint32 affected_rows = 1;
}
greptime/v1/ddl.proto
@@ -1,74 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

// "Data Definition Language" requests, that create, modify or delete the database structures but not the data.
// `DdlRequest` could carry more information than plain SQL, for example, the "table_id" in `CreateTableExpr`.
// So create a new DDL expr if you need it.
message DdlRequest {
  oneof expr {
    CreateDatabaseExpr create_database = 1;
    CreateTableExpr create_table = 2;
    AlterExpr alter = 3;
    DropTableExpr drop_table = 4;
  }
}

message CreateTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  string desc = 4;
  repeated ColumnDef column_defs = 5;
  string time_index = 6;
  repeated string primary_keys = 7;
  bool create_if_not_exists = 8;
  map<string, string> table_options = 9;
  TableId table_id = 10;
  repeated uint32 region_ids = 11;
}

message AlterExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
  oneof kind {
    AddColumns add_columns = 4;
    DropColumns drop_columns = 5;
  }
}

message DropTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message CreateDatabaseExpr {
  //TODO(hl): maybe rename to schema_name?
  string database_name = 1;
}

message AddColumns {
  repeated AddColumn add_columns = 1;
}

message DropColumns {
  repeated DropColumn drop_columns = 1;
}

message AddColumn {
  ColumnDef column_def = 1;
  bool is_key = 2;
}

message DropColumn {
  string name = 1;
}

message TableId {
  uint32 id = 1;
}
greptime/v1/greptime.proto
@@ -1,19 +0,0 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/common.proto";
import "greptime/v1/database.proto";

service Greptime {
  rpc Batch(BatchRequest) returns (BatchResponse) {}
}

message BatchRequest {
  RequestHeader header = 1;
  repeated DatabaseRequest databases = 2;
}

message BatchResponse {
  repeated DatabaseResponse databases = 1;
}
greptime/v1/meta/common.proto
@@ -1,48 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

message RequestHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which the request is sent to.
  uint64 cluster_id = 2;
  // member_id is the ID of the sender server.
  uint64 member_id = 3;
}

message ResponseHeader {
  uint64 protocol_version = 1;
  // cluster_id is the ID of the cluster which sent the response.
  uint64 cluster_id = 2;
  Error error = 3;
}

message Error {
  int32 code = 1;
  string err_msg = 2;
}

message Peer {
  uint64 id = 1;
  string addr = 2;
}

message TableName {
  string catalog_name = 1;
  string schema_name = 2;
  string table_name = 3;
}

message TimeInterval {
  // The unix timestamp in millis of the start of this period.
  uint64 start_timestamp_millis = 1;
  // The unix timestamp in millis of the end of this period.
  uint64 end_timestamp_millis = 2;
}

message KeyValue {
  // key is the key in bytes. An empty key is not allowed.
  bytes key = 1;
  // value is the value held by the key, in bytes.
  bytes value = 2;
}
greptime/v1/meta/heartbeat.proto
@@ -1,92 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Heartbeat {
  // Heartbeat, there may be many contents of the heartbeat, such as:
  // 1. Metadata to be registered to meta server and discoverable by other nodes.
  // 2. Some performance metrics, such as Load, CPU usage, etc.
  // 3. The number of computing tasks being executed.
  rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}

  // Ask leader's endpoint.
  rpc AskLeader(AskLeaderRequest) returns (AskLeaderResponse) {}
}

message HeartbeatRequest {
  RequestHeader header = 1;

  // Self peer
  Peer peer = 2;
  // Leader node
  bool is_leader = 3;
  // Actually reported time interval
  TimeInterval report_interval = 4;
  // Node stat
  NodeStat node_stat = 5;
  // Region stats in this node
  repeated RegionStat region_stats = 6;
  // Follower nodes and stats, empty on follower nodes
  repeated ReplicaStat replica_stats = 7;
}

message NodeStat {
  // The read capacity units during this period
  uint64 rcus = 1;
  // The write capacity units during this period
  uint64 wcus = 2;
  // Table number in this node
  uint64 table_num = 3;
  // Region number in this node
  uint64 region_num = 4;

  double cpu_usage = 5;
  double load = 6;
  // Read disk I/O in the node
  double read_io_rate = 7;
  // Write disk I/O in the node
  double write_io_rate = 8;

  // Others
  map<string, string> attrs = 100;
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  // The read capacity units during this period
  uint64 rcus = 3;
  // The write capacity units during this period
  uint64 wcus = 4;
  // Approximate region size
  uint64 approximate_size = 5;
  // Approximate number of rows
  uint64 approximate_rows = 6;

  // Others
  map<string, string> attrs = 100;
}

message ReplicaStat {
  Peer peer = 1;
  bool in_sync = 2;
  bool is_learner = 3;
}

message HeartbeatResponse {
  ResponseHeader header = 1;

  repeated bytes payload = 2;
}

message AskLeaderRequest {
  RequestHeader header = 1;
}

message AskLeaderResponse {
  ResponseHeader header = 1;

  Peer leader = 2;
}
greptime/v1/meta/route.proto
@@ -1,98 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Router {
  rpc Create(CreateRequest) returns (RouteResponse) {}

  // Fetch routing information for tables. The smallest unit is the complete
  // routing information(all regions) of a table.
  //
  // ```text
  // table_1
  //   table_name
  //   table_schema
  //   regions
  //     region_1
  //       leader_peer
  //       follower_peer_1, follower_peer_2
  //     region_2
  //       leader_peer
  //       follower_peer_1, follower_peer_2, follower_peer_3
  //     region_xxx
  // table_2
  //   ...
  // ```
  //
  rpc Route(RouteRequest) returns (RouteResponse) {}

  rpc Delete(DeleteRequest) returns (RouteResponse) {}
}

message CreateRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
  repeated Partition partitions = 3;
}

message RouteRequest {
  RequestHeader header = 1;

  repeated TableName table_names = 2;
}

message DeleteRequest {
  RequestHeader header = 1;

  TableName table_name = 2;
}

message RouteResponse {
  ResponseHeader header = 1;

  repeated Peer peers = 2;
  repeated TableRoute table_routes = 3;
}

message TableRoute {
  Table table = 1;
  repeated RegionRoute region_routes = 2;
}

message RegionRoute {
  Region region = 1;
  // single leader node for write task
  uint64 leader_peer_index = 2;
  // multiple follower nodes for read task
  repeated uint64 follower_peer_indexes = 3;
}

message Table {
  uint64 id = 1;
  TableName table_name = 2;
  bytes table_schema = 3;
}

message Region {
  // TODO(LFC): Maybe use message RegionNumber?
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  map<string, string> attrs = 100;
}

// PARTITION `region_name` VALUES LESS THAN (value_list)
message Partition {
  repeated bytes column_list = 1;
  repeated bytes value_list = 2;
}

// This message is only for saving into store.
message TableRouteValue {
  repeated Peer peers = 1;
  TableRoute table_route = 2;
}
greptime/v1/meta/store.proto
@@ -1,159 +0,0 @@
syntax = "proto3";

package greptime.v1.meta;

import "greptime/v1/meta/common.proto";

service Store {
  // Range gets the keys in the range from the key-value store.
  rpc Range(RangeRequest) returns (RangeResponse);

  // Put puts the given key into the key-value store.
  rpc Put(PutRequest) returns (PutResponse);

  // BatchPut atomically puts the given keys into the key-value store.
  rpc BatchPut(BatchPutRequest) returns (BatchPutResponse);

  // CompareAndPut atomically puts the value to the given updated
  // value if the current value == the expected value.
  rpc CompareAndPut(CompareAndPutRequest) returns (CompareAndPutResponse);

  // DeleteRange deletes the given range from the key-value store.
  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);

  // MoveValue atomically renames the key to the given updated key.
  rpc MoveValue(MoveValueRequest) returns (MoveValueResponse);
}

message RangeRequest {
  RequestHeader header = 1;

  // key is the first key for the range. If range_end is not given, the
  // request only looks up key.
  bytes key = 2;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all
  // keys.
  bytes range_end = 3;
  // limit is a limit on the number of keys returned for the request. When
  // limit is set to 0, it is treated as no limit.
  int64 limit = 4;
  // keys_only when set returns only the keys and not the values.
  bool keys_only = 5;
}

message RangeResponse {
  ResponseHeader header = 1;

  // kvs is the list of key-value pairs matched by the range request.
  repeated KeyValue kvs = 2;
  // more indicates if there are more keys to return in the requested range.
  bool more = 3;
}

message PutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 3;
  // If prev_kv is set, gets the previous key-value pair before changing it.
  // The previous key-value pair will be returned in the put response.
  bool prev_kv = 4;
}

message PutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pair will be
  // returned.
  KeyValue prev_kv = 2;
}

message BatchPutRequest {
  RequestHeader header = 1;

  repeated KeyValue kvs = 2;
  // If prev_kv is set, gets the previous key-value pairs before changing it.
  // The previous key-value pairs will be returned in the batch put response.
  bool prev_kv = 3;
}

message BatchPutResponse {
  ResponseHeader header = 1;

  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 2;
}

message CompareAndPutRequest {
  RequestHeader header = 1;

  // key is the key, in bytes, to put into the key-value store.
  bytes key = 2;
  // expect is the previous value, in bytes
  bytes expect = 3;
  // value is the value, in bytes, to associate with the key in the
  // key-value store.
  bytes value = 4;
}

message CompareAndPutResponse {
  ResponseHeader header = 1;

  bool success = 2;
  KeyValue prev_kv = 3;
}

message DeleteRangeRequest {
  RequestHeader header = 1;

  // key is the first key to delete in the range.
  bytes key = 2;
  // range_end is the key following the last key to delete for the range
  // [key, range_end).
  // If range_end is not given, the range is defined to contain only the key
  // argument.
  // If range_end is one bit larger than the given key, then the range is all
  // the keys with the prefix (the given key).
  // If range_end is '\0', the range is all keys greater than or equal to the
  // key argument.
  bytes range_end = 3;
  // If prev_kv is set, gets the previous key-value pairs before deleting it.
  // The previous key-value pairs will be returned in the delete response.
  bool prev_kv = 4;
}

message DeleteRangeResponse {
  ResponseHeader header = 1;

  // deleted is the number of keys deleted by the delete range request.
  int64 deleted = 2;
  // If prev_kv is set in the request, the previous key-value pairs will be
  // returned.
  repeated KeyValue prev_kvs = 3;
}

message MoveValueRequest {
  RequestHeader header = 1;

  // If from_key does not exist, return the value of to_key (if it exists).
  // If from_key exists, move the value of from_key to to_key (i.e. rename),
  // and return the value.
  bytes from_key = 2;
  bytes to_key = 3;
}

message MoveValueResponse {
  ResponseHeader header = 1;

  // If from_key does not exist, return the value of to_key (if it exists).
  // If from_key exists, return the value of from_key.
  KeyValue kv = 2;
}
@@ -1,85 +0,0 @@
|
||||
// Copyright 2016 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
package prometheus;
|
||||
|
||||
option go_package = "prompb";
|
||||
|
||||
import "prometheus/remote/types.proto";
|
||||
|
||||
message WriteRequest {
|
||||
repeated prometheus.TimeSeries timeseries = 1;
|
||||
// Cortex uses this field to determine the source of the write request.
|
||||
// We reserve it to avoid any compatibility issues.
|
||||
reserved 2;
|
||||
repeated prometheus.MetricMetadata metadata = 3;
|
||||
}
|
||||
|
||||
// ReadRequest represents a remote read request.
|
||||
message ReadRequest {
|
||||
repeated Query queries = 1;
|
||||
|
||||
enum ResponseType {
|
||||
// Server will return a single ReadResponse message with matched series that includes list of raw samples.
|
||||
// It's recommended to use streamed response types instead.
|
||||
//
|
||||
// Response headers:
|
||||
// Content-Type: "application/x-protobuf"
|
||||
// Content-Encoding: "snappy"
|
||||
SAMPLES = 0;
|
||||
// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
|
||||
// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
|
||||
//
|
||||
// Response headers:
|
||||
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
|
||||
// Content-Encoding: ""
|
||||
STREAMED_XOR_CHUNKS = 1;
|
||||
}
|
||||
|
||||
// accepted_response_types allows negotiating the content type of the response.
|
||||
//
|
||||
// Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
|
||||
// implemented by server, error is returned.
|
||||
// For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
|
||||
repeated ResponseType accepted_response_types = 2;
|
||||
}
|
||||
|
||||
// ReadResponse is a response when response_type equals SAMPLES.
|
||||
message ReadResponse {
|
||||
// In same order as the request's queries.
|
||||
repeated QueryResult results = 1;
|
||||
}
|
||||
|
||||
message Query {
|
||||
int64 start_timestamp_ms = 1;
|
||||
int64 end_timestamp_ms = 2;
|
||||
repeated prometheus.LabelMatcher matchers = 3;
|
||||
prometheus.ReadHints hints = 4;
|
||||
}
|
||||
|
||||
message QueryResult {
|
||||
// Samples within a time series must be ordered by time.
|
||||
repeated prometheus.TimeSeries timeseries = 1;
|
||||
}
|
||||
|
||||
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
|
||||
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
|
||||
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
|
||||
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
|
||||
message ChunkedReadResponse {
|
||||
repeated prometheus.ChunkedSeries chunked_series = 1;
|
||||
|
||||
// query_index represents an index of the query from ReadRequest.queries these chunks relates to.
|
||||
int64 query_index = 2;
|
||||
}
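
As a sketch of how a client negotiates the response type (assuming prost-generated Rust bindings for these messages; the module path here is hypothetical):

use prometheus::remote::{read_request::ResponseType, LabelMatcher, Query, ReadRequest};

// Ask for `up` over one hour, preferring streamed chunks with a SAMPLES fallback.
fn build_read_request() -> ReadRequest {
    ReadRequest {
        queries: vec![Query {
            start_timestamp_ms: 1_672_531_200_000,
            end_timestamp_ms: 1_672_534_800_000,
            matchers: vec![LabelMatcher {
                r#type: 0, // Type::EQ
                name: "__name__".to_string(),
                value: "up".to_string(),
            }],
            hints: None,
        }],
        accepted_response_types: vec![
            ResponseType::StreamedXorChunks as i32,
            ResponseType::Samples as i32,
        ],
    }
}
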
@@ -1,117 +0,0 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type; these match the set from Prometheus.
  // Refer to model/textparse/interface.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1;
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1;
  repeated Sample samples = 2;
  repeated Exemplar exemplars = 3;
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1;
}

// Matcher specifies a rule, which can match a set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}

message ReadHints {
  int64 step_ms = 1;            // Query step size in milliseconds.
  string func = 2;              // String representation of surrounding function or aggregation.
  int64 start_ms = 3;           // Start time in milliseconds.
  int64 end_ms = 4;             // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6;                  // Indicates whether the grouping is `by` (true) or `without` (false).
  int64 range_ms = 7;           // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents a single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1;
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2;
}
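
A minimal write-side counterpart, again assuming prost-generated bindings (paths hypothetical): a valid TimeSeries carries its metric name as the `__name__` label.

use prometheus::remote::{Label, Sample, TimeSeries, WriteRequest};

fn example_write() -> WriteRequest {
    WriteRequest {
        timeseries: vec![TimeSeries {
            // labels are required for ingestion; samples must be ordered by time.
            labels: vec![
                Label { name: "__name__".to_string(), value: "http_requests_total".to_string() },
                Label { name: "job".to_string(), value: "api".to_string() },
            ],
            samples: vec![Sample { value: 1027.0, timestamp: 1_672_531_200_000 }],
            exemplars: vec![],
        }],
        metadata: vec![],
    }
}
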
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -105,125 +105,121 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
    }
}

impl Values {
    pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
        match datatype {
            ColumnDataType::Boolean => Values {
                bool_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int8 => Values {
                i8_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int16 => Values {
                i16_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int32 => Values {
                i32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Int64 => Values {
                i64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint8 => Values {
                u8_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint16 => Values {
                u16_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint32 => Values {
                u32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Uint64 => Values {
                u64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Float32 => Values {
                f32_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Float64 => Values {
                f64_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Binary => Values {
                binary_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::String => Values {
                string_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Date => Values {
                date_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::Datetime => Values {
                datetime_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampSecond => Values {
                ts_second_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampMillisecond => Values {
                ts_millisecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampMicrosecond => Values {
                ts_microsecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
            ColumnDataType::TimestampNanosecond => Values {
                ts_nanosecond_values: Vec::with_capacity(capacity),
                ..Default::default()
            },
        }
pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values {
    match datatype {
        ColumnDataType::Boolean => Values {
            bool_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int8 => Values {
            i8_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int16 => Values {
            i16_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int32 => Values {
            i32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Int64 => Values {
            i64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint8 => Values {
            u8_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint16 => Values {
            u16_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint32 => Values {
            u32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Uint64 => Values {
            u64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Float32 => Values {
            f32_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Float64 => Values {
            f64_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Binary => Values {
            binary_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::String => Values {
            string_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Date => Values {
            date_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::Datetime => Values {
            datetime_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampSecond => Values {
            ts_second_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampMillisecond => Values {
            ts_millisecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampMicrosecond => Values {
            ts_microsecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
        ColumnDataType::TimestampNanosecond => Values {
            ts_nanosecond_values: Vec::with_capacity(capacity),
            ..Default::default()
        },
    }
}

impl Column {
    // The types of the values must be the same.
    pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
        let values = self.values.get_or_insert_with(Values::default);
        let mut null_mask = BitVec::from_slice(&self.null_mask);
        let len = vector.len();
        null_mask.reserve_exact(origin_count + len);
        null_mask.extend(BitVec::repeat(false, len));
// The types of the values must be the same.
pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
    let values = column.values.get_or_insert_with(Values::default);
    let mut null_mask = BitVec::from_slice(&column.null_mask);
    let len = vector.len();
    null_mask.reserve_exact(origin_count + len);
    null_mask.extend(BitVec::repeat(false, len));

        (0..len).into_iter().for_each(|idx| match vector.get(idx) {
            Value::Null => null_mask.set(idx + origin_count, true),
            Value::Boolean(val) => values.bool_values.push(val),
            Value::UInt8(val) => values.u8_values.push(val.into()),
            Value::UInt16(val) => values.u16_values.push(val.into()),
            Value::UInt32(val) => values.u32_values.push(val),
            Value::UInt64(val) => values.u64_values.push(val),
            Value::Int8(val) => values.i8_values.push(val.into()),
            Value::Int16(val) => values.i16_values.push(val.into()),
            Value::Int32(val) => values.i32_values.push(val),
            Value::Int64(val) => values.i64_values.push(val),
            Value::Float32(val) => values.f32_values.push(*val),
            Value::Float64(val) => values.f64_values.push(*val),
            Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
            Value::Binary(val) => values.binary_values.push(val.to_vec()),
            Value::Date(val) => values.date_values.push(val.val()),
            Value::DateTime(val) => values.datetime_values.push(val.val()),
            Value::Timestamp(val) => match val.unit() {
                TimeUnit::Second => values.ts_second_values.push(val.value()),
                TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
                TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
                TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
            },
            Value::List(_) => unreachable!(),
        });
        self.null_mask = null_mask.into_vec();
    }
    (0..len).into_iter().for_each(|idx| match vector.get(idx) {
        Value::Null => null_mask.set(idx + origin_count, true),
        Value::Boolean(val) => values.bool_values.push(val),
        Value::UInt8(val) => values.u8_values.push(val.into()),
        Value::UInt16(val) => values.u16_values.push(val.into()),
        Value::UInt32(val) => values.u32_values.push(val),
        Value::UInt64(val) => values.u64_values.push(val),
        Value::Int8(val) => values.i8_values.push(val.into()),
        Value::Int16(val) => values.i16_values.push(val.into()),
        Value::Int32(val) => values.i32_values.push(val),
        Value::Int64(val) => values.i64_values.push(val),
        Value::Float32(val) => values.f32_values.push(*val),
        Value::Float64(val) => values.f64_values.push(*val),
        Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
        Value::Binary(val) => values.binary_values.push(val.to_vec()),
        Value::Date(val) => values.date_values.push(val.val()),
        Value::DateTime(val) => values.datetime_values.push(val.val()),
        Value::Timestamp(val) => match val.unit() {
            TimeUnit::Second => values.ts_second_values.push(val.value()),
            TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
            TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
            TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
        },
        Value::List(_) => unreachable!(),
    });
    column.null_mask = null_mask.into_vec();
}
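
A quick sketch of how the two free helpers compose after this refactor (types as in this diff; the caller is assumed to track how many rows the column already holds, and the Int32 datatype here is just an example):

fn append_i32(column: &mut Column, rows_so_far: usize, vector: VectorRef) {
    // Pre-size the value buffers once, then push the values and extend the null mask.
    if column.values.is_none() {
        column.values = Some(values_with_capacity(ColumnDataType::Int32, vector.len()));
    }
    push_vals(column, rows_so_far, vector);
}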

#[cfg(test)]
@@ -239,59 +235,59 @@ mod tests {

    #[test]
    fn test_values_with_capacity() {
        let values = Values::with_capacity(ColumnDataType::Int8, 2);
        let values = values_with_capacity(ColumnDataType::Int8, 2);
        let values = values.i8_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Int32, 2);
        let values = values_with_capacity(ColumnDataType::Int32, 2);
        let values = values.i32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Int64, 2);
        let values = values_with_capacity(ColumnDataType::Int64, 2);
        let values = values.i64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint8, 2);
        let values = values_with_capacity(ColumnDataType::Uint8, 2);
        let values = values.u8_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint32, 2);
        let values = values_with_capacity(ColumnDataType::Uint32, 2);
        let values = values.u32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Uint64, 2);
        let values = values_with_capacity(ColumnDataType::Uint64, 2);
        let values = values.u64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Float32, 2);
        let values = values_with_capacity(ColumnDataType::Float32, 2);
        let values = values.f32_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Float64, 2);
        let values = values_with_capacity(ColumnDataType::Float64, 2);
        let values = values.f64_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Binary, 2);
        let values = values_with_capacity(ColumnDataType::Binary, 2);
        let values = values.binary_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Boolean, 2);
        let values = values_with_capacity(ColumnDataType::Boolean, 2);
        let values = values.bool_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::String, 2);
        let values = values_with_capacity(ColumnDataType::String, 2);
        let values = values.string_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Date, 2);
        let values = values_with_capacity(ColumnDataType::Date, 2);
        let values = values.date_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::Datetime, 2);
        let values = values_with_capacity(ColumnDataType::Datetime, 2);
        let values = values.datetime_values;
        assert_eq!(2, values.capacity());

        let values = Values::with_capacity(ColumnDataType::TimestampMillisecond, 2);
        let values = values_with_capacity(ColumnDataType::TimestampMillisecond, 2);
        let values = values.ts_millisecond_values;
        assert_eq!(2, values.capacity());
    }
@@ -462,28 +458,28 @@ mod tests {
        };

        let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![1, 2, 3],
            column.values.as_ref().unwrap().ts_nanosecond_values
        );

        let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![4, 5, 6],
            column.values.as_ref().unwrap().ts_millisecond_values
        );

        let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![7, 8, 9],
            column.values.as_ref().unwrap().ts_microsecond_values
        );

        let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
        column.push_vals(3, vector);
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![10, 11, 12],
            column.values.as_ref().unwrap().ts_second_values
@@ -507,7 +503,7 @@ mod tests {
        let row_count = 4;

        let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
        column.push_vals(row_count, vector);
        push_vals(&mut column, row_count, vector);
        // Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
        let bool_values = column.values.unwrap().bool_values;
        assert_eq!(vec![false, true, true, true, false], bool_values);

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,9 +14,13 @@

pub mod error;
pub mod helper;
pub mod prometheus;
pub mod result;
pub mod serde;

pub mod prometheus {
    pub mod remote {
        pub use greptime_proto::prometheus::remote::*;
    }
}

pub mod v1;

pub use prost::DecodeError;

@@ -1,97 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow_flight::FlightData;
use prost::Message;

use crate::v1::{ObjectResult, ResultHeader};

pub const PROTOCOL_VERSION: u32 = 1;

#[derive(Default)]
pub struct ObjectResultBuilder {
    version: u32,
    code: u32,
    err_msg: Option<String>,
    flight_data: Option<Vec<FlightData>>,
}

impl ObjectResultBuilder {
    pub fn new() -> Self {
        Self {
            version: PROTOCOL_VERSION,
            ..Default::default()
        }
    }

    #[allow(dead_code)]
    pub fn version(mut self, version: u32) -> Self {
        self.version = version;
        self
    }

    pub fn status_code(mut self, code: u32) -> Self {
        self.code = code;
        self
    }

    pub fn err_msg(mut self, err_msg: String) -> Self {
        self.err_msg = Some(err_msg);
        self
    }

    pub fn flight_data(mut self, flight_data: Vec<FlightData>) -> Self {
        self.flight_data = Some(flight_data);
        self
    }

    pub fn build(self) -> ObjectResult {
        let header = Some(ResultHeader {
            version: self.version,
            code: self.code,
            err_msg: self.err_msg.unwrap_or_default(),
        });

        let flight_data = if let Some(flight_data) = self.flight_data {
            flight_data
                .into_iter()
                .map(|x| x.encode_to_vec())
                .collect::<Vec<Vec<u8>>>()
        } else {
            vec![]
        };
        ObjectResult {
            header,
            flight_data,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_object_result_builder() {
        let obj_result = ObjectResultBuilder::new()
            .version(101)
            .status_code(500)
            .err_msg("Failed to read this file!".to_string())
            .build();
        let header = obj_result.header.unwrap();
        assert_eq!(101, header.version);
        assert_eq!(500, header.code);
        assert_eq!("Failed to read this file!", header.err_msg);
    }
}
@@ -1,38 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub use prost::DecodeError;
use prost::Message;

use crate::v1::meta::TableRouteValue;

macro_rules! impl_convert_with_bytes {
    ($data_type: ty) => {
        impl From<$data_type> for Vec<u8> {
            fn from(entity: $data_type) -> Self {
                entity.encode_to_vec()
            }
        }

        impl TryFrom<&[u8]> for $data_type {
            type Error = DecodeError;

            fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
                <$data_type>::decode(value.as_ref())
            }
        }
    };
}

impl_convert_with_bytes!(TableRouteValue);
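
With the macro expanded for TableRouteValue, serialization round-trips through plain byte vectors; a small sketch (the default value is just a stand-in):

fn roundtrip() -> Result<(), DecodeError> {
    let value = TableRouteValue::default();
    // From<TableRouteValue> for Vec<u8> encodes; TryFrom<&[u8]> decodes.
    let bytes: Vec<u8> = value.clone().into();
    let decoded = TableRouteValue::try_from(bytes.as_slice())?;
    assert_eq!(value, decoded);
    Ok(())
}
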
@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("greptime.v1");
pub mod column_def;

pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");
pub mod meta {
    pub use greptime_proto::v1::meta::*;
}

mod column_def;
pub mod meta;
pub use greptime_proto::v1::*;

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,21 +19,24 @@ use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;

impl ColumnDef {
    pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
        let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;

        let constraint = if self.default_constraint.is_empty() {
            None
        } else {
            Some(
                ColumnDefaultConstraint::try_from(self.default_constraint.as_slice())
                    .context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
            )
        };
    let constraint = if column_def.default_constraint.is_empty() {
        None
    } else {
        Some(
            ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()).context(
                error::ConvertColumnDefaultConstraintSnafu {
                    column: &column_def.name,
                },
            )?,
        )
    };

        ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
            .with_default_constraint(constraint)
            .context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
    }
    ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
        .with_default_constraint(constraint)
        .context(error::InvalidColumnDefaultConstraintSnafu {
            column: &column_def.name,
        })
}

@@ -1,209 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

tonic::include_proto!("greptime.v1.meta");

use std::collections::HashMap;
use std::hash::{Hash, Hasher};

pub const PROTOCOL_VERSION: u64 = 1;

#[derive(Default)]
pub struct PeerDict {
    peers: HashMap<Peer, usize>,
    index: usize,
}

impl PeerDict {
    pub fn get_or_insert(&mut self, peer: Peer) -> usize {
        let index = self.peers.entry(peer).or_insert_with(|| {
            let v = self.index;
            self.index += 1;
            v
        });

        *index
    }

    pub fn into_peers(self) -> Vec<Peer> {
        let mut array = vec![Peer::default(); self.index];
        for (p, i) in self.peers {
            array[i] = p;
        }
        array
    }
}

#[allow(clippy::derive_hash_xor_eq)]
impl Hash for Peer {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
        self.addr.hash(state);
    }
}

impl Eq for Peer {}

impl RequestHeader {
    #[inline]
    pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            member_id,
        }
    }
}

impl ResponseHeader {
    #[inline]
    pub fn success(cluster_id: u64) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            ..Default::default()
        }
    }

    #[inline]
    pub fn failed(cluster_id: u64, error: Error) -> Self {
        Self {
            protocol_version: PROTOCOL_VERSION,
            cluster_id,
            error: Some(error),
        }
    }

    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(error) = &self.error {
            if error.code == ErrorCode::NotLeader as i32 {
                return true;
            }
        }
        false
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
    NoActiveDatanodes = 1,
    NotLeader = 2,
}

impl Error {
    #[inline]
    pub fn no_active_datanodes() -> Self {
        Self {
            code: ErrorCode::NoActiveDatanodes as i32,
            err_msg: "No active datanodes".to_string(),
        }
    }

    #[inline]
    pub fn is_not_leader() -> Self {
        Self {
            code: ErrorCode::NotLeader as i32,
            err_msg: "Current server is not leader".to_string(),
        }
    }
}

impl HeartbeatResponse {
    #[inline]
    pub fn is_not_leader(&self) -> bool {
        if let Some(header) = &self.header {
            return header.is_not_leader();
        }
        false
    }
}

macro_rules! gen_set_header {
    ($req: ty) => {
        impl $req {
            #[inline]
            pub fn set_header(&mut self, (cluster_id, member_id): (u64, u64)) {
                self.header = Some(RequestHeader::new((cluster_id, member_id)));
            }
        }
    };
}

gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(DeleteRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
gen_set_header!(MoveValueRequest);
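
A sketch of what each expansion buys: every request type gets a uniform way to stamp the protocol header before the call goes out (the surrounding client plumbing is assumed):

fn prepare_heartbeat(cluster_id: u64, member_id: u64) -> HeartbeatRequest {
    let mut req = HeartbeatRequest::default();
    // Attaches RequestHeader { protocol_version, cluster_id, member_id }.
    req.set_header((cluster_id, member_id));
    req
}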

#[cfg(test)]
mod tests {
    use std::vec;

    use super::*;

    #[test]
    fn test_peer_dict() {
        let mut dict = PeerDict::default();

        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 1,
            addr: "111".to_string(),
        });
        dict.get_or_insert(Peer {
            id: 2,
            addr: "222".to_string(),
        });

        assert_eq!(2, dict.index);
        assert_eq!(
            vec![
                Peer {
                    id: 1,
                    addr: "111".to_string(),
                },
                Peer {
                    id: 2,
                    addr: "222".to_string(),
                }
            ],
            dict.into_peers()
        );
    }
}
@@ -21,7 +21,7 @@ common-time = { path = "../common/time" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
futures-util.workspace = true
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
regex = "1.6"
@@ -33,7 +33,7 @@ table = { path = "../table" }
tokio.workspace = true

[dev-dependencies]
chrono = "0.4"
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
@@ -21,6 +22,8 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};

use crate::DeregisterTableRequest;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
@@ -86,27 +89,25 @@ pub enum Error {
        backtrace: Backtrace,
    },

    #[snafu(display("Cannot find schema, schema info: {}", schema_info))]
    #[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
    SchemaNotFound {
        schema_info: String,
        catalog: String,
        schema: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Table `{}` already exists", table))]
    TableExists { table: String, backtrace: Backtrace },

    #[snafu(display("Table `{}` not exist", table))]
    TableNotExist { table: String, backtrace: Backtrace },

    #[snafu(display("Schema {} already exists", schema))]
    SchemaExists {
        schema: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Failed to register table"))]
    RegisterTable {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Operation {} not implemented yet", operation))]
    Unimplemented {
        operation: String,
@@ -141,6 +142,17 @@ pub enum Error {
        source: table::error::Error,
    },

    #[snafu(display(
        "Failed to deregister table, request: {:?}, source: {}",
        request,
        source
    ))]
    DeregisterTable {
        request: DeregisterTableRequest,
        #[snafu(backtrace)]
        source: table::error::Error,
    },

    #[snafu(display("Illegal catalog manager state: {}", msg))]
    IllegalManagerState { backtrace: Backtrace, msg: String },

@@ -163,6 +175,18 @@ pub enum Error {
        source: datatypes::error::Error,
    },

    #[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
    SchemaProviderOperation {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("{source}"))]
    Internal {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
    SystemCatalogTableScanExec {
        #[snafu(backtrace)]
@@ -174,15 +198,6 @@ pub enum Error {
        source: common_catalog::error::Error,
    },

    #[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
    Io {
        backtrace: Backtrace,
        source: std::io::Error,
    },

    #[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
    CatalogStateInconsistent { msg: String, backtrace: Backtrace },

    #[snafu(display("Failed to perform metasrv operation, source: {}", source))]
    MetaSrv {
        #[snafu(backtrace)]
@@ -195,10 +210,10 @@ pub enum Error {
        source: datatypes::error::Error,
    },

    #[snafu(display("Catalog internal error: {}", source))]
    Internal {
    #[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
    CatalogEntrySerde {
        #[snafu(backtrace)]
        source: BoxedError,
        source: common_catalog::error::Error,
    },
}

@@ -212,35 +227,38 @@ impl ErrorExt for Error {
            | Error::TableNotFound { .. }
            | Error::IllegalManagerState { .. }
            | Error::CatalogNotFound { .. }
            | Error::InvalidEntryType { .. }
            | Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
            | Error::InvalidEntryType { .. } => StatusCode::Unexpected,

            Error::SystemCatalog { .. }
            | Error::EmptyValue { .. }
            | Error::ValueDeserialize { .. }
            | Error::Io { .. } => StatusCode::StorageUnavailable,
            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,

            Error::RegisterTable { .. } | Error::SystemCatalogTypeMismatch { .. } => {
                StatusCode::Internal
            }
            Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,

            Error::ReadSystemCatalog { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
                source.status_code()
            }

            Error::TableExists { .. } => StatusCode::TableAlreadyExists,
            Error::TableNotExist { .. } => StatusCode::TableNotFound,
            Error::SchemaExists { .. } => StatusCode::InvalidArguments,

            Error::OpenSystemCatalog { source, .. }
            | Error::CreateSystemCatalog { source, .. }
            | Error::InsertCatalogRecord { source, .. }
            | Error::OpenTable { source, .. }
            | Error::CreateTable { source, .. } => source.status_code(),
            | Error::CreateTable { source, .. }
            | Error::DeregisterTable { source, .. } => source.status_code(),

            Error::MetaSrv { source, .. } => source.status_code(),
            Error::SystemCatalogTableScan { source } => source.status_code(),
            Error::SystemCatalogTableScanExec { source } => source.status_code(),
            Error::InvalidTableSchema { source, .. } => source.status_code(),
            Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
            Error::Internal { source, .. } => source.status_code(),
            Error::InvalidTableSchema { source, .. }
            | Error::InvalidTableInfoInCatalog { source } => source.status_code(),
            Error::SchemaProviderOperation { source } | Error::Internal { source } => {
                source.status_code()
            }

            Error::Unimplemented { .. } => StatusCode::Unsupported,
        }
@@ -263,7 +281,6 @@ impl From<Error> for DataFusionError {

#[cfg(test)]
mod tests {
    use common_error::mock::MockError;
    use snafu::GenerateImplicitData;

    use super::*;
@@ -284,22 +301,6 @@ mod tests {
            InvalidKeySnafu { key: None }.build().status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::OpenSystemCatalog {
                source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
            }
            .status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::CreateSystemCatalog {
                source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
            }
            .status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::SystemCatalog {

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -91,6 +91,7 @@ pub fn build_table_regional_prefix(
}

/// Table global info has only one key across all datanodes, so it does not have a `node_id` field.
#[derive(Clone)]
pub struct TableGlobalKey {
    pub catalog_name: String,
    pub schema_name: String,
@@ -131,7 +132,6 @@ impl TableGlobalKey {
pub struct TableGlobalValue {
    /// Id of the datanode that created the global table info kv. Only for debugging.
    pub node_id: u64,
    // TODO(LFC): Maybe remove it?
    /// Allocation of region ids across all datanodes.
    pub regions_id_map: HashMap<u64, Vec<u32>>,
    pub table_info: RawTableInfo,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use common_telemetry::info;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -97,6 +97,9 @@ pub trait CatalogManager: CatalogList {
    /// schema registered.
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;

    /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;

    /// Register a system table, should be called before starting the manager.
    async fn register_system_table(&self, request: RegisterSystemTableRequest)
        -> error::Result<()>;
@@ -142,7 +145,16 @@ impl Debug for RegisterTableRequest {
    }
}

#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct RenameTableRequest {
    pub catalog: String,
    pub schema: String,
    pub table_name: String,
    pub new_table_name: String,
    pub table_id: TableId,
}

#[derive(Debug, Clone)]
pub struct DeregisterTableRequest {
    pub catalog: String,
    pub schema: String,
@@ -155,11 +167,6 @@ pub struct RegisterSchemaRequest {
    pub schema: String,
}

/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

pub trait CatalogProviderFactory {
    fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
@@ -186,8 +193,10 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
        .create_table(&EngineContext::default(), req.create_table_request.clone())
        .await
        .with_context(|_| CreateTableSnafu {
            table_info: format!(
                "{catalog_name}.{schema_name}.{table_name}, id: {table_id}",
            table_info: common_catalog::format_full_table_name(
                catalog_name,
                schema_name,
                table_name,
            ),
        })?;
    manager
@@ -208,3 +217,38 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
    }
    Ok(())
}

/// Returns the number of regions served by this datanode.
pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
    let mut region_number: u64 = 0;

    for catalog_name in catalog_manager.catalog_names()? {
        let catalog =
            catalog_manager
                .catalog(&catalog_name)?
                .context(error::CatalogNotFoundSnafu {
                    catalog_name: &catalog_name,
                })?;

        for schema_name in catalog.schema_names()? {
            let schema = catalog
                .schema(&schema_name)?
                .context(error::SchemaNotFoundSnafu {
                    catalog: &catalog_name,
                    schema: &schema_name,
                })?;

            for table_name in schema.table_names()? {
                let table = schema
                    .table(&table_name)?
                    .context(error::TableNotFoundSnafu {
                        table_info: &table_name,
                    })?;

                let region_numbers = &table.table_info().meta.region_numbers;
                region_number += region_numbers.len() as u64;
            }
        }
    }
    Ok(region_number)
}
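
A sketch of a call site (names as in this diff; error handling is illustrative): the helper walks every catalog, schema, and table registered on the node.

fn report_region_count(catalog_manager: &CatalogManagerRef) {
    match region_number(catalog_manager) {
        Ok(n) => println!("this datanode serves {n} regions"),
        Err(e) => eprintln!("failed to count regions: {e}"),
    }
}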

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,6 +20,7 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
    SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{error, info};
use datatypes::prelude::ScalarVector;
@@ -34,9 +35,9 @@ use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{
    CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
    SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
    TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
    self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
    Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
    SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
@@ -45,9 +46,9 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
    format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
    CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
    RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
    handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -241,7 +242,8 @@ impl LocalCatalogManager {
        let schema = catalog
            .schema(&t.schema_name)?
            .context(SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &t.catalog_name, &t.schema_name),
                catalog: &t.catalog_name,
                schema: &t.schema_name,
            })?;

        let context = EngineContext {};
@@ -250,7 +252,6 @@ impl LocalCatalogManager {
            schema_name: t.schema_name.clone(),
            table_name: t.table_name.clone(),
            table_id: t.table_id,
            region_numbers: vec![0],
        };

        let option = self
@@ -338,7 +339,8 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;

        {
@@ -377,11 +379,75 @@ impl CatalogManager for LocalCatalogManager {
        }
    }

    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;

        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)?
            .context(CatalogNotFoundSnafu { catalog_name })?;

        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        // rename table in system catalog
        self.system
            .register_table(
                catalog_name.clone(),
                schema_name.clone(),
                request.new_table_name.clone(),
                request.table_id,
            )
            .await?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        {
            let started = *self.init_lock.lock().await;
            ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
        }

        {
            let _ = self.register_lock.lock().await;

            let DeregisterTableRequest {
                catalog,
                schema,
                table_name,
            } = &request;
            let table_id = self
                .catalogs
                .table(catalog, schema, table_name)?
                .with_context(|| error::TableNotExistSnafu {
                    table: format!("{catalog}.{schema}.{table_name}"),
                })?
                .table_info()
                .ident
                .table_id;

            if !self.system.deregister_table(&request, table_id).await? {
                return Ok(false);
            }

            self.catalogs.deregister_table(request).await
        }
        .fail()
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -452,7 +518,8 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;
        schema.table(table_name)
    }


@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,16 +20,19 @@ use std::sync::{Arc, RwLock};

use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::OptionExt;
use snafu::{ensure, OptionExt};
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::error::{
    self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::schema::SchemaProvider;
use crate::{
    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
    SchemaProviderRef,
};

/// Simple in-memory list of catalogs
@@ -81,13 +84,33 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        schema
            .register_table(request.table_name, request.table)
            .map(|v| v.is_none())
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
            .get(&request.catalog)
            .context(CatalogNotFoundSnafu {
                catalog_name: &request.catalog,
            })?
            .clone();
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
@@ -99,7 +122,8 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        schema
            .deregister_table(&request.table_name)
@@ -226,6 +250,10 @@ impl CatalogProvider for MemoryCatalogProvider {
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>> {
        let mut schemas = self.schemas.write().unwrap();
        ensure!(
            !schemas.contains_key(&name),
            error::SchemaExistsSnafu { schema: &name }
        );
        Ok(schemas.insert(name, schema))
    }

@@ -288,6 +316,20 @@ impl SchemaProvider for MemorySchemaProvider {
        }
    }

    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        let mut tables = self.tables.write().unwrap();
        if tables.get(name).is_some() {
            let table = tables.remove(name).unwrap();
            tables.insert(new_name, table.clone());
            Ok(table)
        } else {
            TableNotFoundSnafu {
                table_info: name.to_string(),
            }
            .fail()?
        }
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        let mut tables = self.tables.write().unwrap();
        Ok(tables.remove(name))
@@ -352,6 +394,85 @@ mod tests {
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_mem_provider_rename_table() {
        let provider = MemorySchemaProvider::new();
        let table_name = "num";
        assert!(!provider.table_exist(table_name).unwrap());
        let test_table: TableRef = Arc::new(NumbersTable::default());
        // register test table
        assert!(provider
            .register_table(table_name.to_string(), test_table.clone())
            .unwrap()
            .is_none());
        assert!(provider.table_exist(table_name).unwrap());

        // rename test table
        let new_table_name = "numbers";
        provider
            .rename_table(table_name, new_table_name.to_string())
            .unwrap();

        // test old table name not exist
        assert!(!provider.table_exist(table_name).unwrap());
        assert!(provider.deregister_table(table_name).unwrap().is_none());

        // test new table name exists
        assert!(provider.table_exist(new_table_name).unwrap());
        let registered_table = provider.table(new_table_name).unwrap().unwrap();
        assert_eq!(
            registered_table.table_info().ident.table_id,
            test_table.table_info().ident.table_id
        );

        let other_table = Arc::new(NumbersTable::new(2));
        let result = provider.register_table(new_table_name.to_string(), other_table);
        let err = result.err().unwrap();
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_catalog_rename_table() {
        let catalog = MemoryCatalogManager::default();
        let schema = catalog
            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .unwrap()
            .unwrap();

        // register table
        let table_name = "num";
        let table_id = 2333;
        let table: TableRef = Arc::new(NumbersTable::new(table_id));
        let register_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table,
        };
        assert!(catalog.register_table(register_table_req).await.unwrap());
        assert!(schema.table_exist(table_name).unwrap());

        // rename table
        let new_table_name = "numbers";
        let rename_table_req = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        assert!(catalog.rename_table(rename_table_req).await.unwrap());
        assert!(!schema.table_exist(table_name).unwrap());
        assert!(schema.table_exist(new_table_name).unwrap());

        let registered_table = catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);
    }

    #[test]
    pub fn test_register_if_absent() {
        let list = MemoryCatalogManager::default();

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -43,7 +43,7 @@ use crate::remote::{Kv, KvBackendRef};
use crate::{
    handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, SchemaProvider, SchemaProviderRef,
    RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
};

/// Catalog manager based on metasrv.
@@ -324,7 +324,6 @@ impl RemoteCatalogManager {
            schema_name: schema_name.clone(),
            table_name: table_name.clone(),
            table_id,
            region_numbers: region_numbers.clone(),
        };
        match self
            .engine
@@ -418,7 +417,8 @@ impl CatalogManager for RemoteCatalogManager {
        catalog_provider
            .schema(&schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{}.{}", &catalog_name, &schema_name),
                catalog: &catalog_name,
                schema: &schema_name,
            })?;
        if schema_provider.table_exist(&request.table_name)? {
            return TableExistsSnafu {
@@ -430,11 +430,18 @@ impl CatalogManager for RemoteCatalogManager {
        Ok(true)
    }

    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
        }
        .fail()
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalog_name = &request.catalog;
        let schema_name = &request.schema;
        let schema = self
            .schema(catalog_name, schema_name)?
            .context(SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        let result = schema.deregister_table(&request.table_name)?;
        Ok(result.is_none())
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -448,6 +455,13 @@ impl CatalogManager for RemoteCatalogManager {
        Ok(true)
    }

    async fn rename_table(&self, _request: RenameTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "rename table",
        }
        .fail()
    }

    async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
        let mut requests = self.system_table_requests.lock().await;
        requests.push(request);
@@ -474,7 +488,8 @@ impl CatalogManager for RemoteCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                schema_info: format!("{catalog_name}.{schema_name}"),
                catalog: catalog_name,
                schema: schema_name,
            })?;
        schema.table(table_name)
    }
@@ -737,6 +752,13 @@ impl SchemaProvider for RemoteSchemaProvider {
        prev
    }

    fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
        UnimplementedSnafu {
            operation: "rename table",
        }
        .fail()
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        let table_name = name.to_string();
        let table_key = self.build_regional_table_key(&table_name).to_string();

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -35,6 +35,10 @@ pub trait SchemaProvider: Sync + Send {
    /// If a table of the same name existed before, it returns "Table already exists" error.
    fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;

    /// If supported by the implementation, renames an existing table from this schema and returns it.
    /// If no table of that name exists, returns "Table not found" error.
    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;

    /// If supported by the implementation, removes an existing table from this schema and returns it.
    /// If no table of that name exists, returns Ok(None).
    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,29 +25,27 @@ use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
use table::requests::{CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest};
use table::{Table, TableRef};

use crate::error::{
    self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
    OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
};
use crate::DeregisterTableRequest;

pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
pub const VALUE_INDEX: usize = 3;

pub struct SystemCatalogTable {
    table_info: TableInfoRef,
    pub table: TableRef,
}
pub struct SystemCatalogTable(TableRef);

#[async_trait::async_trait]
impl Table for SystemCatalogTable {
@@ -56,25 +54,29 @@ impl Table for SystemCatalogTable {
    }

    fn schema(&self) -> SchemaRef {
        self.table_info.meta.schema.clone()
        self.0.schema()
    }

    async fn scan(
        &self,
        _projection: Option<&Vec<usize>>,
        _filters: &[Expr],
        _limit: Option<usize>,
        projection: Option<&Vec<usize>>,
        filters: &[Expr],
        limit: Option<usize>,
    ) -> table::Result<PhysicalPlanRef> {
        panic!("System catalog table does not support scan!")
        self.0.scan(projection, filters, limit).await
    }

    /// Insert values into table.
    async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
        self.table.insert(request).await
        self.0.insert(request).await
    }

    fn table_info(&self) -> TableInfoRef {
        self.table_info.clone()
        self.0.table_info()
    }

    async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
        self.0.delete(request).await
    }
}

@@ -85,7 +87,6 @@ impl SystemCatalogTable {
            schema_name: INFORMATION_SCHEMA_NAME.to_string(),
            table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
            table_id: SYSTEM_CATALOG_TABLE_ID,
            region_numbers: vec![0],
        };
        let schema = Arc::new(build_system_catalog_schema());
        let ctx = EngineContext::default();
@@ -95,10 +96,7 @@ impl SystemCatalogTable {
            .await
            .context(OpenSystemCatalogSnafu)?
        {
            Ok(Self {
                table_info: table.table_info(),
                table,
            })
            Ok(Self(table))
        } else {
            // system catalog table is not yet created, try to create
            let request = CreateTableRequest {
@@ -118,8 +116,7 @@ impl SystemCatalogTable {
                .create_table(&ctx, request)
                .await
                .context(CreateSystemCatalogSnafu)?;
            let table_info = table.table_info();
            Ok(Self { table, table_info })
            Ok(Self(table))
        }
    }

@@ -128,7 +125,6 @@ impl SystemCatalogTable {
        let full_projection = None;
        let ctx = SessionContext::new();
        let scan = self
            .table
            .scan(full_projection, &[], None)
            .await
            .context(error::SystemCatalogTableScanSnafu)?;
@@ -186,16 +182,56 @@ fn build_system_catalog_schema() -> Schema {
    SchemaBuilder::try_from(cols).unwrap().build().unwrap()
}

pub fn build_table_insert_request(full_table_name: String, table_id: TableId) -> InsertRequest {
/// Formats key string for table entry in system catalog
#[inline]
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
    format!("{catalog}.{schema}.{table_id}")
}

pub fn build_table_insert_request(
    catalog: String,
    schema: String,
    table_name: String,
    table_id: TableId,
) -> InsertRequest {
    let entry_key = format_table_entry_key(&catalog, &schema, table_id);
    build_insert_request(
        EntryType::Table,
        full_table_name.as_bytes(),
        serde_json::to_string(&TableEntryValue { table_id })
        entry_key.as_bytes(),
        serde_json::to_string(&TableEntryValue { table_name })
            .unwrap()
            .as_bytes(),
    )
}

pub(crate) fn build_table_deletion_request(
    request: &DeregisterTableRequest,
    table_id: TableId,
) -> DeleteRequest {
    let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
    DeleteRequest {
        key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
    }
}

fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
    let mut m = HashMap::with_capacity(3);
    m.insert(
        "entry_type".to_string(),
        Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
    );
    m.insert(
        "key".to_string(),
        Arc::new(BinaryVector::from_slice(&[key])) as _,
    );
    // Timestamp in key part is intentionally left to 0
    m.insert(
        "timestamp".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
    );
    m
}

pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
    let full_schema_name = format!("{catalog_name}.{schema_name}");
    build_insert_request(
@@ -208,22 +244,10 @@ pub fn build_schema_insert_request(catalog_name: String, schema_name: String) ->
}

pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
    let primary_key_columns = build_primary_key_columns(entry_type, key);

    let mut columns_values = HashMap::with_capacity(6);
    columns_values.insert(
        "entry_type".to_string(),
        Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
    );

    columns_values.insert(
        "key".to_string(),
        Arc::new(BinaryVector::from_slice(&[key])) as _,
    );

    // Timestamp in key part is intentionally left to 0
    columns_values.insert(
        "timestamp".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
    );
    columns_values.extend(primary_key_columns.into_iter());

    columns_values.insert(
        "value".to_string(),
@@ -246,6 +270,7 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
        schema_name: DEFAULT_SCHEMA_NAME.to_string(),
        table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
        columns_values,
        region_number: 0, // system catalog table has only one region
    }
}

@@ -285,8 +310,8 @@ pub fn decode_system_catalog(
        }

        EntryType::Table => {
            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_name>`
            // and the value is a JSON string with format: `{"table_id": <table_id>}`
            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_id>`
            // and the value is a JSON string with format: `{"table_name": <table_name>}`
            let table_parts = key.split('.').collect::<Vec<_>>();
            ensure!(
                table_parts.len() >= 3,
@@ -298,11 +323,12 @@ pub fn decode_system_catalog(
            debug!("Table meta value: {}", String::from_utf8_lossy(value));
            let table_meta: TableEntryValue =
                serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
            let table_id = table_parts[2].parse::<TableId>().unwrap();
            Ok(Entry::Table(TableEntry {
                catalog_name: table_parts[0].to_string(),
                schema_name: table_parts[1].to_string(),
                table_name: table_parts[2].to_string(),
                table_id: table_meta.table_id,
                table_name: table_meta.table_name,
                table_id,
            }))
        }
    }
@@ -362,12 +388,14 @@ pub struct TableEntry {

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
    pub table_id: TableId,
    pub table_name: String,
}

#[cfg(test)]
mod tests {
    use log_store::fs::noop::NoopLogStore;
    use common_recordbatch::RecordBatches;
    use datatypes::value::Value;
    use log_store::NoopLogStore;
    use mito::config::EngineConfig;
    use mito::engine::MitoEngine;
    use object_store::ObjectStore;
@@ -415,8 +443,8 @@ mod tests {
    pub fn test_decode_table() {
        let entry = decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.some_table".as_bytes()),
            Some("{\"table_id\":42}".as_bytes()),
            Some("some_catalog.some_schema.42".as_bytes()),
            Some("{\"table_name\":\"some_table\"}".as_bytes()),
        )
        .unwrap();

@@ -435,7 +463,7 @@ mod tests {
    pub fn test_decode_mismatch() {
        decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.some_table".as_bytes()),
            Some("some_catalog.some_schema.42".as_bytes()),
            None,
        )
        .unwrap();
@@ -487,4 +515,53 @@ mod tests {
        assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
        assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
    }

    #[tokio::test]
    async fn test_system_catalog_table_records() {
        let (_, table_engine) = prepare_table_engine().await;
        let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();

        let table_insertion = build_table_insert_request(
            DEFAULT_CATALOG_NAME.to_string(),
            DEFAULT_SCHEMA_NAME.to_string(),
            "my_table".to_string(),
            1,
        );
        let result = catalog_table.insert(table_insertion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 1);
        let batch = batches.remove(0);
        assert_eq!(batch.num_rows(), 1);

        let row = batch.rows().next().unwrap();
        let Value::UInt8(entry_type) = row[0] else { unreachable!() };
        let Value::Binary(key) = row[1].clone() else { unreachable!() };
        let Value::Binary(value) = row[3].clone() else { unreachable!() };
        let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
        let expected = Entry::Table(TableEntry {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "my_table".to_string(),
            table_id: 1,
        });
        assert_eq!(entry, expected);

        let table_deletion = build_table_deletion_request(
            &DeregisterTableRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "my_table".to_string(),
            },
            1,
        );
        let result = catalog_table.delete(table_deletion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 0);
    }
}

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -38,10 +38,13 @@ use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};

use crate::error::{Error, InsertCatalogRecordSnafu};
use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::system::{
    build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
    SystemCatalogTable,
};
use crate::{
    format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
    CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
};

/// Tables holds all tables created by user.
@@ -233,6 +236,10 @@ impl SchemaProvider for InformationSchema {
        panic!("System catalog & schema does not support register table")
    }

    fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
        unimplemented!("System catalog & schema does not support rename table")
    }

    fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
        panic!("System catalog & schema does not support deregister table")
    }
@@ -269,8 +276,7 @@ impl SystemCatalog {
        table_name: String,
        table_id: TableId,
    ) -> crate::error::Result<usize> {
        let full_table_name = format_full_table_name(&catalog, &schema, &table_name);
        let request = build_table_insert_request(full_table_name, table_id);
        let request = build_table_insert_request(catalog, schema, table_name, table_id);
        self.information_schema
            .system
            .insert(request)
@@ -278,6 +284,21 @@ impl SystemCatalog {
            .context(InsertCatalogRecordSnafu)
    }

    pub(crate) async fn deregister_table(
        &self,
        request: &DeregisterTableRequest,
        table_id: TableId,
    ) -> CatalogResult<bool> {
        self.information_schema
            .system
            .delete(build_table_deletion_request(request, table_id))
            .await
            .map(|x| x == 1)
            .with_context(|_| error::DeregisterTableSnafu {
                request: request.clone(),
            })
    }

    pub async fn register_schema(
        &self,
        catalog: String,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ mod tests {
    use std::sync::Arc;

    use catalog::local::LocalCatalogManager;
    use catalog::{CatalogManager, RegisterTableRequest};
    use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::{error, info};
    use mito::config::EngineConfig;
@@ -38,6 +38,44 @@ mod tests {
        Ok(catalog_manager)
    }

    #[tokio::test]
    async fn test_rename_table() {
        common_telemetry::init_default_ut_logging();
        let catalog_manager = create_local_catalog_manager().await.unwrap();
        // register table
        let table_name = "test_table";
        let table_id = 42;
        let table = Arc::new(NumbersTable::new(table_id));
        let request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table: table.clone(),
        };
        assert!(catalog_manager.register_table(request).await.unwrap());

        // rename table
        let new_table_name = "table_t";
        let rename_table_req = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        assert!(catalog_manager
            .rename_table(rename_table_req)
            .await
            .unwrap());

        let registered_table = catalog_manager
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);
    }

    #[tokio::test]
    async fn test_duplicate_register() {
        let catalog_manager = create_local_catalog_manager().await.unwrap();

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -6,8 +6,10 @@ license.workspace = true

[dependencies]
api = { path = "../api" }
arrow-flight.workspace = true
async-stream.workspace = true
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
@@ -17,10 +19,12 @@ common-time = { path = "../common/time" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
snafu.workspace = true
tonic = "0.8"
tonic.workspace = true

[dev-dependencies]
datanode = { path = "../datanode" }

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -65,13 +65,12 @@ async fn run() {
        region_ids: vec![0],
    };

    let db = Database::new("create table", client.clone());
    let db = Database::with_client(client);
    let result = db.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
    let db = Database::new("greptime", client);
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,7 @@

use std::sync::Arc;

use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
@@ -24,6 +23,21 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct FlightClient {
    addr: String,
    client: FlightServiceClient<Channel>,
}

impl FlightClient {
    pub(crate) fn addr(&self) -> &str {
        &self.addr
    }

    pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
        &mut self.client
    }
}

#[derive(Clone, Debug, Default)]
pub struct Client {
    inner: Arc<Inner>,
@@ -104,43 +118,23 @@ impl Client {
        self.inner.set_peers(urls);
    }

    pub async fn database(&self, req: DatabaseRequest) -> Result<DatabaseResponse> {
        let req = BatchRequest {
            databases: vec![req],
            ..Default::default()
        };

        let mut res = self.batch(req).await?;
        res.databases.pop().context(error::MissingResultSnafu {
            name: "database",
            expected: 1_usize,
            actual: 0_usize,
        })
    }

    pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
        let peer = self
    pub(crate) fn make_client(&self) -> Result<FlightClient> {
        let addr = self
            .inner
            .get_peer()
            .context(error::IllegalGrpcClientStateSnafu {
                err_msg: "No available peer found",
            })?;
        let mut client = self.make_client(&peer)?;
        let result = client
            .batch(req)
            .await
            .context(error::TonicStatusSnafu { addr: peer })?;
        Ok(result.into_inner())
    }

    fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
        let addr = addr.as_ref();
        let channel = self
            .inner
            .channel_manager
            .get(addr)
            .context(error::CreateChannelSnafu { addr })?;
        Ok(GreptimeClient::new(channel))
            .get(&addr)
            .context(error::CreateChannelSnafu { addr: &addr })?;
        Ok(FlightClient {
            addr,
            client: FlightServiceClient::new(channel),
        })
    }
}

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,146 +12,134 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::{
    object_expr, query_request, AlterExpr, CreateTableExpr, DatabaseRequest, DdlRequest,
    DropTableExpr, InsertRequest, ObjectExpr, ObjectResult as GrpcObjectResult, QueryRequest,
};
use common_error::status_code::StatusCode;
use common_grpc::flight::{
    flight_messages_to_recordbatches, raw_flight_data_to_message, FlightMessage,
};
use common_query::Output;
use common_recordbatch::RecordBatches;
use snafu::{ensure, OptionExt, ResultExt};
use std::str::FromStr;

use crate::error::{ConvertFlightDataSnafu, DatanodeSnafu, IllegalFlightMessagesSnafu};
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
    AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
    QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
pub struct Database {
    name: String,
    // The "catalog" and "schema" to be used in processing the requests at the server side.
    // They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
    // They will be carried in the request header.
    catalog: String,
    schema: String,

    client: Client,
}

impl Database {
    pub fn new(name: impl Into<String>, client: Client) -> Self {
    pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
        Self {
            name: name.into(),
            catalog: catalog.into(),
            schema: schema.into(),
            client,
        }
    }

    pub fn name(&self) -> &str {
        &self.name
    pub fn with_client(client: Client) -> Self {
        Self::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
    }

    pub async fn insert(&self, request: InsertRequest) -> Result<RpcOutput> {
        let expr = ObjectExpr {
            request: Some(object_expr::Request::Insert(request)),
    pub fn set_schema(&mut self, schema: impl Into<String>) {
        self.schema = schema.into();
    }

    pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
        self.do_get(Request::Insert(request)).await
    }

    pub async fn sql(&self, sql: &str) -> Result<Output> {
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::Sql(sql.to_string())),
        }))
        .await
    }

    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::LogicalPlan(logical_plan)),
        }))
        .await
    }

    pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::CreateTable(expr)),
        }))
        .await
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::Alter(expr)),
        }))
        .await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::DropTable(expr)),
        }))
        .await
    }

    async fn do_get(&self, request: Request) -> Result<Output> {
        let request = GreptimeRequest {
            header: Some(RequestHeader {
                catalog: self.catalog.clone(),
                schema: self.schema.clone(),
            }),
            request: Some(request),
        };
        self.object(expr).await?.try_into()
    }

    pub async fn sql(&self, sql: &str) -> Result<RpcOutput> {
        let query = QueryRequest {
            query: Some(query_request::Query::Sql(sql.to_string())),
        };
        self.do_query(query).await
    }

    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<RpcOutput> {
        let query = QueryRequest {
            query: Some(query_request::Query::LogicalPlan(logical_plan)),
        };
        self.do_query(query).await
    }

    async fn do_query(&self, request: QueryRequest) -> Result<RpcOutput> {
        let expr = ObjectExpr {
            request: Some(object_expr::Request::Query(request)),
        let request = Ticket {
            ticket: request.encode_to_vec(),
        };

        let obj_result = self.object(expr).await?;
        obj_result.try_into()
    }
        let mut client = self.client.make_client()?;

    pub async fn create(&self, expr: CreateTableExpr) -> Result<RpcOutput> {
        let expr = ObjectExpr {
            request: Some(object_expr::Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::CreateTable(expr)),
            })),
        };
        self.object(expr).await?.try_into()
    }
        // TODO(LFC): Streaming get flight data.
        let flight_data: Vec<FlightData> = client
            .mut_inner()
            .do_get(request)
            .and_then(|response| response.into_inner().try_collect())
            .await
            .map_err(|e| {
                let code = get_metadata_value(&e, INNER_ERROR_CODE)
                    .and_then(|s| StatusCode::from_str(&s).ok())
                    .unwrap_or(StatusCode::Unknown);
                let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
                error::ExternalSnafu { code, msg }
                    .fail::<()>()
                    .map_err(BoxedError::new)
                    .context(error::FlightGetSnafu {
                        tonic_code: e.code(),
                        addr: client.addr(),
                    })
                    .unwrap_err()
            })?;

    pub async fn alter(&self, expr: AlterExpr) -> Result<RpcOutput> {
        let expr = ObjectExpr {
            request: Some(object_expr::Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::Alter(expr)),
            })),
        };
        self.object(expr).await?.try_into()
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<RpcOutput> {
        let expr = ObjectExpr {
            request: Some(object_expr::Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::DropTable(expr)),
            })),
        };
        self.object(expr).await?.try_into()
    }

    pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
        let res = self.objects(vec![expr]).await?.pop().unwrap();
        Ok(res)
    }

    async fn objects(&self, exprs: Vec<ObjectExpr>) -> Result<Vec<GrpcObjectResult>> {
        let expr_count = exprs.len();
        let req = DatabaseRequest {
            name: self.name.clone(),
            exprs,
        };

        let res = self.client.database(req).await?;
        let res = res.results;

        ensure!(
            res.len() == expr_count,
            error::MissingResultSnafu {
                name: "object_results",
                expected: expr_count,
                actual: res.len(),
            }
        );

        Ok(res)
    }
}

#[derive(Debug)]
pub enum RpcOutput {
    RecordBatches(RecordBatches),
    AffectedRows(usize),
}

impl TryFrom<api::v1::ObjectResult> for RpcOutput {
    type Error = error::Error;

    fn try_from(object_result: api::v1::ObjectResult) -> std::result::Result<Self, Self::Error> {
        let header = object_result.header.context(error::MissingHeaderSnafu)?;
        if !StatusCode::is_success(header.code) {
            return DatanodeSnafu {
                code: header.code,
                msg: header.err_msg,
            }
            .fail();
        }

        let flight_messages = raw_flight_data_to_message(object_result.flight_data)
            .context(ConvertFlightDataSnafu)?;
        let decoder = &mut FlightDecoder::default();
        let flight_messages = flight_data
            .into_iter()
            .map(|x| decoder.try_decode(x).context(ConvertFlightDataSnafu))
            .collect::<Result<Vec<_>>>()?;

        let output = if let Some(FlightMessage::AffectedRows(rows)) = flight_messages.get(0) {
            ensure!(
@@ -160,23 +148,20 @@ impl TryFrom<api::v1::ObjectResult> for RpcOutput {
                reason: "Expect 'AffectedRows' Flight messages to be one and only!"
            }
        );
        RpcOutput::AffectedRows(*rows)
        Output::AffectedRows(*rows)
    } else {
        let recordbatches = flight_messages_to_recordbatches(flight_messages)
            .context(ConvertFlightDataSnafu)?;
        RpcOutput::RecordBatches(recordbatches)
        Output::RecordBatches(recordbatches)
    };
    Ok(output)
}
}

impl From<RpcOutput> for Output {
    fn from(value: RpcOutput) -> Self {
        match value {
            RpcOutput::AffectedRows(x) => Output::AffectedRows(x),
            RpcOutput::RecordBatches(x) => Output::RecordBatches(x),
        }
    }
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
    e.metadata()
        .get(key)
        .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[cfg(test)]

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
use std::any::Any;

use common_error::prelude::*;
use tonic::Code;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -25,26 +26,18 @@ pub enum Error {
        backtrace: Backtrace,
    },

    #[snafu(display("Missing {}, expected {}, actual {}", name, expected, actual))]
    MissingResult {
        name: String,
        expected: usize,
        actual: usize,
    },

    #[snafu(display("Missing result header"))]
    MissingHeader,

    #[snafu(display("Tonic internal error, addr: {}, source: {}", addr, source))]
    TonicStatus {
    #[snafu(display(
        "Failed to do Flight get, addr: {}, code: {}, source: {}",
        addr,
        tonic_code,
        source
    ))]
    FlightGet {
        addr: String,
        source: tonic::Status,
        backtrace: Backtrace,
        tonic_code: Code,
        source: BoxedError,
    },

    #[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
    Datanode { code: u32, msg: String },

    #[snafu(display("Failed to convert FlightData, source: {}", source))]
    ConvertFlightData {
        #[snafu(backtrace)]
@@ -76,6 +69,10 @@ pub enum Error {
        #[snafu(backtrace)]
        source: common_grpc::error::Error,
    },

    /// Error deserialized from gRPC metadata
    #[snafu(display("{}", msg))]
    ExternalError { code: StatusCode, msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -84,16 +81,14 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::IllegalFlightMessages { .. }
            | Error::MissingResult { .. }
            | Error::MissingHeader { .. }
            | Error::TonicStatus { .. }
            | Error::Datanode { .. }
            | Error::ColumnDataType { .. }
            | Error::MissingField { .. } => StatusCode::Internal,
            Error::FlightGet { source, .. } => source.status_code(),
            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
                source.status_code()
            }
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
            Error::ExternalError { code, .. } => *code,
        }
    }

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,5 +20,5 @@ pub mod load_balance;
pub use api;

pub use self::client::Client;
pub use self::database::{Database, RpcOutput};
pub use self::database::Database;
pub use self::error::{Error, Result};

@@ -1,10 +1,10 @@
// Copyright 2022 Greptime Team
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -12,6 +12,7 @@ path = "src/bin/greptime.rs"
[dependencies]
anymap = "1.0.0-beta.2"
clap = { version = "3.1", features = ["derive"] }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-telemetry = { path = "../common/telemetry", features = [
    "deadlock_detection",
@@ -24,7 +25,7 @@ meta-srv = { path = "../meta-srv" }
serde.workspace = true
servers = { path = "../servers" }
snafu.workspace = true
tokio = { version = "1.18", features = ["full"] }
tokio.workspace = true
toml = "0.5"

[dev-dependencies]
@@ -1,10 +1,10 @@
|
||||
// Copyright 2022 Greptime Team
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Copyright 2022 Greptime Team
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -14,7 +14,7 @@
 
 use clap::Parser;
 use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
 use meta_client::MetaClientOpts;
 use servers::Mode;
 use snafu::ResultExt;

@@ -54,6 +54,8 @@ struct StartCommand {
     #[clap(long)]
     rpc_addr: Option<String>,
+    #[clap(long)]
+    rpc_hostname: Option<String>,
     #[clap(long)]
     mysql_addr: Option<String>,
     #[clap(long)]
     metasrv_addr: Option<String>,

@@ -94,6 +96,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         if let Some(addr) = cmd.rpc_addr {
             opts.rpc_addr = addr;
         }
 
+        if cmd.rpc_hostname.is_some() {
+            opts.rpc_hostname = cmd.rpc_hostname;
+        }
+
         if let Some(addr) = cmd.mysql_addr {
             opts.mysql_addr = addr;
         }

@@ -121,11 +128,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         }
 
         if let Some(data_dir) = cmd.data_dir {
-            opts.storage = ObjectStoreConfig::File { data_dir };
+            opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
         }
 
         if let Some(wal_dir) = cmd.wal_dir {
-            opts.wal_dir = wal_dir;
+            opts.wal.dir = wal_dir;
         }
         Ok(opts)
     }

@@ -134,6 +141,7 @@ impl TryFrom<StartCommand> for DatanodeOptions {
 #[cfg(test)]
 mod tests {
     use std::assert_matches::assert_matches;
+    use std::time::Duration;
 
     use datanode::datanode::ObjectStoreConfig;
     use servers::Mode;

@@ -151,7 +159,7 @@ mod tests {
         };
         let options: DatanodeOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
-        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
+        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
         assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
         assert_eq!(4, options.mysql_runtime_size);
         let MetaClientOpts {

@@ -167,10 +175,11 @@ mod tests {
         assert!(!tcp_nodelay);
 
         match options.storage {
-            ObjectStoreConfig::File { data_dir } => {
+            ObjectStoreConfig::File(FileConfig { data_dir }) => {
                 assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
             }
             ObjectStoreConfig::S3 { .. } => unreachable!(),
+            ObjectStoreConfig::Oss { .. } => unreachable!(),
         };
     }

@@ -216,6 +225,11 @@ mod tests {
             ..Default::default()
         })
         .unwrap();
+        assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
+        assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
+        assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
+        assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
+        assert!(!dn_opts.wal.sync_write);
         assert_eq!(Some(42), dn_opts.node_id);
         let MetaClientOpts {
             metasrv_addrs: metasrv_addr,
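The hunks above replace the brace-style `File { data_dir }` variant with a tuple variant wrapping a dedicated `FileConfig` struct. A minimal standalone sketch of that newtype pattern follows; the `S3`/`Oss` payload fields here are placeholders, not GreptimeDB's actual definitions:

#[derive(Debug, Default)]
pub struct FileConfig {
    pub data_dir: String,
}

#[derive(Debug)]
pub enum ObjectStoreConfig {
    File(FileConfig),
    S3 { bucket: String },  // placeholder fields, assumed
    Oss { bucket: String }, // placeholder fields, assumed
}

impl Default for ObjectStoreConfig {
    // A tuple variant can delegate to the payload struct's Default;
    // a brace-style variant would have to spell out every field here.
    fn default() -> Self {
        ObjectStoreConfig::File(FileConfig::default())
    }
}

fn main() {
    let opts = ObjectStoreConfig::File(FileConfig {
        data_dir: "/tmp/greptimedb/data/".to_string(),
    });
    match opts {
        ObjectStoreConfig::File(FileConfig { data_dir }) => println!("file store at {data_dir}"),
        _ => unreachable!(),
    }
}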
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -61,6 +61,13 @@ pub enum Error {
         #[snafu(backtrace)]
         source: servers::auth::Error,
     },
+
+    #[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
+    UnsupportedSelectorType {
+        selector_type: String,
+        #[snafu(backtrace)]
+        source: meta_srv::error::Error,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;

@@ -71,6 +78,7 @@ impl ErrorExt for Error {
             Error::StartDatanode { source } => source.status_code(),
             Error::StartFrontend { source } => source.status_code(),
             Error::StartMetaServer { source } => source.status_code(),
+            Error::UnsupportedSelectorType { source, .. } => source.status_code(),
             Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
                 StatusCode::InvalidArguments
             }
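The new `UnsupportedSelectorType` variant is raised through snafu's generated context selector (`UnsupportedSelectorTypeSnafu`, used later in the metasrv CLI). A minimal sketch of the same mechanism, assuming snafu 0.7 and a variant without a source error:

use snafu::Snafu;

#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Unsupported selector type: {}", selector_type))]
    UnsupportedSelectorType { selector_type: String },
}

fn main() {
    // The derive generates an `UnsupportedSelectorTypeSnafu` context selector;
    // `build()` turns it directly into the error value.
    let err: Error = UnsupportedSelectorTypeSnafu {
        selector_type: "FooBased".to_string(),
    }
    .build();
    println!("{err}");
}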
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -15,6 +15,7 @@
 use std::sync::Arc;
 
 use clap::Parser;
+use common_base::Plugins;
 use frontend::frontend::{Frontend, FrontendOptions};
 use frontend::grpc::GrpcOptions;
 use frontend::influxdb::InfluxdbOptions;

@@ -22,7 +23,6 @@ use frontend::instance::Instance;
 use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
-use frontend::Plugins;
 use meta_client::MetaClientOpts;
 use servers::auth::UserProviderRef;
 use servers::http::HttpOptions;

@@ -91,10 +91,9 @@ impl StartCommand {
         let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
         let opts: FrontendOptions = self.try_into()?;
 
-        let mut instance = Instance::try_new_distributed(&opts)
+        let instance = Instance::try_new_distributed(&opts, plugins.clone())
             .await
             .context(error::StartFrontendSnafu)?;
-        instance.set_plugins(plugins.clone());
 
         let mut frontend = Frontend::new(opts, instance, plugins);
         frontend.start().await.context(error::StartFrontendSnafu)

@@ -287,7 +286,7 @@ mod tests {
 
         let provider = provider.unwrap();
         let result = provider
-            .auth(Identity::UserId("test", None), Password::PlainText("test"))
+            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
             .await;
         assert!(result.is_ok());
     }
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -13,7 +13,7 @@
 // limitations under the License.
 
 use clap::Parser;
-use common_telemetry::logging;
+use common_telemetry::{info, logging, warn};
 use meta_srv::bootstrap;
 use meta_srv::metasrv::MetaSrvOptions;
 use snafu::ResultExt;

@@ -56,6 +56,10 @@ struct StartCommand {
     store_addr: Option<String>,
     #[clap(short, long)]
     config_file: Option<String>,
+    #[clap(short, long)]
+    selector: Option<String>,
+    #[clap(long)]
+    use_memory_store: bool,
 }
 
 impl StartCommand {

@@ -91,6 +95,17 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
         if let Some(addr) = cmd.store_addr {
             opts.store_addr = addr;
         }
+        if let Some(selector_type) = &cmd.selector {
+            opts.selector = selector_type[..]
+                .try_into()
+                .context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
+            info!("Using {} selector", selector_type);
+        }
+
+        if cmd.use_memory_store {
+            warn!("Using memory store for Meta. Make sure you are in running tests.");
+            opts.use_memory_store = true;
+        }
 
         Ok(opts)
     }

@@ -98,6 +113,8 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
 
 #[cfg(test)]
 mod tests {
+    use meta_srv::selector::SelectorType;
+
     use super::*;
 
     #[test]

@@ -107,11 +124,14 @@ mod tests {
             server_addr: Some("127.0.0.1:3002".to_string()),
             store_addr: Some("127.0.0.1:2380".to_string()),
             config_file: None,
+            selector: Some("LoadBased".to_string()),
+            use_memory_store: false,
         };
         let options: MetaSrvOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
         assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
         assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
+        assert_eq!(SelectorType::LoadBased, options.selector);
     }
 
     #[test]

@@ -120,15 +140,18 @@ mod tests {
             bind_addr: None,
             server_addr: None,
             store_addr: None,
+            selector: None,
             config_file: Some(format!(
                 "{}/../../config/metasrv.example.toml",
                 std::env::current_dir().unwrap().as_path().to_str().unwrap()
             )),
+            use_memory_store: false,
         };
         let options: MetaSrvOptions = cmd.try_into().unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
         assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
         assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
         assert_eq!(15, options.datanode_lease_secs);
+        assert_eq!(SelectorType::LeaseBased, options.selector);
     }
 }
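The expression `selector_type[..].try_into()` above implies a `TryFrom<&str>` conversion on the selector enum. A minimal sketch of such a conversion; only the `LoadBased`/`LeaseBased` names come from the tests above, and the error type here is an assumption:

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum SelectorType {
    LoadBased,
    LeaseBased,
}

impl TryFrom<&str> for SelectorType {
    type Error = String;

    fn try_from(v: &str) -> Result<Self, Self::Error> {
        match v {
            "LoadBased" => Ok(SelectorType::LoadBased),
            "LeaseBased" => Ok(SelectorType::LeaseBased),
            other => Err(format!("unsupported selector type: {other}")),
        }
    }
}

fn main() {
    let selector: SelectorType = "LoadBased".try_into().unwrap();
    assert_eq!(SelectorType::LoadBased, selector);
    assert!(SelectorType::try_from("FooBased").is_err());
}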
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -15,8 +15,9 @@
 use std::sync::Arc;
 
 use clap::Parser;
+use common_base::Plugins;
 use common_telemetry::info;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
 use datanode::instance::InstanceRef;
 use frontend::frontend::{Frontend, FrontendOptions};
 use frontend::grpc::GrpcOptions;

@@ -26,7 +27,7 @@ use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
 use frontend::prometheus::PrometheusOptions;
-use frontend::Plugins;
+use frontend::promql::PromqlOptions;
 use serde::{Deserialize, Serialize};
 use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};

@@ -63,6 +64,7 @@ impl SubCommand {
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(default)]
 pub struct StandaloneOptions {
     pub http_options: Option<HttpOptions>,
     pub grpc_options: Option<GrpcOptions>,

@@ -71,8 +73,9 @@ pub struct StandaloneOptions {
     pub opentsdb_options: Option<OpentsdbOptions>,
     pub influxdb_options: Option<InfluxdbOptions>,
     pub prometheus_options: Option<PrometheusOptions>,
+    pub promql_options: Option<PromqlOptions>,
     pub mode: Mode,
-    pub wal_dir: String,
+    pub wal: WalConfig,
     pub storage: ObjectStoreConfig,
     pub enable_memory_catalog: bool,
 }

@@ -87,8 +90,9 @@ impl Default for StandaloneOptions {
             opentsdb_options: Some(OpentsdbOptions::default()),
             influxdb_options: Some(InfluxdbOptions::default()),
             prometheus_options: Some(PrometheusOptions::default()),
+            promql_options: Some(PromqlOptions::default()),
             mode: Mode::Standalone,
-            wal_dir: "/tmp/greptimedb/wal".to_string(),
+            wal: WalConfig::default(),
             storage: ObjectStoreConfig::default(),
             enable_memory_catalog: false,
         }

@@ -105,6 +109,7 @@ impl StandaloneOptions {
             opentsdb_options: self.opentsdb_options,
             influxdb_options: self.influxdb_options,
             prometheus_options: self.prometheus_options,
+            promql_options: self.promql_options,
             mode: self.mode,
             meta_client_opts: None,
         }

@@ -112,7 +117,7 @@ impl StandaloneOptions {
 
     fn datanode_options(self) -> DatanodeOptions {
         DatanodeOptions {
-            wal_dir: self.wal_dir,
+            wal: self.wal,
             storage: self.storage,
             enable_memory_catalog: self.enable_memory_catalog,
             ..Default::default()

@@ -322,6 +327,10 @@ mod tests {
             fe_opts.mysql_options.as_ref().unwrap().addr
         );
         assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
+        assert_eq!(
+            None,
+            fe_opts.mysql_options.as_ref().unwrap().reject_no_database
+        );
         assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
     }

@@ -349,7 +358,7 @@ mod tests {
         assert!(provider.is_some());
         let provider = provider.unwrap();
         let result = provider
-            .auth(Identity::UserId("test", None), Password::PlainText("test"))
+            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
             .await;
         assert!(result.is_ok());
     }
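The standalone options struct above fans out into per-component options (`frontend_options`, `datanode_options`). A minimal sketch of that pattern with hypothetical fields, now routing the whole `WalConfig` instead of a bare `wal_dir` string:

#[derive(Debug, Default, Clone)]
struct WalConfig {
    dir: String,
}

#[derive(Debug, Default)]
struct StandaloneOptions {
    wal: WalConfig,
    http_addr: String,
}

#[derive(Debug)]
struct DatanodeOptions {
    wal: WalConfig,
}

#[derive(Debug)]
struct FrontendOptions {
    http_addr: String,
}

impl StandaloneOptions {
    // Each component receives only the slice of configuration it owns.
    fn frontend_options(&self) -> FrontendOptions {
        FrontendOptions { http_addr: self.http_addr.clone() }
    }

    fn datanode_options(self) -> DatanodeOptions {
        DatanodeOptions { wal: self.wal }
    }
}

fn main() {
    let opts = StandaloneOptions::default();
    println!("{:?}", opts.frontend_options());
    println!("{:?}", opts.datanode_options());
}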
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -37,12 +37,23 @@ mod tests {
     use crate::error::Result;
 
     #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
+    #[serde(default)]
     struct MockConfig {
         path: String,
         port: u32,
         host: String,
     }
 
+    impl Default for MockConfig {
+        fn default() -> Self {
+            Self {
+                path: "test".to_string(),
+                port: 0,
+                host: "localhost".to_string(),
+            }
+        }
+    }
+
     #[test]
     fn test_from_file() -> Result<()> {
         let config = MockConfig {

@@ -63,6 +74,21 @@ mod tests {
         let loaded_config: MockConfig = from_file!(&test_file)?;
         assert_eq!(loaded_config, config);
 
+        // Only host in file
+        let mut file = File::create(&test_file).unwrap();
+        file.write_all("host='greptime.test'\n".as_bytes()).unwrap();
+
+        let loaded_config: MockConfig = from_file!(&test_file)?;
+        assert_eq!(loaded_config.host, "greptime.test");
+        assert_eq!(loaded_config.port, 0);
+        assert_eq!(loaded_config.path, "test");
+
+        // Truncate the file.
+        let file = File::create(&test_file).unwrap();
+        file.set_len(0).unwrap();
+        let loaded_config: MockConfig = from_file!(&test_file)?;
+        assert_eq!(loaded_config, MockConfig::default());
+
         Ok(())
     }
 }
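The test above exercises `#[serde(default)]`: any field missing from the TOML file falls back to the `Default` impl. The same behaviour can be reproduced standalone with the `toml` crate (the `from_file!` macro itself is not shown here):

use serde::Deserialize;

#[derive(Debug, PartialEq, Deserialize)]
#[serde(default)]
struct MockConfig {
    path: String,
    port: u32,
    host: String,
}

impl Default for MockConfig {
    fn default() -> Self {
        Self {
            path: "test".to_string(),
            port: 0,
            host: "localhost".to_string(),
        }
    }
}

fn main() {
    // Only `host` is present; `path` and `port` fall back to Default.
    let cfg: MockConfig = toml::from_str("host = 'greptime.test'").unwrap();
    assert_eq!(cfg.host, "greptime.test");
    assert_eq!(cfg.port, 0);
    assert_eq!(cfg.path, "test");
}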
@@ -5,9 +5,13 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
+anymap = "1.0.0-beta.2"
 bitvec = "1.0"
+bytes = { version = "1.1", features = ["serde"] }
 common-error = { path = "../error" }
 paste = "1.0"
+serde = { version = "1.0", features = ["derive"] }
+snafu.workspace = true
 
 [dev-dependencies]
 toml = "0.5"
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -15,5 +15,9 @@
 pub mod bit_vec;
 pub mod buffer;
 pub mod bytes;
+#[allow(clippy::all)]
+pub mod readable_size;
 
 pub use bit_vec::BitVec;
+
+pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;
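`Plugins` is a type-keyed map: each plugin is stored and retrieved by its Rust type rather than by a string name. A minimal usage sketch against the anymap 1.0.0-beta.2 API pulled in above; `UserProvider` is a stand-in type, not GreptimeDB's:

type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;

#[derive(Debug, Clone)]
struct UserProvider(String);

fn main() {
    let mut plugins: Plugins = Plugins::new();
    plugins.insert(UserProvider("static_user_provider".to_string()));

    // Look a plugin up by its type; absence simply yields None.
    let provider = plugins.get::<UserProvider>();
    assert!(provider.is_some());
    println!("{:?}", provider.unwrap());
}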
src/common/base/src/readable_size.rs (new file, 321 lines)
@@ -0,0 +1,321 @@
// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.

use std::fmt;
use std::fmt::{Display, Write};
use std::ops::{Div, Mul};
use std::str::FromStr;

use serde::de::{Unexpected, Visitor};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

const UNIT: u64 = 1;

const BINARY_DATA_MAGNITUDE: u64 = 1024;
pub const B: u64 = UNIT;
pub const KIB: u64 = B * BINARY_DATA_MAGNITUDE;
pub const MIB: u64 = KIB * BINARY_DATA_MAGNITUDE;
pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;

#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
pub struct ReadableSize(pub u64);

impl ReadableSize {
    pub const fn kb(count: u64) -> ReadableSize {
        ReadableSize(count * KIB)
    }

    pub const fn mb(count: u64) -> ReadableSize {
        ReadableSize(count * MIB)
    }

    pub const fn gb(count: u64) -> ReadableSize {
        ReadableSize(count * GIB)
    }

    pub const fn as_mb(self) -> u64 {
        self.0 / MIB
    }
}

impl Div<u64> for ReadableSize {
    type Output = ReadableSize;

    fn div(self, rhs: u64) -> ReadableSize {
        ReadableSize(self.0 / rhs)
    }
}

impl Div<ReadableSize> for ReadableSize {
    type Output = u64;

    fn div(self, rhs: ReadableSize) -> u64 {
        self.0 / rhs.0
    }
}

impl Mul<u64> for ReadableSize {
    type Output = ReadableSize;

    fn mul(self, rhs: u64) -> ReadableSize {
        ReadableSize(self.0 * rhs)
    }
}

impl Serialize for ReadableSize {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let size = self.0;
        let mut buffer = String::new();
        if size == 0 {
            write!(buffer, "{}KiB", size).unwrap();
        } else if size % PIB == 0 {
            write!(buffer, "{}PiB", size / PIB).unwrap();
        } else if size % TIB == 0 {
            write!(buffer, "{}TiB", size / TIB).unwrap();
        } else if size % GIB as u64 == 0 {
            write!(buffer, "{}GiB", size / GIB).unwrap();
        } else if size % MIB as u64 == 0 {
            write!(buffer, "{}MiB", size / MIB).unwrap();
        } else if size % KIB as u64 == 0 {
            write!(buffer, "{}KiB", size / KIB).unwrap();
        } else {
            return serializer.serialize_u64(size);
        }
        serializer.serialize_str(&buffer)
    }
}

impl FromStr for ReadableSize {
    type Err = String;

    // This method parses value in binary unit.
    fn from_str(s: &str) -> Result<ReadableSize, String> {
        let size_str = s.trim();
        if size_str.is_empty() {
            return Err(format!("{:?} is not a valid size.", s));
        }

        if !size_str.is_ascii() {
            return Err(format!("ASCII string is expected, but got {:?}", s));
        }

        // size: digits and '.' as decimal separator
        let size_len = size_str
            .to_string()
            .chars()
            .take_while(|c| char::is_ascii_digit(c) || ['.', 'e', 'E', '-', '+'].contains(c))
            .count();

        // unit: alphabetic characters
        let (size, unit) = size_str.split_at(size_len);

        let unit = match unit.trim() {
            "K" | "KB" | "KiB" => KIB,
            "M" | "MB" | "MiB" => MIB,
            "G" | "GB" | "GiB" => GIB,
            "T" | "TB" | "TiB" => TIB,
            "P" | "PB" | "PiB" => PIB,
            "B" | "" => B,
            _ => {
                return Err(format!(
                    "only B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, and PiB are supported: {:?}",
                    s
                ));
            }
        };

        match size.parse::<f64>() {
            Ok(n) => Ok(ReadableSize((n * unit as f64) as u64)),
            Err(_) => Err(format!("invalid size string: {:?}", s)),
        }
    }
}

impl Display for ReadableSize {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.0 >= PIB {
            write!(f, "{:.1}PiB", self.0 as f64 / PIB as f64)
        } else if self.0 >= TIB {
            write!(f, "{:.1}TiB", self.0 as f64 / TIB as f64)
        } else if self.0 >= GIB {
            write!(f, "{:.1}GiB", self.0 as f64 / GIB as f64)
        } else if self.0 >= MIB {
            write!(f, "{:.1}MiB", self.0 as f64 / MIB as f64)
        } else if self.0 >= KIB {
            write!(f, "{:.1}KiB", self.0 as f64 / KIB as f64)
        } else {
            write!(f, "{}B", self.0)
        }
    }
}

impl<'de> Deserialize<'de> for ReadableSize {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct SizeVisitor;

        impl<'de> Visitor<'de> for SizeVisitor {
            type Value = ReadableSize;

            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                formatter.write_str("valid size")
            }

            fn visit_i64<E>(self, size: i64) -> Result<ReadableSize, E>
            where
                E: de::Error,
            {
                if size >= 0 {
                    self.visit_u64(size as u64)
                } else {
                    Err(E::invalid_value(Unexpected::Signed(size), &self))
                }
            }

            fn visit_u64<E>(self, size: u64) -> Result<ReadableSize, E>
            where
                E: de::Error,
            {
                Ok(ReadableSize(size))
            }

            fn visit_str<E>(self, size_str: &str) -> Result<ReadableSize, E>
            where
                E: de::Error,
            {
                size_str.parse().map_err(E::custom)
            }
        }

        deserializer.deserialize_any(SizeVisitor)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_readable_size() {
        let s = ReadableSize::kb(2);
        assert_eq!(s.0, 2048);
        assert_eq!(s.as_mb(), 0);
        let s = ReadableSize::mb(2);
        assert_eq!(s.0, 2 * 1024 * 1024);
        assert_eq!(s.as_mb(), 2);
        let s = ReadableSize::gb(2);
        assert_eq!(s.0, 2 * 1024 * 1024 * 1024);
        assert_eq!(s.as_mb(), 2048);

        assert_eq!((ReadableSize::mb(2) / 2).0, MIB);
        assert_eq!((ReadableSize::mb(1) / 2).0, 512 * KIB);
        assert_eq!(ReadableSize::mb(2) / ReadableSize::kb(1), 2048);
    }

    #[test]
    fn test_parse_readable_size() {
        #[derive(Serialize, Deserialize)]
        struct SizeHolder {
            s: ReadableSize,
        }

        let legal_cases = vec![
            (0, "0KiB"),
            (2 * KIB, "2KiB"),
            (4 * MIB, "4MiB"),
            (5 * GIB, "5GiB"),
            (7 * TIB, "7TiB"),
            (11 * PIB, "11PiB"),
        ];
        for (size, exp) in legal_cases {
            let c = SizeHolder {
                s: ReadableSize(size),
            };
            let res_str = toml::to_string(&c).unwrap();
            let exp_str = format!("s = {:?}\n", exp);
            assert_eq!(res_str, exp_str);
            let res_size: SizeHolder = toml::from_str(&exp_str).unwrap();
            assert_eq!(res_size.s.0, size);
        }

        let c = SizeHolder {
            s: ReadableSize(512),
        };
        let res_str = toml::to_string(&c).unwrap();
        assert_eq!(res_str, "s = 512\n");
        let res_size: SizeHolder = toml::from_str(&res_str).unwrap();
        assert_eq!(res_size.s.0, c.s.0);

        let decode_cases = vec![
            (" 0.5 PB", PIB / 2),
            ("0.5 TB", TIB / 2),
            ("0.5GB ", GIB / 2),
            ("0.5MB", MIB / 2),
            ("0.5KB", KIB / 2),
            ("0.5P", PIB / 2),
            ("0.5T", TIB / 2),
            ("0.5G", GIB / 2),
            ("0.5M", MIB / 2),
            ("0.5K", KIB / 2),
            ("23", 23),
            ("1", 1),
            ("1024B", KIB),
            // units with binary prefixes
            (" 0.5 PiB", PIB / 2),
            ("1PiB", PIB),
            ("0.5 TiB", TIB / 2),
            ("2 TiB", TIB * 2),
            ("0.5GiB ", GIB / 2),
            ("787GiB ", GIB * 787),
            ("0.5MiB", MIB / 2),
            ("3MiB", MIB * 3),
            ("0.5KiB", KIB / 2),
            ("1 KiB", KIB),
            // scientific notation
            ("0.5e6 B", B * 500000),
            ("0.5E6 B", B * 500000),
            ("1e6B", B * 1000000),
            ("8E6B", B * 8000000),
            ("8e7", B * 80000000),
            ("1e-1MB", MIB / 10),
            ("1e+1MB", MIB * 10),
            ("0e+10MB", 0),
        ];
        for (src, exp) in decode_cases {
            let src = format!("s = {:?}", src);
            let res: SizeHolder = toml::from_str(&src).unwrap();
            assert_eq!(res.s.0, exp);
        }

        let illegal_cases = vec![
            "0.5kb", "0.5kB", "0.5Kb", "0.5k", "0.5g", "b", "gb", "1b", "B", "1K24B", " 5_KB",
            "4B7", "5M_",
        ];
        for src in illegal_cases {
            let src_str = format!("s = {:?}", src);
            assert!(toml::from_str::<SizeHolder>(&src_str).is_err(), "{}", src);
        }
    }
}
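A short usage sketch of the `ReadableSize` type added above, assuming the module is in scope (for example via `use common_base::readable_size::ReadableSize;`):

fn main() {
    // FromStr parses binary units, so "1GiB" is 1024 * 1024 * 1024 bytes.
    let size: ReadableSize = "1GiB".parse().unwrap();
    assert_eq!(ReadableSize::gb(1), size);
    assert_eq!(1024, size.as_mb());

    // Display picks the largest fitting unit with one decimal place.
    assert_eq!("1.5KiB", format!("{}", ReadableSize(1536)));
    assert_eq!("512B", format!("{}", ReadableSize(512)));
}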
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -16,6 +16,6 @@ serde_json = "1.0"
 snafu = { version = "0.7", features = ["backtraces"] }
 
 [dev-dependencies]
-chrono = "0.4"
+chrono.workspace = true
 tempdir = "0.3"
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -14,3 +14,9 @@
 
 pub mod consts;
 pub mod error;
+
+/// Formats table fully-qualified name
+#[inline]
+pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
+    format!("{catalog}.{schema}.{table}")
+}
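Usage of the new helper is straightforward; `greptime` and `public` below are only example catalog and schema names:

fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

fn main() {
    assert_eq!(
        "greptime.public.metrics",
        format_full_table_name("greptime", "public", "metrics")
    );
}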
@@ -6,3 +6,4 @@ license.workspace = true
 
 [dependencies]
 snafu = { version = "0.7", features = ["backtraces"] }
+strum = { version = "0.24", features = ["std", "derive"] }
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -33,72 +33,99 @@ pub trait ErrorExt: std::error::Error {
     fn as_any(&self) -> &dyn Any;
 }
 
-/// A helper macro to define a opaque boxed error based on errors that implement [ErrorExt] trait.
-#[macro_export]
-macro_rules! define_opaque_error {
-    ($Error:ident) => {
-        /// An error behaves like `Box<dyn Error>`.
-        ///
-        /// Define this error as a new type instead of using `Box<dyn Error>` directly so we can implement
-        /// more methods or traits for it.
-        pub struct $Error {
-            inner: Box<dyn $crate::ext::ErrorExt + Send + Sync>,
-        }
-
-        impl $Error {
-            pub fn new<E: $crate::ext::ErrorExt + Send + Sync + 'static>(err: E) -> Self {
-                Self {
-                    inner: Box::new(err),
-                }
-            }
-        }
-
-        impl std::fmt::Debug for $Error {
-            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                // Use the pretty debug format of inner error for opaque error.
-                let debug_format = $crate::format::DebugFormat::new(&*self.inner);
-                debug_format.fmt(f)
-            }
-        }
-
-        impl std::fmt::Display for $Error {
-            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                write!(f, "{}", self.inner)
-            }
-        }
-
-        impl std::error::Error for $Error {
-            fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-                self.inner.source()
-            }
-        }
-
-        impl $crate::ext::ErrorExt for $Error {
-            fn status_code(&self) -> $crate::status_code::StatusCode {
-                self.inner.status_code()
-            }
-
-            fn backtrace_opt(&self) -> Option<&$crate::snafu::Backtrace> {
-                self.inner.backtrace_opt()
-            }
-
-            fn as_any(&self) -> &dyn std::any::Any {
-                self.inner.as_any()
-            }
-        }
-
-        // Implement ErrorCompat for this opaque error so the backtrace is also available
-        // via `ErrorCompat::backtrace()`.
-        impl $crate::snafu::ErrorCompat for $Error {
-            fn backtrace(&self) -> Option<&$crate::snafu::Backtrace> {
-                self.inner.backtrace_opt()
-            }
-        }
-    };
-}
-
-// Define a general boxed error.
-define_opaque_error!(BoxedError);
+/// An opaque boxed error based on errors that implement [ErrorExt] trait.
+pub struct BoxedError {
+    inner: Box<dyn crate::ext::ErrorExt + Send + Sync>,
+}
+
+impl BoxedError {
+    pub fn new<E: crate::ext::ErrorExt + Send + Sync + 'static>(err: E) -> Self {
+        Self {
+            inner: Box::new(err),
+        }
+    }
+}
+
+impl std::fmt::Debug for BoxedError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Use the pretty debug format of inner error for opaque error.
+        let debug_format = crate::format::DebugFormat::new(&*self.inner);
+        debug_format.fmt(f)
+    }
+}
+
+impl std::fmt::Display for BoxedError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.inner)
+    }
+}
+
+impl std::error::Error for BoxedError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        self.inner.source()
+    }
+}
+
+impl crate::ext::ErrorExt for BoxedError {
+    fn status_code(&self) -> crate::status_code::StatusCode {
+        self.inner.status_code()
+    }
+
+    fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
+        self.inner.backtrace_opt()
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self.inner.as_any()
+    }
+}
+
+// Implement ErrorCompat for this opaque error so the backtrace is also available
+// via `ErrorCompat::backtrace()`.
+impl crate::snafu::ErrorCompat for BoxedError {
+    fn backtrace(&self) -> Option<&crate::snafu::Backtrace> {
+        self.inner.backtrace_opt()
+    }
+}
+
+/// Error type with plain error message
+#[derive(Debug)]
+pub struct PlainError {
+    msg: String,
+    status_code: StatusCode,
+}
+
+impl PlainError {
+    pub fn new(msg: String, status_code: StatusCode) -> Self {
+        Self { msg, status_code }
+    }
+}
+
+impl std::fmt::Display for PlainError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.msg)
+    }
+}
+
+impl std::error::Error for PlainError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        None
+    }
+}
+
+impl crate::ext::ErrorExt for PlainError {
+    fn status_code(&self) -> crate::status_code::StatusCode {
+        self.status_code
+    }
+
+    fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
+        None
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self as _
+    }
+}
 
 #[cfg(test)]
 mod tests {
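A sketch of how the two new types compose, assuming the items above are importable via `common_error::prelude::*` plus `common_error::ext::PlainError`: any `ErrorExt` error can be erased into a `BoxedError`, and `PlainError` covers the case where only a message and a status code are available.

use common_error::ext::PlainError;
use common_error::prelude::*;

fn main() {
    let plain = PlainError::new("bad request".to_string(), StatusCode::InvalidArguments);
    let boxed = BoxedError::new(plain);
    // BoxedError forwards status_code() to the erased inner error.
    assert_eq!(StatusCode::InvalidArguments, boxed.status_code());
    println!("{boxed}");
}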
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -24,6 +24,9 @@ pub mod prelude {
     pub use crate::ext::{BoxedError, ErrorExt};
     pub use crate::format::DebugFormat;
     pub use crate::status_code::StatusCode;
+
+    pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
+    pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";
 }
 
 pub use snafu;
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

@@ -14,8 +14,10 @@
 
 use std::fmt;
 
+use strum::EnumString;
+
 /// Common status code for public API.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString)]
 pub enum StatusCode {
     // ====== Begin of common status code ==============
     /// Success.

@@ -75,6 +77,8 @@ pub enum StatusCode {
     AuthHeaderNotFound = 7003,
     /// Invalid http authorization header
     InvalidAuthHeader = 7004,
+    /// Illegal request to connect catalog-schema
+    AccessDenied = 7005,
     // ====== End of auth related status code =====
 }
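Deriving `EnumString` gives `StatusCode` a `FromStr` impl that matches variant names verbatim. A self-contained sketch with a trimmed-down enum:

use std::str::FromStr;
use strum::EnumString;

#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString)]
enum StatusCode {
    Success,
    AccessDenied,
}

fn main() {
    // The generated FromStr matches variant names exactly by default.
    assert_eq!(
        StatusCode::AccessDenied,
        StatusCode::from_str("AccessDenied").unwrap()
    );
    assert!(StatusCode::from_str("NoSuchCode").is_err());
}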
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,

(The identical copyright-year hunk repeats for each of the remaining files in this view.)
Some files were not shown because too many files have changed in this diff.