Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 16:32:54 +00:00)

Compare commits: 45 commits, v0.1.0-alp ... replace-ar
| Author | SHA1 | Date |
|---|---|---|
| | a8630cdb38 | |
| | 0f3dcc1b38 | |
| | 7c696dae08 | |
| | 142dee41d6 | |
| | ce6d1cb7d1 | |
| | dbb3034ecb | |
| | 652d59a643 | |
| | fa971c6513 | |
| | 36c929e1a7 | |
| | a712382fba | |
| | 4b644aa482 | |
| | 4defde055c | |
| | 95b2d8654f | |
| | 42fdc7251a | |
| | d0892bf0b7 | |
| | fff530cb50 | |
| | b936d8b18a | |
| | 1bde1ba399 | |
| | 3687bc7346 | |
| | 587bdc9800 | |
| | 58c26def6b | |
| | 6f3baf96b0 | |
| | a898f846d1 | |
| | a562199455 | |
| | fb0b4eb826 | |
| | 2ba99259e1 | |
| | 551cde23b1 | |
| | 653906d4fa | |
| | 829ff491c4 | |
| | b32438e78c | |
| | 0ccb8b4302 | |
| | b48ae21b71 | |
| | 3c0adb00f3 | |
| | 8c66b7d000 | |
| | 99371fd31b | |
| | fe505fecfd | |
| | cc1ec26416 | |
| | 504059a699 | |
| | 7151deb4ed | |
| | d0686f9c19 | |
| | 221f3e9d2e | |
| | 61c4a3691a | |
| | d7626fd6af | |
| | e3201a4705 | |
| | 571a84d91b | |
```diff
@@ -2,9 +2,3 @@
 GT_S3_BUCKET=S3 bucket
 GT_S3_ACCESS_KEY_ID=S3 access key id
 GT_S3_ACCESS_KEY=S3 secret access key
-
-# Settings for oss test
-GT_OSS_BUCKET=OSS bucket
-GT_OSS_ACCESS_KEY_ID=OSS access key id
-GT_OSS_ACCESS_KEY=OSS access key
-GT_OSS_ENDPOINT=OSS endpoint
```
64 .github/workflows/coverage.yml (vendored, new file)

@@ -0,0 +1,64 @@
```yaml
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - "main"
      - "develop"
    paths-ignore:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: Code coverage

env:
  RUST_TOOLCHAIN: nightly-2022-07-14

jobs:
  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Collect coverage data
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
        env:
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./lcov.info
          flags: rust
          fail_ci_if_error: true
          verbose: true
```
88 .github/workflows/develop.yml (vendored)

```diff
@@ -7,7 +7,6 @@ on:
       - '**.md'
       - '.dockerignore'
       - 'docker/**'
       - '.gitignore'
   push:
     branches:
       - develop
@@ -24,7 +23,7 @@ on:
 name: CI
 
 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2022-07-14
 
 jobs:
   typos:
@@ -42,8 +41,6 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: dtolnay/rust-toolchain@master
         with:
           toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -84,8 +81,6 @@ jobs:
   #         path: ./llvm
   #         key: llvm
   #     - uses: arduino/setup-protoc@v1
-  #       with:
-  #         repo-token: ${{ secrets.GITHUB_TOKEN }}
   #     - uses: KyleMayes/install-llvm-action@v1
   #       with:
   #         version: "14.0"
@@ -111,41 +106,6 @@ jobs:
   #         GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
   #         UNITTEST_LOG_DIR: "__unittest_logs"
 
-  sqlness:
-    name: Sqlness Test
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest-8-cores
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v3
-      - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{ env.RUST_TOOLCHAIN }}
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-      - name: Run etcd
-        run: |
-          ETCD_VER=v3.5.7
-          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
-          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
-          mkdir -p /tmp/etcd-download
-          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
-          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
-
-          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
-          nohup etcd >/tmp/etcd.log 2>&1 &
-      - name: Run sqlness
-        run: cargo run --bin sqlness-runner && ls /tmp
-      - name: Upload sqlness logs
-        uses: actions/upload-artifact@v3
-        with:
-          name: sqlness-logs
-          path: /tmp/greptime-*.log
-          retention-days: 3
-
   fmt:
     name: Rustfmt
     if: github.event.pull_request.draft == false
@@ -154,8 +114,6 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: dtolnay/rust-toolchain@master
         with:
           toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -173,8 +131,6 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
         with:
           toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -183,45 +139,3 @@ jobs:
         uses: Swatinem/rust-cache@v2
       - name: Run cargo clippy
         run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
-
-  coverage:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest-8-cores
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v3
-      - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: KyleMayes/install-llvm-action@v1
-        with:
-          version: "14.0"
-      - name: Install toolchain
-        uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{ env.RUST_TOOLCHAIN }}
-          components: llvm-tools-preview
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-      - name: Install latest nextest release
-        uses: taiki-e/install-action@nextest
-      - name: Install cargo-llvm-cov
-        uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Collect coverage data
-        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
-        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
-          RUST_BACKTRACE: 1
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
-          UNITTEST_LOG_DIR: "__unittest_logs"
-      - name: Codecov upload
-        uses: codecov/codecov-action@v2
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ./lcov.info
-          flags: rust
-          fail_ci_if_error: true
-          verbose: true
```
55 .github/workflows/docs.yml (vendored, deleted)

@@ -1,55 +0,0 @@
```yaml
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  push:
    branches:
      - develop
      - main
    paths:
      - 'docs/**'
      - 'config/**'
      - '**.md'
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
  workflow_dispatch:

name: CI

# To pass the required status check, see:
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'
```
38 .github/workflows/release.yml (vendored)

```diff
@@ -10,7 +10,7 @@ on:
 name: Release
 
 env:
-  RUST_TOOLCHAIN: nightly-2022-12-20
+  RUST_TOOLCHAIN: nightly-2022-07-14
 
   # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
   SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
@@ -18,8 +18,6 @@ env:
   # In the future, we can change SCHEDULED_PERIOD to nightly.
   SCHEDULED_PERIOD: weekly
 
-  CARGO_PROFILE: weekly
-
 jobs:
   build:
     name: Build binary
@@ -28,10 +26,10 @@ jobs:
       # The file format is greptime-<os>-<arch>
       include:
         - arch: x86_64-unknown-linux-gnu
-          os: ubuntu-2004-16-cores
+          os: ubuntu-latest-16-cores
           file: greptime-linux-amd64
         - arch: aarch64-unknown-linux-gnu
-          os: ubuntu-2004-16-cores
+          os: ubuntu-latest-16-cores
           file: greptime-linux-arm64
         - arch: aarch64-apple-darwin
           os: macos-latest
@@ -69,25 +67,6 @@ jobs:
         run: |
           brew install protobuf
 
-      - name: Install etcd for linux
-        if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
-        run: |
-          ETCD_VER=v3.5.7
-          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
-          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
-          mkdir -p /tmp/etcd-download
-          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
-          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
-
-          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
-          nohup etcd >/tmp/etcd.log 2>&1 &
-
-      - name: Install etcd for macos
-        if: contains(matrix.arch, 'darwin')
-        run: |
-          brew install etcd
-          brew services start etcd
-
       - name: Install dependencies for linux
         if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
         run: |
@@ -103,16 +82,13 @@ jobs:
       - name: Output package versions
         run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
 
-      - name: Run tests
-        run: make unit-test integration-test sqlness-test
-
       - name: Run cargo build
-        run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
+        run: cargo build ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
 
       - name: Calculate checksum and rename binary
         shell: bash
         run: |
-          cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
+          cd target/${{ matrix.arch }}/release
           chmod +x greptime
           tar -zcvf ${{ matrix.file }}.tgz greptime
           echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
@@ -121,13 +97,13 @@ jobs:
         uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.file }}
-          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
+          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.tgz
 
       - name: Upload checksum of artifacts
         uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.file }}.sha256sum
-          path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
+          path: target/${{ matrix.arch }}/release/${{ matrix.file }}.sha256sum
 
   release:
     name: Release artifacts
     needs: [build]
```
```diff
@@ -5,11 +5,11 @@ repos:
       - id: conventional-pre-commit
         stages: [commit-msg]
 
-  # - repo: https://github.com/DevinR528/cargo-sort
-  #   rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
-  #   hooks:
-  #     - id: cargo-sort
-  #       args: ["--workspace"]
+  - repo: https://github.com/DevinR528/cargo-sort
+    rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
+    hooks:
+      - id: cargo-sort
+        args: ["--workspace"]
 
   - repo: https://github.com/doublify/pre-commit-rust
     rev: v1.0
```
1702 Cargo.lock (generated): file diff suppressed because it is too large
39 Cargo.toml

```diff
@@ -12,7 +12,6 @@ members = [
     "src/common/function-macro",
     "src/common/grpc",
     "src/common/grpc-expr",
-    "src/common/procedure",
     "src/common/query",
     "src/common/recordbatch",
     "src/common/runtime",
@@ -27,7 +26,6 @@ members = [
     "src/meta-srv",
     "src/mito",
     "src/object-store",
     "src/partition",
     "src/promql",
     "src/query",
     "src/script",
@@ -41,42 +39,5 @@ members = [
     "tests/runner",
 ]
 
-[workspace.package]
-version = "0.1.0"
-edition = "2021"
-license = "Apache-2.0"
-
-[workspace.dependencies]
-arrow = "29.0"
-arrow-flight = "29.0"
-arrow-schema = { version = "29.0", features = ["serde"] }
-async-stream = "0.3"
-async-trait = "0.1"
-# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
-futures = "0.3"
-futures-util = "0.3"
-parquet = "29.0"
-paste = "1.0"
-prost = "0.11"
-serde = { version = "1.0", features = ["derive"] }
-snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = "0.28"
-tokio = { version = "1.24.2", features = ["full"] }
-tonic = "0.8"
-uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
-
 [profile.release]
 debug = true
-
-[profile.weekly]
-inherits = "release"
-strip = true
-lto = "thin"
-debug = false
-incremental = false
```
```diff
@@ -153,9 +153,6 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
 - GreptimeDB [Developer
   Guide](https://docs.greptime.com/developer-guide/overview.html)
 
-### Dashboard
-- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
-
 ### SDK
 
 - [GreptimeDB Java
@@ -172,7 +169,7 @@ For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam
 
 ## Community
 
-Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
+Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
 ask for help by filling an issue with a detailed description of what you were trying to do
 and what went wrong. If you have any questions or if you would like to get involved in our
 community, please check out:
```
```diff
@@ -1,14 +1,14 @@
 [package]
 name = "benchmarks"
-version.workspace = true
-edition.workspace = true
-license.workspace = true
+version = "0.1.0"
+edition = "2021"
+license = "Apache-2.0"
 
 [dependencies]
-arrow.workspace = true
+arrow = "26.0.0"
 clap = { version = "4.0", features = ["derive"] }
 client = { path = "../src/client" }
 indicatif = "0.17.1"
 itertools = "0.10.5"
-parquet.workspace = true
-tokio.workspace = true
+parquet = "26.0.0"
+tokio = { version = "1.21", features = ["full"] }
```
```diff
@@ -1,10 +1,10 @@
-// Copyright 2023 Greptime Team
+// Copyright 2022 Greptime Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
 //! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
 //! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
 
+#![feature(once_cell)]
 #![allow(clippy::print_stdout)]
 
 use std::collections::HashMap;
@@ -25,13 +26,15 @@ use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArr
 use arrow::datatypes::{DataType, Float64Type, Int64Type};
 use arrow::record_batch::RecordBatch;
 use clap::Parser;
+use client::admin::Admin;
 use client::api::v1::column::Values;
-use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
-use client::{Client, Database};
+use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
+use client::{Client, Database, Select};
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
 use tokio::task::JoinSet;
 
+const DATABASE_NAME: &str = "greptime";
 const CATALOG_NAME: &str = "greptime";
 const SCHEMA_NAME: &str = "public";
 const TABLE_NAME: &str = "nyc_taxi";
@@ -91,27 +94,31 @@ async fn write_data(
         .unwrap();
     let progress_bar = mpb.add(ProgressBar::new(row_num as _));
     progress_bar.set_style(pb_style);
-    progress_bar.set_message(format!("{path:?}"));
+    progress_bar.set_message(format!("{:?}", path));
 
     let mut total_rpc_elapsed_ms = 0;
 
     for record_batch in record_batch_reader {
         let record_batch = record_batch.unwrap();
         let (columns, row_count) = convert_record_batch(record_batch);
-        let request = InsertRequest {
+        let insert_expr = InsertExpr {
+            schema_name: "public".to_string(),
             table_name: TABLE_NAME.to_string(),
-            region_number: 0,
             columns,
             row_count,
         };
         let now = Instant::now();
-        db.insert(request).await.unwrap();
+        db.insert(insert_expr).await.unwrap();
         let elapsed = now.elapsed();
         total_rpc_elapsed_ms += elapsed.as_millis();
         progress_bar.inc(row_count as _);
     }
 
-    progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
+    progress_bar.finish_with_message(format!(
+        "file {:?} done in {}ms",
+        path, total_rpc_elapsed_ms
+    ));
     total_rpc_elapsed_ms
 }
@@ -212,126 +219,126 @@ fn build_values(column: &ArrayRef) -> Values {
     }
 }
 
-fn create_table_expr() -> CreateTableExpr {
-    CreateTableExpr {
-        catalog_name: CATALOG_NAME.to_string(),
-        schema_name: SCHEMA_NAME.to_string(),
+fn create_table_expr() -> CreateExpr {
+    CreateExpr {
+        catalog_name: Some(CATALOG_NAME.to_string()),
+        schema_name: Some(SCHEMA_NAME.to_string()),
         table_name: TABLE_NAME.to_string(),
-        desc: "".to_string(),
+        desc: None,
         column_defs: vec![
             ColumnDef {
                 name: "VendorID".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "tpep_pickup_datetime".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "tpep_dropoff_datetime".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "passenger_count".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "trip_distance".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "RatecodeID".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "store_and_fwd_flag".to_string(),
                 datatype: ColumnDataType::String as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "PULocationID".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "DOLocationID".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "payment_type".to_string(),
                 datatype: ColumnDataType::Int64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "fare_amount".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "extra".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "mta_tax".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "tip_amount".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "tolls_amount".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "improvement_surcharge".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "total_amount".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "congestion_surcharge".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
             ColumnDef {
                 name: "airport_fee".to_string(),
                 datatype: ColumnDataType::Float64 as i32,
                 is_nullable: true,
-                default_constraint: vec![],
+                default_constraint: None,
             },
         ],
         time_index: "tpep_pickup_datetime".to_string(),
@@ -339,7 +346,7 @@ fn create_table_expr() -> CreateTableExpr {
         create_if_not_exists: false,
         table_options: Default::default(),
         region_ids: vec![0],
-        table_id: Some(TableId { id: 0 }),
+        table_id: Some(0),
     }
 }
@@ -348,23 +355,25 @@ fn query_set() -> HashMap<String, String> {
 
     ret.insert(
         "count_all".to_string(),
-        format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
+        format!("SELECT COUNT(*) FROM {};", TABLE_NAME),
     );
 
     ret.insert(
         "fare_amt_by_passenger".to_string(),
-        format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
+        format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {} GROUP BY passenger_count",TABLE_NAME)
     );
 
     ret
 }
 
-async fn do_write(args: &Args, db: &Database) {
+async fn do_write(args: &Args, client: &Client) {
+    let admin = Admin::new("admin", client.clone());
+
     let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
     let mut write_jobs = JoinSet::new();
 
-    let create_table_result = db.create(create_table_expr()).await;
-    println!("Create table result: {create_table_result:?}");
+    let create_table_result = admin.create(create_table_expr()).await;
+    println!("Create table result: {:?}", create_table_result);
 
     let progress_bar_style = ProgressStyle::with_template(
         "[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
@@ -378,7 +387,7 @@ async fn do_write(args: &Args, db: &Database) {
     let batch_size = args.batch_size;
     for _ in 0..args.thread_num {
         if let Some(path) = file_list.pop() {
-            let db = db.clone();
+            let db = Database::new(DATABASE_NAME, client.clone());
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
             write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
@@ -387,7 +396,7 @@ async fn do_write(args: &Args, db: &Database) {
     while write_jobs.join_next().await.is_some() {
         file_progress.inc(1);
         if let Some(path) = file_list.pop() {
-            let db = db.clone();
+            let db = Database::new(DATABASE_NAME, client.clone());
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
             write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
@@ -397,10 +406,10 @@ async fn do_write(args: &Args, db: &Database) {
 
 async fn do_query(num_iter: usize, db: &Database) {
     for (query_name, query) in query_set() {
-        println!("Running query: {query}");
+        println!("Running query: {}", query);
         for i in 0..num_iter {
             let now = Instant::now();
-            let _res = db.sql(&query).await.unwrap();
+            let _res = db.select(Select::Sql(query.clone())).await.unwrap();
             let elapsed = now.elapsed();
             println!(
                 "query {}, iteration {}: {}ms",
@@ -422,13 +431,13 @@ fn main() {
         .unwrap()
         .block_on(async {
             let client = Client::with_urls(vec![&args.endpoint]);
-            let db = Database::with_client(client);
 
             if !args.skip_write {
-                do_write(&args, &db).await;
+                do_write(&args, &client).await;
             }
 
             if !args.skip_read {
+                let db = Database::new(DATABASE_NAME, client.clone());
                 do_query(args.iter_num, &db).await;
             }
         })
```
```diff
@@ -1,20 +1,12 @@
 node_id = 42
 mode = 'distributed'
 rpc_addr = '127.0.0.1:3001'
 rpc_hostname = '127.0.0.1'
+wal_dir = '/tmp/greptimedb/wal'
 rpc_runtime_size = 8
 mysql_addr = '127.0.0.1:4406'
 mysql_runtime_size = 4
-enable_memory_catalog = false
-
-[wal]
-dir = "/tmp/greptimedb/wal"
-file_size = '1GB'
-purge_interval = '10m'
-purge_threshold = '50GB'
-read_batch_size = 128
-sync_write = false
 
 [storage]
 type = 'File'
 data_dir = '/tmp/greptimedb/data/'
```
```diff
@@ -2,5 +2,3 @@ bind_addr = '127.0.0.1:3002'
 server_addr = '127.0.0.1:3002'
 store_addr = '127.0.0.1:2379'
 datanode_lease_secs = 15
-# selector: 'LeaseBased', 'LoadBased'
-selector = 'LeaseBased'
```
```diff
@@ -1,20 +1,12 @@
 node_id = 0
 mode = 'standalone'
+wal_dir = '/tmp/greptimedb/wal/'
-enable_memory_catalog = false
 
 [http_options]
 addr = '127.0.0.1:4000'
 timeout = "30s"
-
-[wal]
-dir = "/tmp/greptimedb/wal"
-file_size = '1GB'
-purge_interval = '10m'
-purge_threshold = '50GB'
-read_batch_size = 128
-sync_write = false
 
 [storage]
 type = 'File'
 data_dir = '/tmp/greptimedb/data/'
```
```diff
@@ -24,8 +24,6 @@ RUN cargo build --release
 # TODO(zyy17): Maybe should use the more secure container image.
 FROM ubuntu:22.04 as base
 
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
-
 WORKDIR /greptime
 COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
 ENV PATH /greptime/bin/:$PATH
```

```diff
@@ -1,7 +1,5 @@
 FROM ubuntu:22.04
 
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
-
 ARG TARGETARCH
 
 ADD $TARGETARCH/greptime /greptime/bin/
```
Binary file not shown. (Before: 34 KiB)
Binary file not shown. (Before: 58 KiB)
Binary file not shown. (Before: 35 KiB)
Binary file not shown. (Before: 46 KiB)
@@ -1,175 +0,0 @@
---
Feature Name: "promql-in-rust"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/596
Date: 2022-12-20
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

Rewrite PromQL in Rust
----------------------

# Summary
A Rust-native implementation of PromQL, for GreptimeDB.

# Motivation
Prometheus and its query language PromQL prevail in the cloud-native observability area, which is an important scenario for time series databases like GreptimeDB. We already support its remote read and write protocols: users can integrate GreptimeDB as the storage backend of an existing Prometheus deployment, but they cannot run PromQL queries directly on GreptimeDB the way they run SQL.

This RFC proposes to add support for PromQL. Because the reference implementation was created in Go, we can't reuse the existing code easily. For interoperability, performance and extendability, porting its logic to Rust is a good choice.

# Details

## Overview
One of the goals is to make use of our existing basic operators, execution model and runtime to reduce the work, so the entire proposal is built on top of Apache Arrow DataFusion. The rewritten PromQL logic is manifested as `Expr` or `ExecutionPlan` in DataFusion, and both the intermediate data structures and the result are in the format of Arrow's `RecordBatch`.

The following sections are organized in a top-down manner: they start with the evaluation procedure, then introduce the building blocks of the new PromQL operations, follow with an explanation of the data model, and end with an example logic plan.

*This RFC is heavily related to Prometheus and PromQL. It won't repeat their basic concepts.*

## Evaluation

The original implementation is like an interpreter over the parsed PromQL AST. It has two characteristics: (1) operations are evaluated in place after they are parsed to an AST, and some key parameters are separated from the AST because they do not appear in the query itself but come from other places, like another field in the HTTP payload; (2) calculation is performed per timestamp. You can see this pattern many times:

```go
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {}
```

These bring out two differences in the proposed implementation. First, to make it more general and clear, the evaluation procedure is reorganized into several phases (the same as DataFusion's). Second, data is evaluated series by series (corresponding to "columnar calculation", if you think of the timestamp as the row number).

```
                               Logic
 Query            AST          Plan
─────────► Parser ───────► Logical ────────► Physical ────┐
                           Planner           Planner      │
                                                          │
◄───────────────────────────── Executor ◄─────────────────┘
 Evaluation Result                        Execution
                                          Plan
```

- Parser

  Provided by the [`promql-parser`](https://github.com/GreptimeTeam/promql-parser) crate. Same as the original implementation.

- Logical Planner

  Generates a logical plan with all the needed parameters. It should accept something like `EvalStmt` in the Go implementation, which contains the query time range, evaluation interval and lookback range.

  Another important thing done here is assembling the logic plan, with all the operations baked in logically: what filter and time range to read, how the data then flows through a selector into a binary operation, and what the output schema of every single step is. The generated logic plan is deterministic, without variables, and can be `EXPLAIN`ed clearly.

- Physical Planner

  This step converts a logic plan into an executable execution plan. Not much special happens here compared to the previous step, except when a query is going to be executed distributedly: in that case a logic plan is divided into several parts and sent to several nodes, and one physical planner only sees its own part.

- Executor

  As its name shows, this step computes the data into the result. All the new calculation logic, the implementation of PromQL in Rust, is placed here, and the rewritten functions use `RecordBatch` and `Array` from Arrow as the intermediate data structures.

  Each "batch" contains data from only a single time series. This comes from the underlying storage implementation; though it's not a requirement of this RFC, having this property can simplify some functions.

  Another thing to mention is that the rewritten functions aren't aware of timestamp or value columns; they are defined only in terms of the input data types. For example, the `increase()` function in PromQL calculates the unbiased delta of data, and its implementation here does only that single thing. Let's compare the signatures of the two implementations:

  - Go

    ```go
    func funcIncrease(vals []parser.Value, args parser.Expressions) Vector {}
    ```

  - Rust

    ```rust
    fn prom_increase(input: Array) -> Array {}
    ```

  Some unimportant parameters are omitted. The original Go version only writes the logic for a `Point`'s value, either float or histogram, while the proposed rewrite accepts a generic `Array` as input, which can be any type that suits, from `i8` to `u64` to `TimestampNanosecond`.
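To make the columnar style concrete, here is a minimal sketch of an `increase`-like kernel. It is an illustration only, not GreptimeDB code: it operates on a plain `&[f64]` standing in for an Arrow array, and it treats any decrease as a counter reset, following the PromQL convention.

```rust
/// Sum of deltas over one window of samples, counting a drop in value as a
/// counter reset (the counter is assumed to restart from zero).
fn prom_increase_sketch(values: &[f64]) -> f64 {
    values
        .windows(2)
        .map(|pair| {
            let (prev, cur) = (pair[0], pair[1]);
            if cur >= prev {
                cur - prev // monotonic growth
            } else {
                cur // reset: everything since the restart counts
            }
        })
        .sum()
}

fn main() {
    // 10 -> 12 -> 3 (reset) -> 5 gives 2 + 3 + 2 = 7
    let window = [10.0, 12.0, 3.0, 5.0];
    println!("increase = {}", prom_increase_sketch(&window));
}
```

The kernel consumes a whole window in one call, instead of being driven sample by sample from an outer timestamp loop as in the Go evaluator.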
## Plan and Expression

They are structures to express logic from PromQL. The proposed implementation is built on top of DataFusion, thus our plans and expressions are in the form of `ExtensionPlan` and `ScalarUDF`. The only difference between them in this context is the return type: a plan returns a record batch while an expression returns a single column.

This RFC proposes to add four new plans. They are fundamental building blocks that mainly handle the data selection logic in PromQL, for the following calculation expressions.

- `SeriesNormalize`

  Sorts data inside one series on the timestamp column, and biases the "offset" if there is one. This plan usually comes after a `TableScan` (or `TableScan` and `Filter`) plan.

- `VectorManipulator` and `MatrixManipulator`

  Corresponding to `InstantSelector` and `RangeSelector`. We don't calculate timestamp by timestamp, thus "vector" is used instead of "instant"; this image shows the difference. And "matrix" is another name for "range vector", to avoid confusion with our "vector". The following section details how they are implemented using Arrow.

  

  Due to the "interval" parameter in PromQL, data after a "selector" (or "manipulator" here) is usually shorter than the input, and we have to modify the entire record batch to shorten the timestamp, value and tag columns alike. So they are formed as plans.

- `PromAggregator`

  The carrier of aggregator expressions. This should not be very different from the DataFusion built-in `Aggregate` plan, except PromQL can use "group without" to do reverse selection.

PromQL has around 70 expressions and functions, but luckily we can reuse lots of them from DataFusion, like unary expressions, binary expressions and aggregators. We only need to implement the PromQL-specific expressions, like `rate` or `percentile`. The following table lists some typical functions in PromQL and their signatures in the proposed implementation. Other functions should be the same.

| Name | In Param(s) | Out Param(s) | Explain |
|---|---|---|---|
| instant_delta | Matrix T | Array T | idelta in PromQL |
| increase | Matrix T | Array T | increase in PromQL |
| extrapolate_factor | - Matrix T<br>- Array Timestamp<br>- Array Timestamp | Array T | * |

*: *`extrapolate_factor` is one of the "dark sides" of PromQL. In short, it's a translation of this [paragraph](https://github.com/prometheus/prometheus/blob/0372e259baf014bbade3134fd79bcdfd8cbdef2c/promql/functions.go#L134-L159).*

To reuse common calculation logic, we can break the functions into several expressions and assemble them in the logic planning phase; `rate()` in PromQL, for example, can be represented as `increase / extrapolate_factor`, as the sketch below illustrates.
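A toy illustration of that planning-time assembly, with stand-in kernels: the real `extrapolate_factor` translates the linked Prometheus paragraph, while here it is simplified to just the covered time span.

```rust
/// Stand-in for the `increase` kernel: sum of deltas, with counter resets
/// treated as restarts from zero.
fn increase(values: &[f64]) -> f64 {
    values
        .windows(2)
        .map(|p| if p[1] >= p[0] { p[1] - p[0] } else { p[1] })
        .sum()
}

/// Greatly simplified stand-in for `extrapolate_factor`: the time span of the
/// window in seconds (the real one extrapolates toward the window edges).
fn extrapolate_factor(timestamps_s: &[f64]) -> f64 {
    timestamps_s[timestamps_s.len() - 1] - timestamps_s[0]
}

fn main() {
    let values = [10.0, 12.0, 15.0, 21.0];
    let timestamps_s = [0.0, 15.0, 30.0, 45.0];
    // rate = increase / extrapolate_factor, assembled at planning time
    let rate = increase(&values) / extrapolate_factor(&timestamps_s);
    println!("rate = {rate:.4} per second"); // 11 / 45
}
```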
## Data Model

This part explains how data is represented. Following the data model in GreptimeDB, all data is stored as tables, with tag columns, a timestamp column and a value column. Table to record batch is very straightforward, so an instant vector can be thought of as a row in the table (though, as said before, we don't use instant vectors). Given the four basic types in PromQL (scalar, string, instant vector and range vector), only the last one, "range vector", needs some tricks to adapt to our columnar calculation.

A range vector is some sort of matrix: it consists of small one-dimensional vectors, each being an input to a range function. Applying a range function to a range vector can be thought of as a kind of convolution.



(The left side is an illustration of a range vector. Notice the Y-axis has no meaning; it just places the different pieces separately. The right side is an imagined "matrix" acting as a range function. Multiplying the left side by it yields a one-dimensional "matrix" with four elements: that's the evaluation result of the range vector.)

To adapt this range vector to a record batch, it should be represented by a column. This RFC proposes to use `DictionaryArray` from Arrow to represent a range vector, or `Matrix`. This is "misusing" `DictionaryArray` to ship some additional information about an array: because the range vector is sliding over one series, we only need to know the `offset` and `length` of each slide to reconstruct the matrix from an array.



The length is not fixed; it depends on the input's timestamps. A PoC implementation of `Matrix` and `increase()` can be found in [this repo](https://github.com/waynexia/corroding-prometheus). A sketch of the reconstruction idea follows.
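To make the offset/length idea concrete, here is a small sketch that reconstructs windows from a flat series without copying. The `Matrix` name follows the RFC; the plain-slice layout is an illustrative simplification of the proposed `DictionaryArray` encoding.

```rust
/// Each (offset, length) pair describes one sliding window over a single,
/// time-ordered series, so the matrix can be rebuilt without copying data.
struct Matrix<'a> {
    values: &'a [f64],
    windows: Vec<(usize, usize)>, // (offset, length) of each range window
}

impl<'a> Matrix<'a> {
    /// Apply a range function (e.g. an increase-like kernel) to every window.
    fn apply(&self, f: impl Fn(&[f64]) -> f64) -> Vec<f64> {
        self.windows
            .iter()
            .map(|&(off, len)| f(&self.values[off..off + len]))
            .collect()
    }
}

fn main() {
    let series = [1.0, 2.0, 4.0, 7.0, 11.0];
    let matrix = Matrix {
        values: &series,
        windows: vec![(0, 3), (1, 3), (2, 3)], // three overlapping 3-sample windows
    };
    // Simple delta over each window: last - first.
    let deltas = matrix.apply(|w| w[w.len() - 1] - w[0]);
    println!("{deltas:?}"); // [3.0, 5.0, 7.0]
}
```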
## Example

The logic plan of the PromQL query
```promql
# start: 2022-12-20T10:00:00
# end: 2022-12-21T10:00:00
# interval: 1m
# lookback: 30s
sum (rate(request_duration[5m])) by (idc)
```
looks like

<!-- title: 'PromAggregator: \naggr = sum, column = idc'
operator: prom
inputs:
  - title: 'Matrix Manipulator: \ninterval = 1m, range = 5m, expr = div(increase(value), extrapolate_factor(timestamp))'
    operator: prom
    inputs:
      - title: 'Series Normalize: \noffset = 0'
        operator: prom
        inputs:
          - title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
            operator: filter
            inputs:
              - title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
                operator: scan -->



# Drawbacks

Humans are always error-prone. Rewriting from the ground up demands more attention to ensure correctness than translating line by line. And since the evaluators' architectures are different, it might be painful to catch up with PromQL's breaking updates (if any) in the future.

Misusing Arrow's `DictionaryArray` as `Matrix` is another point. This hack needs some `unsafe` function calls to bypass Arrow's checks, and though Arrow's API is stable, this is still undocumented behavior.

# Alternatives

There are a few alternatives we've considered:
- Wrap the existing PromQL implementation via FFI, and import it into GreptimeDB.
- Translate its evaluator engine line by line, rather than rewriting it.
- Integrate the Prometheus server into GreptimeDB via RPC, making it a detached execution engine for PromQL.

The first and second options make a separate execution engine inside GreptimeDB; they may ease the pain during rewriting, but will have negative impacts on later evolution, such as resource management. And introducing another deployed component, as in the last option, complicates the deployment architecture.

All of them are also more or less redundant in data transportation, which affects performance and resources. The proposed built-in execution procedure is easy to integrate and expose through the existing SQL interface GreptimeDB provides. Some concepts in PromQL, like sliding windows (range vectors), are very convenient and ergonomic for analyzing series data. This makes it not only a PromQL evaluator, but also an enhancement to our query system.
@@ -1,153 +0,0 @@
---
Feature Name: "procedure-framework"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/286
Date: 2023-01-03
Author: "Yingwen <realevenyag@gmail.com>"
---

Procedure Framework
----------------------

# Summary
A framework for executing operations in a fault-tolerant manner.

# Motivation
Some operations in GreptimeDB require multiple steps to implement. For example, creating a table needs to:
1. Check whether the table exists
2. Create the table in the table engine
    1. Create a region for the table in the storage engine
    2. Persist the metadata of the table to the table manifest
3. Add the table to the catalog manager

If the node dies or restarts in the middle of creating a table, it could leave the system in an inconsistent state. The procedure framework, inspired by [Apache HBase's ProcedureV2 framework](https://github.com/apache/hbase/blob/bfc9fc9605de638785435e404430a9408b99a8d0/src/main/asciidoc/_chapters/pv2.adoc) and [Apache Accumulo's FATE framework](https://accumulo.apache.org/docs/2.x/administration/fate), aims to provide a unified way to implement multi-step operations that are tolerant to failure.

# Details
## Overview
The procedure framework consists of the following primary components:
- A `Procedure` represents an operation, or a set of operations, to be performed step by step.
- `ProcedureManager` is the runtime that runs `Procedure`s. It executes the submitted procedures, stores their states in the `ProcedureStore`, and restores procedures from the `ProcedureStore` when the database restarts.
- `ProcedureStore` is a storage layer for persisting the procedure state.

## Procedures
The `ProcedureManager` keeps calling `Procedure::execute()` until the Procedure is done, so the operation of the Procedure should be [idempotent](https://developer.mozilla.org/en-US/docs/Glossary/Idempotent): it needs to be able to undo or replay a partial execution of itself.

```rust
trait Procedure {
    fn execute(&mut self, ctx: &Context) -> Result<Status>;

    fn dump(&self) -> Result<String>;

    fn rollback(&self) -> Result<()>;

    // other methods...
}
```

`Status` is an enum with the following variants:
```rust
enum Status {
    Executing {
        persist: bool,
    },
    Suspended {
        subprocedures: Vec<ProcedureWithId>,
        persist: bool,
    },
    Done,
}
```

A call to `execute()` can result in the following possibilities:
- `Ok(Status::Done)`: we are done
- `Ok(Status::Executing { .. })`: there are remaining steps to do
- `Ok(Status::Suspended { subprocedures, .. })`: execution is suspended and can be resumed later, after the sub-procedures are done
- `Err(e)`: an error occurs during execution and the procedure is unable to proceed anymore
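Below is a minimal, self-contained sketch of what such an idempotent, multi-step procedure could look like. The step enum, the create-table steps and the simplified `Status` (no `Suspended` variant) are illustrative assumptions, not GreptimeDB's actual implementation.

```rust
/// Simplified from the RFC's `Status`; the `Suspended` variant is omitted.
enum Status {
    Executing { persist: bool },
    Done,
}

enum Step {
    CheckExists,
    CreateInEngine,
    AddToCatalog,
}

struct CreateTableProcedure {
    table: String,
    step: Step,
}

impl CreateTableProcedure {
    /// Each call advances one step. Every step must tolerate being replayed
    /// after a crash (e.g. "create" must accept an already-created region).
    fn execute(&mut self) -> Status {
        match self.step {
            Step::CheckExists => {
                self.step = Step::CreateInEngine;
                Status::Executing { persist: true }
            }
            Step::CreateInEngine => {
                self.step = Step::AddToCatalog;
                Status::Executing { persist: true }
            }
            Step::AddToCatalog => Status::Done,
        }
    }
}

fn main() {
    let mut procedure = CreateTableProcedure {
        table: "nyc_taxi".to_string(),
        step: Step::CheckExists,
    };
    // The manager's driving loop: execute, persist when asked, repeat.
    while let Status::Executing { persist } = procedure.execute() {
        if persist {
            // A real manager would write `dump()` output to the ProcedureStore here.
        }
    }
    println!("table {} created", procedure.table);
}
```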
Users need to assign a unique `ProcedureId` to the procedure, and the procedure can get this id via the `Context`. The `ProcedureId` is typically a UUID.

```rust
struct Context {
    id: ProcedureId,
    // other fields ...
}
```

The `ProcedureManager` calls `Procedure::dump()` to serialize the internal state of the procedure and writes it to the `ProcedureStore`. The `Status` has a `persist` field that tells the `ProcedureManager` whether it needs persistence.

## Sub-procedures
A procedure may need to create some sub-procedures to process its subtasks. For example, creating a distributed table with multiple regions (partitions) needs to set up the regions on each node, so the parent procedure should instantiate a sub-procedure for each region. The `ProcedureManager` makes sure that the parent procedure does not proceed until all sub-procedures have successfully finished.

The procedure can submit sub-procedures to the `ProcedureManager` by returning `Status::Suspended`. It needs to assign a procedure id to each sub-procedure manually so it can track their status.
```rust
struct ProcedureWithId {
    id: ProcedureId,
    procedure: BoxedProcedure,
}
```

## ProcedureStore
We might need to provide two different `ProcedureStore` implementations:
- In standalone mode, it stores data on the local disk.
- In distributed mode, it stores data on the meta server or the object store service.

These implementations should share the same storage structure. They store each procedure's state under a unique path based on the procedure id:

```
Sample paths:

/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.step
/procedures/{PROCEDURE_ID}/000003.commit
```

`ProcedureStore` behaves like a WAL. Before performing each step, the `ProcedureManager` can write the procedure's current state to the `ProcedureStore`, which stores the state in a `.step` file. The `000001` in the path is a monotonically increasing sequence number of the step. After the procedure is done, the `ProcedureManager` puts a `.commit` file there to indicate the procedure is finished (committed).

The `ProcedureManager` can remove the procedure's files once the procedure is done, but it needs to leave the `.commit` file as the last one to remove, in case of failure during removal. A small sketch of this layout follows.
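Here is a tiny sketch of the path scheme described above. The helper names are hypothetical; only the `/procedures/{PROCEDURE_ID}/{seq}.step` / `.commit` layout comes from the RFC.

```rust
use std::path::PathBuf;

/// Path of the persisted state for one step; `seq` grows monotonically.
fn step_path(procedure_id: &str, seq: u32) -> PathBuf {
    PathBuf::from(format!("/procedures/{procedure_id}/{seq:06}.step"))
}

/// Commit marker, written after the last step and removed last during cleanup.
fn commit_path(procedure_id: &str, seq: u32) -> PathBuf {
    PathBuf::from(format!("/procedures/{procedure_id}/{seq:06}.commit"))
}

fn main() {
    let id = "3b2a4f6c-0d88-4f0b-9c1e-5a8f4a1d2e33"; // a ProcedureId is typically a UUID
    println!("{}", step_path(id, 1).display());   // .../000001.step
    println!("{}", commit_path(id, 3).display()); // .../000003.commit
}
```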
## ProcedureManager
`ProcedureManager` executes the procedures submitted to it.

```rust
trait ProcedureManager {
    fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
}
```

It supports the following operations:
- Register a `ProcedureLoader` by the type name of the `Procedure`.
- Submit a `Procedure` to the manager and execute it.

When the `ProcedureManager` starts, it loads procedures from the `ProcedureStore` and restores them via the `ProcedureLoader`. The manager stores the type name from `Procedure::type_name()` together with the data from `Procedure::dump()` in the `.step` file, and uses the type name to find a `ProcedureLoader` to recover the procedure from its data.

```rust
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<BoxedProcedure> + Send>;
```

## Rollback
The rollback step is supposed to clean up the resources created during the `execute()` step. When a procedure has failed, the `ProcedureManager` puts a `.rollback` file there and calls the `Procedure::rollback()` method.

```text
/procedures/{PROCEDURE_ID}/000001.step
/procedures/{PROCEDURE_ID}/000002.rollback
```

Rollback is complicated to implement, so some procedures might not support rollback or might only provide a best-effort approach.

## Locking
The `ProcedureManager` can provide a locking mechanism that gives a procedure read/write access to a database object, such as a table, so that other procedures are unable to modify the same table while the current one is executing.

Sub-procedures always inherit their parents' locks. The `ProcedureManager` only acquires a lock for a procedure if its parent doesn't already hold it, as the sketch below illustrates.
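A minimal sketch of that inheritance rule, with an assumed in-memory lock table (not the actual implementation):

```rust
use std::collections::HashMap;

struct LockTable {
    /// table name -> id of the procedure holding the lock
    held: HashMap<String, u64>,
    /// procedure id -> parent procedure id
    parent: HashMap<u64, u64>,
}

impl LockTable {
    /// A procedure may proceed if it, or any ancestor, holds the table lock.
    fn holds_or_inherits(&self, mut procedure: u64, table: &str) -> bool {
        loop {
            if self.held.get(table) == Some(&procedure) {
                return true;
            }
            match self.parent.get(&procedure) {
                Some(&p) => procedure = p, // walk up to the parent
                None => return false,      // reached the root without a lock
            }
        }
    }
}

fn main() {
    let mut locks = LockTable { held: HashMap::new(), parent: HashMap::new() };
    locks.held.insert("nyc_taxi".to_string(), 1); // procedure 1 locks the table
    locks.parent.insert(2, 1);                    // procedure 2 is its sub-procedure
    assert!(locks.holds_or_inherits(2, "nyc_taxi"));  // inherited from the parent
    assert!(!locks.holds_or_inherits(3, "nyc_taxi")); // unrelated procedure
    println!("ok");
}
```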
# Drawbacks
The procedure framework introduces additional complexity and overhead to our database:
- To execute a `Procedure`, we need to write to the `ProcedureStore` multiple times, which may slow down the server
- We need to rewrite the logic of creating/dropping/altering a table using the procedure framework

# Alternatives
Another approach is to tolerate failure during execution and allow users to retry the operation until it succeeds. But we would still need to:
- Make each step idempotent
- Record the status somewhere to check whether we are done
@@ -1,92 +0,0 @@
---
Feature Name: "table-compaction"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/930
Date: 2023-02-01
Author: "Lei, HUANG <mrsatangel@gmail.com>"
---

# Table Compaction

---

## Background

GreptimeDB uses an LSM-tree based storage engine that flushes memtables to SSTs for persistence.
But currently it only supports level 0, and SST files in level 0 are not guaranteed to contain only rows with disjoint time ranges.
That is to say, different SST files in level 0 may contain overlapping timestamps.
The consequence is that, in order to retrieve rows in some time range, all files need to be scanned, which brings a lot of IO overhead.

Also, just like in other LSM-tree engines, deletes/updates to existing primary keys are converted to new rows with a delete/update mark and appended to SSTs on flushing.
We need to merge the operations on the same primary keys so that we don't have to go through all SST files to find the final state of those keys.

## Goal

Implement a compaction framework to:
- maintain SSTs in timestamp order to accelerate queries with timestamp conditions;
- merge rows with the same primary key;
- purge expired SSTs;
- accommodate other tasks like data rollup/indexing.

## Overview

Table compaction involves the following components:
- Compaction scheduler: runs compaction tasks and limits the consumed resources;
- Compaction strategy: finds the SSTs to compact and determines the output files of a compaction;
- Compaction task: reads the rows from the input SSTs and writes them to the output files.

## Implementation

### Compaction scheduler

`CompactionScheduler` is an executor that continuously polls and executes compaction requests from a task queue.

```rust
#[async_trait]
pub trait CompactionScheduler {
    /// Schedules a compaction task.
    async fn schedule(&self, task: CompactionRequest) -> Result<()>;

    /// Stops compaction scheduler.
    async fn stop(&self) -> Result<()>;
}
```

### Compaction triggering

Currently, we can check whether to compact tables when a memtable is flushed to an SST:

https://github.com/GreptimeTeam/greptimedb/blob/4015dd80752e1e6aaa3d7cacc3203cb67ed9be6d/src/storage/src/flush.rs#L245

### Compaction strategy

`CompactionStrategy` defines how to pick SSTs in all levels for compaction.

```rust
pub trait CompactionStrategy {
    fn pick(
        &self,
        ctx: CompactionContext,
        levels: &LevelMetas,
    ) -> Result<CompactionTask>;
}
```

The most suitable compaction strategy for the time-series scenario would be a hybrid strategy that combines time-window compaction with size-tiered compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) do.

We can first group the SSTs in level n into buckets according to some predefined time window. Within each window, SSTs are compacted in a size-tiered manner (find SSTs with similar sizes and compact them to level n+1). SSTs from different time windows are never compacted together. This strategy guarantees that the SSTs in each level are mainly sorted in timestamp order, which boosts queries with an explicit timestamp condition, while size-tiered compaction minimizes the impact on foreground writes. The sketch below illustrates the bucketing step.
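A minimal sketch of that time-window bucketing, under stated assumptions: `SstMeta` and its fields are hypothetical stand-ins for the real metadata, and the size-tiered selection inside a bucket is left out.

```rust
use std::collections::BTreeMap;

/// Hypothetical SST metadata; the real type lives in the storage engine.
struct SstMeta {
    file: String,
    start_ms: i64, // smallest timestamp contained in the file
    size_bytes: u64,
}

/// Group level-n SSTs by the fixed time window their start timestamp falls
/// into. Files from different windows are never compacted together; within a
/// window, a size-tiered pass would pick similarly sized files for level n+1.
fn bucket_by_window(ssts: &[SstMeta], window_ms: i64) -> BTreeMap<i64, Vec<&SstMeta>> {
    let mut buckets: BTreeMap<i64, Vec<&SstMeta>> = BTreeMap::new();
    for sst in ssts {
        let window_start = sst.start_ms.div_euclid(window_ms) * window_ms;
        buckets.entry(window_start).or_default().push(sst);
    }
    buckets
}

fn main() {
    let ssts = vec![
        SstMeta { file: "a.parquet".into(), start_ms: 5_000, size_bytes: 8 << 20 },
        SstMeta { file: "b.parquet".into(), start_ms: 42_000, size_bytes: 9 << 20 },
        SstMeta { file: "c.parquet".into(), start_ms: 70_000, size_bytes: 64 << 20 },
    ];
    for (window_start, files) in bucket_by_window(&ssts, 60_000) {
        let names: Vec<&str> = files.iter().map(|f| f.file.as_str()).collect();
        let bytes: u64 = files.iter().map(|f| f.size_bytes).sum();
        println!("window {window_start} ms: {names:?}, {bytes} bytes total");
    }
}
```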
### Alternatives

Currently, GreptimeDB's storage engine [only supports two levels](https://github.com/GreptimeTeam/greptimedb/blob/43aefc5d74dfa73b7819cae77b7eb546d8534a41/src/storage/src/sst.rs#L32).
For level 0, we can start with a simple time-window based leveled compaction, which reads all SSTs in level 0, aligns them to time windows of a fixed duration, and merges them with the SSTs in level 1 within the same time window, ensuring there is only one sorted run in level 1.
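A sketch of the window alignment at the heart of that scheme, reusing the `SstMeta` stand-in from the earlier sketch: compute the fixed-duration window each file falls into, then pair each window's level-0 and level-1 files so a merge task can emit a single sorted run per window. The merge itself is elided; all names here are illustrative only.

```rust
/// Aligns a timestamp (seconds) to the start of its fixed-duration window.
fn align_to_window(ts: i64, window_secs: i64) -> i64 {
    ts.div_euclid(window_secs) * window_secs
}

/// Pairs each time window with all level-0 and level-1 files in it, so a
/// merging task can produce one sorted run per window in level 1.
fn plan_leveled_compaction(
    level0: &[SstMeta],
    level1: &[SstMeta],
    window_secs: i64,
) -> Vec<(i64, Vec<SstMeta>)> {
    let mut plan: std::collections::BTreeMap<i64, Vec<SstMeta>> = Default::default();
    for sst in level0.iter().chain(level1.iter()) {
        let window = align_to_window(sst.min_ts, window_secs);
        plan.entry(window).or_default().push(sst.clone());
    }
    plan.into_iter().collect()
}
```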
1 rust-toolchain Normal file
@@ -0,0 +1 @@
nightly-2022-07-14
@@ -1,2 +0,0 @@
[toolchain]
channel = "nightly-2022-12-20"
@@ -1,18 +1,18 @@
[package]
name = "api"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
arrow-flight.workspace = true
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
prost.workspace = true
prost = "0.11"
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
tonic = "0.8"

[build-dependencies]
tonic-build = "0.8"
@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,11 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;

fn main() {
    let default_out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
    tonic_build::configure()
        .file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
        .compile(
            &[
                "greptime/v1/database.proto",
                "greptime/v1/select.proto",
                "greptime/v1/greptime.proto",
                "greptime/v1/meta/common.proto",
                "greptime/v1/meta/heartbeat.proto",
                "greptime/v1/meta/route.proto",
@@ -3,41 +3,56 @@ syntax = "proto3";
package greptime.v1;

import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

// "Data Definition Language" requests, that create, modify or delete the database structures but not the data.
// `DdlRequest` could carry more information than plain SQL, for example, the "table_id" in `CreateTableExpr`.
// So create a new DDL expr if you need it.
message DdlRequest {
message AdminRequest {
  string name = 1;
  repeated AdminExpr exprs = 2;
}

message AdminResponse {
  repeated AdminResult results = 1;
}

message AdminExpr {
  ExprHeader header = 1;
  oneof expr {
    CreateDatabaseExpr create_database = 1;
    CreateTableExpr create_table = 2;
    CreateExpr create = 2;
    AlterExpr alter = 3;
    DropTableExpr drop_table = 4;
    CreateDatabaseExpr create_database = 4;
    DropTableExpr drop_table = 5;
  }
}

message CreateTableExpr {
  string catalog_name = 1;
  string schema_name = 2;
message AdminResult {
  ResultHeader header = 1;
  oneof result {
    MutateResult mutate = 2;
  }
}

// TODO(hl): rename to CreateTableExpr
message CreateExpr {
  optional string catalog_name = 1;
  optional string schema_name = 2;
  string table_name = 3;
  string desc = 4;
  optional string desc = 4;
  repeated ColumnDef column_defs = 5;
  string time_index = 6;
  repeated string primary_keys = 7;
  bool create_if_not_exists = 8;
  map<string, string> table_options = 9;
  TableId table_id = 10;
  optional uint32 table_id = 10;
  repeated uint32 region_ids = 11;
}

message AlterExpr {
  string catalog_name = 1;
  string schema_name = 2;
  optional string catalog_name = 1;
  optional string schema_name = 2;
  string table_name = 3;
  oneof kind {
    AddColumns add_columns = 4;
    DropColumns drop_columns = 5;
    RenameTable rename_table = 6;
  }
}

@@ -47,12 +62,6 @@ message DropTableExpr {
  string table_name = 3;
}

message CreateDatabaseExpr {
  //TODO(hl): maybe rename to schema_name?
  string database_name = 1;
  bool create_if_not_exists = 2;
}

message AddColumns {
  repeated AddColumn add_columns = 1;
}
@@ -61,10 +70,6 @@ message DropColumns {
  repeated DropColumn drop_columns = 1;
}

message RenameTable {
  string new_table_name = 1;
}

message AddColumn {
  ColumnDef column_def = 1;
  bool is_key = 2;
@@ -74,6 +79,7 @@ message DropColumn {
  string name = 1;
}

message TableId {
  uint32 id = 1;
message CreateDatabaseExpr {
  //TODO(hl): maybe rename to schema_name?
  string database_name = 1;
}
@@ -59,7 +59,7 @@ message ColumnDef {
  string name = 1;
  ColumnDataType datatype = 2;
  bool is_nullable = 3;
  bytes default_constraint = 4;
  optional bytes default_constraint = 4;
}

enum ColumnDataType {
22 src/api/greptime/v1/common.proto Normal file
@@ -0,0 +1,22 @@
syntax = "proto3";

package greptime.v1;

message RequestHeader {
  string tenant = 1;
}

message ExprHeader {
  uint32 version = 1;
}

message ResultHeader {
  uint32 version = 1;
  uint32 code = 2;
  string err_msg = 3;
}

message MutateResult {
  uint32 success = 1;
  uint32 failure = 2;
}
@@ -2,51 +2,65 @@ syntax = "proto3";

package greptime.v1;

import "greptime/v1/ddl.proto";
import "greptime/v1/column.proto";
import "greptime/v1/common.proto";

message RequestHeader {
  // The `catalog` that is selected to be used in this request.
  string catalog = 1;
  // The `schema` that is selected to be used in this request.
  string schema = 2;
message DatabaseRequest {
  string name = 1;
  repeated ObjectExpr exprs = 2;
}

message GreptimeRequest {
  RequestHeader header = 1;
  oneof request {
    InsertRequest insert = 2;
    QueryRequest query = 3;
    DdlRequest ddl = 4;
message DatabaseResponse {
  repeated ObjectResult results = 1;
}

message ObjectExpr {
  ExprHeader header = 1;
  oneof expr {
    InsertExpr insert = 2;
    SelectExpr select = 3;
    UpdateExpr update = 4;
    DeleteExpr delete = 5;
  }
}

message QueryRequest {
  oneof query {
    // TODO(fys): Only support sql now, and will support promql etc in the future
message SelectExpr {
  oneof expr {
    string sql = 1;
    bytes logical_plan = 2;
  }
}

message InsertRequest {
  string table_name = 1;
message InsertExpr {
  string schema_name = 1;
  string table_name = 2;

  // Data is represented here.
  repeated Column columns = 3;

  // The row_count of all columns, which include null and non-null values.
  //
  // Note: the row_count of all columns in a InsertRequest must be same.
  // Note: the row_count of all columns in a InsertExpr must be same.
  uint32 row_count = 4;

  // The region number of current insert request.
  uint32 region_number = 5;
}

message AffectedRows {
  uint32 value = 1;
// TODO(jiachun)
message UpdateExpr {}
// TODO(jiachun)
message DeleteExpr {}

message ObjectResult {
  ResultHeader header = 1;
  oneof result {
    SelectResult select = 2;
    MutateResult mutate = 3;
  }
}

message FlightMetadata {
  AffectedRows affected_rows = 1;
message SelectResult {
  bytes raw_data = 1;
}
22 src/api/greptime/v1/greptime.proto Normal file
@@ -0,0 +1,22 @@
syntax = "proto3";

package greptime.v1;

import "greptime/v1/admin.proto";
import "greptime/v1/common.proto";
import "greptime/v1/database.proto";

service Greptime {
  rpc Batch(BatchRequest) returns (BatchResponse) {}
}

message BatchRequest {
  RequestHeader header = 1;
  repeated AdminRequest admins = 2;
  repeated DatabaseRequest databases = 3;
}

message BatchResponse {
  repeated AdminResponse admins = 1;
  repeated DatabaseResponse databases = 2;
}
@@ -26,7 +26,7 @@ message HeartbeatRequest {
  TimeInterval report_interval = 4;
  // Node stat
  NodeStat node_stat = 5;
  // Region stats on this node
  // Region stats in this node
  repeated RegionStat region_stats = 6;
  // Follower nodes and stats, empty on follower nodes
  repeated ReplicaStat replica_stats = 7;
@@ -34,19 +34,19 @@ message HeartbeatRequest {

message NodeStat {
  // The read capacity units during this period
  int64 rcus = 1;
  uint64 rcus = 1;
  // The write capacity units during this period
  int64 wcus = 2;
  // How many tables on this node
  int64 table_num = 3;
  // How many regions on this node
  int64 region_num = 4;
  uint64 wcus = 2;
  // Table number in this node
  uint64 table_num = 3;
  // Region number in this node
  uint64 region_num = 4;

  double cpu_usage = 5;
  double load = 6;
  // Read disk IO on this node
  // Read disk I/O in the node
  double read_io_rate = 7;
  // Write disk IO on this node
  // Write disk I/O in the node
  double write_io_rate = 8;

  // Others
@@ -57,13 +57,13 @@ message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  // The read capacity units during this period
  int64 rcus = 3;
  uint64 rcus = 3;
  // The write capacity units during this period
  int64 wcus = 4;
  // Approximate bytes of this region
  int64 approximate_bytes = 5;
  // Approximate number of rows in this region
  int64 approximate_rows = 6;
  uint64 wcus = 4;
  // Approximate region size
  uint64 approximate_size = 5;
  // Approximate number of rows
  uint64 approximate_rows = 6;

  // Others
  map<string, string> attrs = 100;
10 src/api/greptime/v1/select.proto Normal file
@@ -0,0 +1,10 @@
syntax = "proto3";

package greptime.v1.codec;

import "greptime/v1/column.proto";

message SelectResult {
  repeated Column columns = 1;
  uint32 row_count = 2;
}
@@ -15,6 +15,7 @@
pub mod error;
pub mod helper;
pub mod prometheus;
pub mod result;
pub mod serde;
pub mod v1;
203 src/api/src/result.rs Normal file
@@ -0,0 +1,203 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::prelude::ErrorExt;

use crate::v1::codec::SelectResult;
use crate::v1::{
    admin_result, object_result, AdminResult, MutateResult, ObjectResult, ResultHeader,
    SelectResult as SelectResultRaw,
};

pub const PROTOCOL_VERSION: u32 = 1;

pub type Success = u32;
pub type Failure = u32;

#[derive(Default)]
pub struct ObjectResultBuilder {
    version: u32,
    code: u32,
    err_msg: Option<String>,
    result: Option<Body>,
}

pub enum Body {
    Mutate((Success, Failure)),
    Select(SelectResult),
}

impl ObjectResultBuilder {
    pub fn new() -> Self {
        Self {
            version: PROTOCOL_VERSION,
            ..Default::default()
        }
    }

    #[allow(dead_code)]
    pub fn version(mut self, version: u32) -> Self {
        self.version = version;
        self
    }

    pub fn status_code(mut self, code: u32) -> Self {
        self.code = code;
        self
    }

    pub fn err_msg(mut self, err_msg: String) -> Self {
        self.err_msg = Some(err_msg);
        self
    }

    pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
        self.result = Some(Body::Mutate((success, failure)));
        self
    }

    pub fn select_result(mut self, select_result: SelectResult) -> Self {
        self.result = Some(Body::Select(select_result));
        self
    }

    pub fn build(self) -> ObjectResult {
        let header = Some(ResultHeader {
            version: self.version,
            code: self.code,
            err_msg: self.err_msg.unwrap_or_default(),
        });

        let result = match self.result {
            Some(Body::Mutate((success, failure))) => {
                Some(object_result::Result::Mutate(MutateResult {
                    success,
                    failure,
                }))
            }
            Some(Body::Select(select)) => Some(object_result::Result::Select(SelectResultRaw {
                raw_data: select.into(),
            })),
            None => None,
        };

        ObjectResult { header, result }
    }
}

pub fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
    ObjectResultBuilder::new()
        .status_code(err.status_code() as u32)
        .err_msg(err.to_string())
        .build()
}

#[derive(Debug)]
pub struct AdminResultBuilder {
    version: u32,
    code: u32,
    err_msg: Option<String>,
    mutate: Option<(Success, Failure)>,
}

impl AdminResultBuilder {
    pub fn status_code(mut self, code: u32) -> Self {
        self.code = code;
        self
    }

    pub fn err_msg(mut self, err_msg: String) -> Self {
        self.err_msg = Some(err_msg);
        self
    }

    pub fn mutate_result(mut self, success: u32, failure: u32) -> Self {
        self.mutate = Some((success, failure));
        self
    }

    pub fn build(self) -> AdminResult {
        let header = Some(ResultHeader {
            version: self.version,
            code: self.code,
            err_msg: self.err_msg.unwrap_or_default(),
        });

        let result = if let Some((success, failure)) = self.mutate {
            Some(admin_result::Result::Mutate(MutateResult {
                success,
                failure,
            }))
        } else {
            None
        };

        AdminResult { header, result }
    }
}

impl Default for AdminResultBuilder {
    fn default() -> Self {
        Self {
            version: PROTOCOL_VERSION,
            code: 0,
            err_msg: None,
            mutate: None,
        }
    }
}

#[cfg(test)]
mod tests {
    use common_error::status_code::StatusCode;

    use super::*;
    use crate::error::UnknownColumnDataTypeSnafu;
    use crate::v1::{object_result, MutateResult};

    #[test]
    fn test_object_result_builder() {
        let obj_result = ObjectResultBuilder::new()
            .version(101)
            .status_code(500)
            .err_msg("Failed to read this file!".to_string())
            .mutate_result(100, 20)
            .build();
        let header = obj_result.header.unwrap();
        assert_eq!(101, header.version);
        assert_eq!(500, header.code);
        assert_eq!("Failed to read this file!", header.err_msg);

        let result = obj_result.result.unwrap();
        assert_eq!(
            object_result::Result::Mutate(MutateResult {
                success: 100,
                failure: 20,
            }),
            result
        );
    }

    #[test]
    fn test_build_err_result() {
        let err = UnknownColumnDataTypeSnafu { datatype: 1 }.build();
        let err_result = build_err_result(&err);
        let header = err_result.header.unwrap();
        let result = err_result.result;

        assert_eq!(PROTOCOL_VERSION, header.version);
        assert_eq!(StatusCode::InvalidArguments as u32, header.code);
        assert!(result.is_none());
    }
}
@@ -15,6 +15,7 @@
pub use prost::DecodeError;
use prost::Message;

use crate::v1::codec::SelectResult;
use crate::v1::meta::TableRouteValue;

macro_rules! impl_convert_with_bytes {
@@ -35,4 +36,80 @@ macro_rules! impl_convert_with_bytes {
    };
}

impl_convert_with_bytes!(SelectResult);
impl_convert_with_bytes!(TableRouteValue);

#[cfg(test)]
mod tests {
    use std::ops::Deref;

    use crate::v1::codec::*;
    use crate::v1::{column, Column};

    const SEMANTIC_TAG: i32 = 0;

    #[test]
    fn test_convert_select_result() {
        let select_result = mock_select_result();

        let bytes: Vec<u8> = select_result.into();
        let result: SelectResult = bytes.deref().try_into().unwrap();

        assert_eq!(8, result.row_count);
        assert_eq!(1, result.columns.len());

        let column = &result.columns[0];
        assert_eq!("foo", column.column_name);
        assert_eq!(SEMANTIC_TAG, column.semantic_type);
        assert_eq!(vec![1], column.null_mask);
        assert_eq!(
            vec![2, 3, 4, 5, 6, 7, 8],
            column.values.as_ref().unwrap().i32_values
        );
    }

    #[should_panic]
    #[test]
    fn test_convert_select_result_wrong() {
        let select_result = mock_select_result();

        let mut bytes: Vec<u8> = select_result.into();

        // modify some bytes
        bytes[0] = 0b1;
        bytes[1] = 0b1;

        let result: SelectResult = bytes.deref().try_into().unwrap();

        assert_eq!(8, result.row_count);
        assert_eq!(1, result.columns.len());

        let column = &result.columns[0];
        assert_eq!("foo", column.column_name);
        assert_eq!(SEMANTIC_TAG, column.semantic_type);
        assert_eq!(vec![1], column.null_mask);
        assert_eq!(
            vec![2, 3, 4, 5, 6, 7, 8],
            column.values.as_ref().unwrap().i32_values
        );
    }

    fn mock_select_result() -> SelectResult {
        let values = column::Values {
            i32_values: vec![2, 3, 4, 5, 6, 7, 8],
            ..Default::default()
        };
        let null_mask = vec![1];
        let column = Column {
            column_name: "foo".to_string(),
            semantic_type: SEMANTIC_TAG,
            values: Some(values),
            null_mask,
            ..Default::default()
        };
        SelectResult {
            columns: vec![column],
            row_count: 8,
        }
    }
}
@@ -15,5 +15,11 @@
#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("greptime.v1");

pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");

pub mod codec {
    tonic::include_proto!("greptime.v1.codec");
}

mod column_def;
pub mod meta;
@@ -23,13 +23,12 @@ impl ColumnDef {
    pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
        let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;

        let constraint = if self.default_constraint.is_empty() {
            None
        } else {
            Some(
                ColumnDefaultConstraint::try_from(self.default_constraint.as_slice())
        let constraint = match &self.default_constraint {
            None => None,
            Some(v) => Some(
                ColumnDefaultConstraint::try_from(&v[..])
                    .context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
            )
            ),
        };

        ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
@@ -1,13 +1,14 @@
[package]
name = "catalog"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
async-stream.workspace = true
async-stream = "0.3"
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
@@ -18,10 +19,10 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datafusion.workspace = true
datafusion = "14.0.0"
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
futures-util = "0.3"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
regex = "1.6"
@@ -30,7 +31,7 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
table = { path = "../table" }
tokio.workspace = true
tokio = { version = "1.18", features = ["full"] }

[dev-dependencies]
chrono = "0.4"
@@ -39,4 +40,4 @@ mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
tempdir = "0.3"
tokio.workspace = true
tokio = { version = "1.0", features = ["full"] }
@@ -13,7 +13,6 @@
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
@@ -22,8 +21,6 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};

use crate::DeregisterTableRequest;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
@@ -89,25 +86,27 @@ pub enum Error {
        backtrace: Backtrace,
    },

    #[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
    #[snafu(display("Cannot find schema, schema info: {}", schema_info))]
    SchemaNotFound {
        catalog: String,
        schema: String,
        schema_info: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Table `{}` already exists", table))]
    TableExists { table: String, backtrace: Backtrace },

    #[snafu(display("Table `{}` not exist", table))]
    TableNotExist { table: String, backtrace: Backtrace },

    #[snafu(display("Schema {} already exists", schema))]
    SchemaExists {
        schema: String,
        backtrace: Backtrace,
    },

    #[snafu(display("Failed to register table"))]
    RegisterTable {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Operation {} not implemented yet", operation))]
    Unimplemented {
        operation: String,
@@ -142,17 +141,6 @@ pub enum Error {
        source: table::error::Error,
    },

    #[snafu(display(
        "Failed to deregister table, request: {:?}, source: {}",
        request,
        source
    ))]
    DeregisterTable {
        request: DeregisterTableRequest,
        #[snafu(backtrace)]
        source: table::error::Error,
    },

    #[snafu(display("Illegal catalog manager state: {}", msg))]
    IllegalManagerState { backtrace: Backtrace, msg: String },

@@ -175,12 +163,6 @@ pub enum Error {
        source: datatypes::error::Error,
    },

    #[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
    SchemaProviderOperation {
        #[snafu(backtrace)]
        source: BoxedError,
    },

    #[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
    SystemCatalogTableScanExec {
        #[snafu(backtrace)]
@@ -192,6 +174,15 @@ pub enum Error {
        source: common_catalog::error::Error,
    },

    #[snafu(display("IO error occurred while fetching catalog info, source: {}", source))]
    Io {
        backtrace: Backtrace,
        source: std::io::Error,
    },

    #[snafu(display("Local and remote catalog data are inconsistent, msg: {}", msg))]
    CatalogStateInconsistent { msg: String, backtrace: Backtrace },

    #[snafu(display("Failed to perform metasrv operation, source: {}", source))]
    MetaSrv {
        #[snafu(backtrace)]
@@ -203,6 +194,12 @@ pub enum Error {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display("Catalog internal error: {}", source))]
    Internal {
        #[snafu(backtrace)]
        source: BoxedError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -215,34 +212,35 @@ impl ErrorExt for Error {
            | Error::TableNotFound { .. }
            | Error::IllegalManagerState { .. }
            | Error::CatalogNotFound { .. }
            | Error::InvalidEntryType { .. } => StatusCode::Unexpected,
            | Error::InvalidEntryType { .. }
            | Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,

            Error::SystemCatalog { .. }
            | Error::EmptyValue { .. }
            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
            | Error::ValueDeserialize { .. }
            | Error::Io { .. } => StatusCode::StorageUnavailable,

            Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
            Error::RegisterTable { .. } | Error::SystemCatalogTypeMismatch { .. } => {
                StatusCode::Internal
            }

            Error::ReadSystemCatalog { source, .. } => source.status_code(),
            Error::InvalidCatalogValue { source, .. } => source.status_code(),

            Error::TableExists { .. } => StatusCode::TableAlreadyExists,
            Error::TableNotExist { .. } => StatusCode::TableNotFound,
            Error::SchemaExists { .. } => StatusCode::InvalidArguments,

            Error::OpenSystemCatalog { source, .. }
            | Error::CreateSystemCatalog { source, .. }
            | Error::InsertCatalogRecord { source, .. }
            | Error::OpenTable { source, .. }
            | Error::CreateTable { source, .. }
            | Error::DeregisterTable { source, .. } => source.status_code(),

            | Error::CreateTable { source, .. } => source.status_code(),
            Error::MetaSrv { source, .. } => source.status_code(),
            Error::SystemCatalogTableScan { source } => source.status_code(),
            Error::SystemCatalogTableScanExec { source } => source.status_code(),
            Error::InvalidTableSchema { source, .. } => source.status_code(),
            Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
            Error::SchemaProviderOperation { source } => source.status_code(),
            Error::Internal { source, .. } => source.status_code(),

            Error::Unimplemented { .. } => StatusCode::Unsupported,
        }
@@ -265,6 +263,7 @@ impl From<Error> for DataFusionError {

#[cfg(test)]
mod tests {
    use common_error::mock::MockError;
    use snafu::GenerateImplicitData;

    use super::*;
@@ -285,6 +284,22 @@ mod tests {
            InvalidKeySnafu { key: None }.build().status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::OpenSystemCatalog {
                source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
            }
            .status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::CreateSystemCatalog {
                source: table::error::Error::new(MockError::new(StatusCode::StorageUnavailable))
            }
            .status_code()
        );

        assert_eq!(
            StatusCode::StorageUnavailable,
            Error::SystemCatalog {
@@ -33,38 +33,48 @@ const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";

lazy_static! {
    static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
        "^{}-({})$",
        CATALOG_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN
    ))
    .unwrap();
}

lazy_static! {
    static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
        "^{}-({})-({})$",
        SCHEMA_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN, ALPHANUMERICS_NAME_PATTERN
    ))
    .unwrap();
}

lazy_static! {
    static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
        "^{}-({})-({})-({})$",
        TABLE_GLOBAL_KEY_PREFIX,
        ALPHANUMERICS_NAME_PATTERN,
        ALPHANUMERICS_NAME_PATTERN,
        ALPHANUMERICS_NAME_PATTERN
    ))
    .unwrap();
}

lazy_static! {
    static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
        "^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
        "^{}-({})-({})-({})-([0-9]+)$",
        TABLE_REGIONAL_KEY_PREFIX,
        ALPHANUMERICS_NAME_PATTERN,
        ALPHANUMERICS_NAME_PATTERN,
        ALPHANUMERICS_NAME_PATTERN
    ))
    .unwrap();
}

pub fn build_catalog_prefix() -> String {
    format!("{CATALOG_KEY_PREFIX}-")
    format!("{}-", CATALOG_KEY_PREFIX)
}

pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
    format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
    format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
}

pub fn build_table_global_prefix(
@@ -72,7 +82,8 @@ pub fn build_table_global_prefix(
    schema_name: impl AsRef<str>,
) -> String {
    format!(
        "{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
        "{}-{}-{}-",
        TABLE_GLOBAL_KEY_PREFIX,
        catalog_name.as_ref(),
        schema_name.as_ref()
    )
@@ -91,7 +102,6 @@ pub fn build_table_regional_prefix(
}

/// Table global info has only one key across all datanodes so it does not have `node_id` field.
#[derive(Clone)]
pub struct TableGlobalKey {
    pub catalog_name: String,
    pub schema_name: String,
@@ -132,6 +142,7 @@ impl TableGlobalKey {
pub struct TableGlobalValue {
    /// Id of datanode that created the global table info kv. only for debugging.
    pub node_id: u64,
    // TODO(LFC): Maybe remove it?
    /// Allocation of region ids across all datanodes.
    pub regions_id_map: HashMap<u64, Vec<u32>>,
    pub table_info: RawTableInfo,
@@ -367,7 +378,7 @@ mod tests {
            table_info,
        };
        let serialized = serde_json::to_string(&value).unwrap();
        let deserialized = TableGlobalValue::parse(serialized).unwrap();
        let deserialized = TableGlobalValue::parse(&serialized).unwrap();
        assert_eq!(value, deserialized);
    }
}
@@ -19,7 +19,7 @@ use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -97,9 +97,6 @@ pub trait CatalogManager: CatalogList {
    /// schema registered.
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;

    /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;

    /// Register a system table, should be called before starting the manager.
    async fn register_system_table(&self, request: RegisterSystemTableRequest)
        -> error::Result<()>;
@@ -145,16 +142,7 @@ impl Debug for RegisterTableRequest {
    }
}

#[derive(Debug, Clone)]
pub struct RenameTableRequest {
    pub catalog: String,
    pub schema: String,
    pub table_name: String,
    pub new_table_name: String,
    pub table_id: TableId,
}

#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct DeregisterTableRequest {
    pub catalog: String,
    pub schema: String,
@@ -167,6 +155,11 @@ pub struct RegisterSchemaRequest {
    pub schema: String,
}

/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{}.{}.{}", catalog, schema, table)
}

pub trait CatalogProviderFactory {
    fn create(&self, catalog_name: String) -> CatalogProviderRef;
}
@@ -193,10 +186,9 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
        .create_table(&EngineContext::default(), req.create_table_request.clone())
        .await
        .with_context(|_| CreateTableSnafu {
            table_info: common_catalog::format_full_table_name(
                catalog_name,
                schema_name,
                table_name,
            table_info: format!(
                "{}.{}.{}, id: {}",
                catalog_name, schema_name, table_name, table_id,
            ),
        })?;
    manager
@@ -208,7 +200,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
                table: table.clone(),
            })
            .await?;
        info!("Created and registered system table: {table_name}");
        info!("Created and registered system table: {}", table_name);
        table
    };
    if let Some(hook) = req.open_hook {
@@ -217,38 +209,3 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
    }
    Ok(())
}

/// The number of regions in the datanode node.
pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
    let mut region_number: u64 = 0;

    for catalog_name in catalog_manager.catalog_names()? {
        let catalog =
            catalog_manager
                .catalog(&catalog_name)?
                .context(error::CatalogNotFoundSnafu {
                    catalog_name: &catalog_name,
                })?;

        for schema_name in catalog.schema_names()? {
            let schema = catalog
                .schema(&schema_name)?
                .context(error::SchemaNotFoundSnafu {
                    catalog: &catalog_name,
                    schema: &schema_name,
                })?;

            for table_name in schema.table_names()? {
                let table = schema
                    .table(&table_name)?
                    .context(error::TableNotFoundSnafu {
                        table_info: &table_name,
                    })?;

                let region_numbers = &table.table_info().meta.region_numbers;
                region_number += region_numbers.len() as u64;
            }
        }
    }
    Ok(region_number)
}
@@ -20,7 +20,6 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
    SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{error, info};
use datatypes::prelude::ScalarVector;
@@ -35,9 +34,9 @@ use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{
    self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
    Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
    SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
    CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
    SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
    TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
@@ -46,9 +45,9 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
    handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
    DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
    RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
    format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
    CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
    RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
};

/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -242,8 +241,7 @@ impl LocalCatalogManager {
        let schema = catalog
            .schema(&t.schema_name)?
            .context(SchemaNotFoundSnafu {
                catalog: &t.catalog_name,
                schema: &t.schema_name,
                schema_info: format!("{}.{}", &t.catalog_name, &t.schema_name),
            })?;

        let context = EngineContext {};
@@ -252,6 +250,7 @@ impl LocalCatalogManager {
            schema_name: t.schema_name.clone(),
            table_name: t.table_name.clone(),
            table_id: t.table_id,
            region_numbers: vec![0],
        };

        let option = self
@@ -339,8 +338,7 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
                schema_info: format!("{}.{}", catalog_name, schema_name),
            })?;

        {
@@ -379,75 +377,11 @@ impl CatalogManager for LocalCatalogManager {
        }
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let started = self.init_lock.lock().await;

        ensure!(
            *started,
            IllegalManagerStateSnafu {
                msg: "Catalog manager not started",
            }
        );

        let catalog_name = &request.catalog;
        let schema_name = &request.schema;

        let catalog = self
            .catalogs
            .catalog(catalog_name)?
            .context(CatalogNotFoundSnafu { catalog_name })?;

        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?;

        // rename table in system catalog
        self.system
            .register_table(
                catalog_name.clone(),
                schema_name.clone(),
                request.new_table_name.clone(),
                request.table_id,
            )
            .await?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        {
            let started = *self.init_lock.lock().await;
            ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
        }

        {
            let _ = self.register_lock.lock().await;

            let DeregisterTableRequest {
                catalog,
                schema,
                table_name,
            } = &request;
            let table_id = self
                .catalogs
                .table(catalog, schema, table_name)?
                .with_context(|| error::TableNotExistSnafu {
                    table: format!("{catalog}.{schema}.{table_name}"),
                })?
                .table_info()
                .ident
                .table_id;

            if !self.system.deregister_table(&request, table_id).await? {
                return Ok(false);
            }

            self.catalogs.deregister_table(request).await
    async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
        UnimplementedSnafu {
            operation: "deregister table",
        }
        .fail()
    }

    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
@@ -518,8 +452,7 @@ impl CatalogManager for LocalCatalogManager {
        let schema = catalog
            .schema(schema_name)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
                schema_info: format!("{}.{}", catalog_name, schema_name),
            })?;
        schema.table(table_name)
    }
@@ -20,19 +20,16 @@ use std::sync::{Arc, RwLock};

use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::{ensure, OptionExt};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;

use crate::error::{
    self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::schema::SchemaProvider;
use crate::{
    CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
    SchemaProviderRef,
    RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
};

/// Simple in-memory list of catalogs
@@ -84,33 +81,13 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
            })?;
        schema
            .register_table(request.table_name, request.table)
            .map(|v| v.is_none())
    }

    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
            .get(&request.catalog)
            .context(CatalogNotFoundSnafu {
                catalog_name: &request.catalog,
            })?
            .clone();
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
            })?;
        Ok(schema
            .rename_table(&request.table_name, request.new_table_name)
            .is_ok())
    }

    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
        let catalogs = self.catalogs.write().unwrap();
        let catalog = catalogs
@@ -122,8 +99,7 @@ impl CatalogManager for MemoryCatalogManager {
        let schema = catalog
            .schema(&request.schema)?
            .with_context(|| SchemaNotFoundSnafu {
                catalog: &request.catalog,
                schema: &request.schema,
                schema_info: format!("{}.{}", &request.catalog, &request.schema),
            })?;
        schema
            .deregister_table(&request.table_name)
@@ -250,10 +226,6 @@ impl CatalogProvider for MemoryCatalogProvider {
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>> {
        let mut schemas = self.schemas.write().unwrap();
        ensure!(
            !schemas.contains_key(&name),
            error::SchemaExistsSnafu { schema: &name }
        );
        Ok(schemas.insert(name, schema))
    }

@@ -316,20 +288,6 @@ impl SchemaProvider for MemorySchemaProvider {
        }
    }

    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        let mut tables = self.tables.write().unwrap();
        if tables.get(name).is_some() {
            let table = tables.remove(name).unwrap();
            tables.insert(new_name, table.clone());
            Ok(table)
        } else {
            TableNotFoundSnafu {
                table_info: name.to_string(),
            }
            .fail()?
        }
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        let mut tables = self.tables.write().unwrap();
        Ok(tables.remove(name))
@@ -394,85 +352,6 @@ mod tests {
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_mem_provider_rename_table() {
        let provider = MemorySchemaProvider::new();
        let table_name = "num";
        assert!(!provider.table_exist(table_name).unwrap());
        let test_table: TableRef = Arc::new(NumbersTable::default());
        // register test table
        assert!(provider
            .register_table(table_name.to_string(), test_table.clone())
            .unwrap()
            .is_none());
        assert!(provider.table_exist(table_name).unwrap());

        // rename test table
        let new_table_name = "numbers";
        provider
            .rename_table(table_name, new_table_name.to_string())
            .unwrap();

        // test old table name not exist
        assert!(!provider.table_exist(table_name).unwrap());
        assert!(provider.deregister_table(table_name).unwrap().is_none());

        // test new table name exists
        assert!(provider.table_exist(new_table_name).unwrap());
        let registered_table = provider.table(new_table_name).unwrap().unwrap();
        assert_eq!(
            registered_table.table_info().ident.table_id,
            test_table.table_info().ident.table_id
        );

        let other_table = Arc::new(NumbersTable::new(2));
        let result = provider.register_table(new_table_name.to_string(), other_table);
        let err = result.err().unwrap();
        assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
    }

    #[tokio::test]
    async fn test_catalog_rename_table() {
        let catalog = MemoryCatalogManager::default();
        let schema = catalog
            .schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
            .unwrap()
            .unwrap();

        // register table
        let table_name = "num";
        let table_id = 2333;
        let table: TableRef = Arc::new(NumbersTable::new(table_id));
        let register_table_req = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table,
        };
        assert!(catalog.register_table(register_table_req).await.unwrap());
        assert!(schema.table_exist(table_name).unwrap());

        // rename table
        let new_table_name = "numbers";
        let rename_table_req = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        assert!(catalog.rename_table(rename_table_req).await.unwrap());
        assert!(!schema.table_exist(table_name).unwrap());
        assert!(schema.table_exist(new_table_name).unwrap());

        let registered_table = catalog
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);
    }

    #[test]
    pub fn test_register_if_absent() {
        let list = MemoryCatalogManager::default();
@@ -1,10 +1,10 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
// Copyright 2022 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
// Copyright 2022 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
// Copyright 2022 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -43,7 +43,7 @@ use crate::remote::{Kv, KvBackendRef};
|
||||
use crate::{
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
RegisterTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// Catalog manager based on metasrv.
|
||||
@@ -324,13 +324,17 @@ impl RemoteCatalogManager {
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
table_id,
|
||||
region_numbers: region_numbers.clone(),
|
||||
};
|
||||
match self
|
||||
.engine
|
||||
.open_table(&context, request)
|
||||
.await
|
||||
.with_context(|_| OpenTableSnafu {
|
||||
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
|
||||
table_info: format!(
|
||||
"{}.{}.{}, id:{}",
|
||||
catalog_name, schema_name, table_name, table_id
|
||||
),
|
||||
})? {
|
||||
Some(table) => {
|
||||
info!(
|
||||
@@ -351,7 +355,7 @@ impl RemoteCatalogManager {
|
||||
.clone()
|
||||
.try_into()
|
||||
.context(InvalidTableSchemaSnafu {
|
||||
table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
|
||||
table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
|
||||
schema: meta.schema.clone(),
|
||||
})?;
|
||||
let req = CreateTableRequest {
|
||||
@@ -417,8 +421,7 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
catalog_provider
|
||||
.schema(&schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &catalog_name,
|
||||
schema: &schema_name,
|
||||
schema_info: format!("{}.{}", &catalog_name, &schema_name),
|
||||
})?;
|
||||
if schema_provider.table_exist(&request.table_name)? {
|
||||
return TableExistsSnafu {
|
||||
@@ -448,13 +451,6 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn rename_table(&self, _request: RenameTableRequest) -> Result<bool> {
|
||||
UnimplementedSnafu {
|
||||
operation: "rename table",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
|
||||
let mut requests = self.system_table_requests.lock().await;
|
||||
requests.push(request);
|
||||
@@ -481,8 +477,7 @@ impl CatalogManager for RemoteCatalogManager {
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
schema_info: format!("{}.{}", catalog_name, schema_name),
|
||||
})?;
|
||||
schema.table(table_name)
|
||||
}
|
||||
@@ -745,13 +740,6 @@ impl SchemaProvider for RemoteSchemaProvider {
|
||||
prev
|
||||
}
|
||||
|
||||
fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
|
||||
UnimplementedSnafu {
|
||||
operation: "rename table",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let table_name = name.to_string();
|
||||
let table_key = self.build_regional_table_key(&table_name).to_string();
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
// Copyright 2022 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -35,10 +35,6 @@ pub trait SchemaProvider: Sync + Send {
|
||||
/// If a table of the same name existed before, it returns "Table already exists" error.
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;
|
||||
|
||||
/// If supported by the implementation, renames an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns "Table not found" error.
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;
|
||||
|
||||
/// If supported by the implementation, removes an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns Ok(None).
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;
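
As a point of reference for the three contracts documented above, here is a standalone sketch of a map-backed registry that honors them. It is illustrative only: `String` stands in for `TableRef`, and the `Registry` type is hypothetical, not part of the crate.

use std::collections::HashMap;

// Hypothetical stand-in registry; `String` replaces `TableRef` for illustration.
struct Registry {
    tables: HashMap<String, String>,
}

impl Registry {
    // Mirrors `register_table`: inserting a duplicate name is an error.
    fn register_table(&mut self, name: String, table: String) -> Result<Option<String>, String> {
        if self.tables.contains_key(&name) {
            return Err(format!("Table `{name}` already exists"));
        }
        Ok(self.tables.insert(name, table))
    }

    // Mirrors `rename_table`: renaming a missing name is an error.
    fn rename_table(&mut self, name: &str, new_name: String) -> Result<String, String> {
        match self.tables.remove(name) {
            Some(table) => {
                self.tables.insert(new_name, table.clone());
                Ok(table)
            }
            None => Err(format!("Table `{name}` not found")),
        }
    }

    // Mirrors `deregister_table`: removing a missing name is simply `None`.
    fn deregister_table(&mut self, name: &str) -> Option<String> {
        self.tables.remove(name)
    }
}

fn main() {
    let mut r = Registry { tables: HashMap::new() };
    assert!(r.register_table("num".into(), "numbers table".into()).unwrap().is_none());
    assert!(r.register_table("num".into(), "other".into()).is_err());
    r.rename_table("num", "numbers".into()).unwrap();
    assert!(r.deregister_table("num").is_none()); // old name is gone
    assert!(r.deregister_table("numbers").is_some());
}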
@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,27 +25,29 @@ use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest};
use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest};
use table::{Table, TableRef};

use crate::error::{
    self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
    OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
};
use crate::DeregisterTableRequest;

pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
pub const VALUE_INDEX: usize = 3;

pub struct SystemCatalogTable(TableRef);
pub struct SystemCatalogTable {
    table_info: TableInfoRef,
    pub table: TableRef,
}

#[async_trait::async_trait]
impl Table for SystemCatalogTable {
@@ -54,29 +56,25 @@ impl Table for SystemCatalogTable {
    }

    fn schema(&self) -> SchemaRef {
        self.0.schema()
        self.table_info.meta.schema.clone()
    }

    async fn scan(
        &self,
        projection: Option<&Vec<usize>>,
        filters: &[Expr],
        limit: Option<usize>,
        _projection: &Option<Vec<usize>>,
        _filters: &[Expr],
        _limit: Option<usize>,
    ) -> table::Result<PhysicalPlanRef> {
        self.0.scan(projection, filters, limit).await
        panic!("System catalog table does not support scan!")
    }

    /// Insert values into table.
    async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
        self.0.insert(request).await
        self.table.insert(request).await
    }

    fn table_info(&self) -> TableInfoRef {
        self.0.table_info()
    }

    async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
        self.0.delete(request).await
        self.table_info.clone()
    }
}

@@ -87,6 +85,7 @@ impl SystemCatalogTable {
            schema_name: INFORMATION_SCHEMA_NAME.to_string(),
            table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
            table_id: SYSTEM_CATALOG_TABLE_ID,
            region_numbers: vec![0],
        };
        let schema = Arc::new(build_system_catalog_schema());
        let ctx = EngineContext::default();
@@ -96,7 +95,10 @@ impl SystemCatalogTable {
            .await
            .context(OpenSystemCatalogSnafu)?
        {
            Ok(Self(table))
            Ok(Self {
                table_info: table.table_info(),
                table,
            })
        } else {
            // system catalog table is not yet created, try to create
            let request = CreateTableRequest {
@@ -116,7 +118,8 @@ impl SystemCatalogTable {
                .create_table(&ctx, request)
                .await
                .context(CreateSystemCatalogSnafu)?;
            Ok(Self(table))
            let table_info = table.table_info();
            Ok(Self { table, table_info })
        }
    }

@@ -125,7 +128,8 @@ impl SystemCatalogTable {
        let full_projection = None;
        let ctx = SessionContext::new();
        let scan = self
            .scan(full_projection, &[], None)
            .table
            .scan(&full_projection, &[], None)
            .await
            .context(error::SystemCatalogTableScanSnafu)?;
        let stream = scan
@@ -182,58 +186,18 @@ fn build_system_catalog_schema() -> Schema {
    SchemaBuilder::try_from(cols).unwrap().build().unwrap()
}

/// Formats key string for table entry in system catalog
#[inline]
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
    format!("{catalog}.{schema}.{table_id}")
}

pub fn build_table_insert_request(
    catalog: String,
    schema: String,
    table_name: String,
    table_id: TableId,
) -> InsertRequest {
    let entry_key = format_table_entry_key(&catalog, &schema, table_id);
pub fn build_table_insert_request(full_table_name: String, table_id: TableId) -> InsertRequest {
    build_insert_request(
        EntryType::Table,
        entry_key.as_bytes(),
        serde_json::to_string(&TableEntryValue { table_name })
        full_table_name.as_bytes(),
        serde_json::to_string(&TableEntryValue { table_id })
            .unwrap()
            .as_bytes(),
    )
}

pub(crate) fn build_table_deletion_request(
    request: &DeregisterTableRequest,
    table_id: TableId,
) -> DeleteRequest {
    let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
    DeleteRequest {
        key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
    }
}

fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
    let mut m = HashMap::with_capacity(3);
    m.insert(
        "entry_type".to_string(),
        Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
    );
    m.insert(
        "key".to_string(),
        Arc::new(BinaryVector::from_slice(&[key])) as _,
    );
    // Timestamp in key part is intentionally left to 0
    m.insert(
        "timestamp".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
    );
    m
}

pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
    let full_schema_name = format!("{catalog_name}.{schema_name}")
    let full_schema_name = format!("{}.{}", catalog_name, schema_name);
    build_insert_request(
        EntryType::Schema,
        full_schema_name.as_bytes(),
@@ -244,10 +208,22 @@ pub fn build_schema_insert_request(catalog_name: String, schema_name: String) ->
}

pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
    let primary_key_columns = build_primary_key_columns(entry_type, key);

    let mut columns_values = HashMap::with_capacity(6);
    columns_values.extend(primary_key_columns.into_iter());
    columns_values.insert(
        "entry_type".to_string(),
        Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
    );

    columns_values.insert(
        "key".to_string(),
        Arc::new(BinaryVector::from_slice(&[key])) as _,
    );

    // Timestamp in key part is intentionally left to 0
    columns_values.insert(
        "timestamp".to_string(),
        Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
    );

    columns_values.insert(
        "value".to_string(),
@@ -270,7 +246,6 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
        schema_name: DEFAULT_SCHEMA_NAME.to_string(),
        table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
        columns_values,
        region_number: 0, // system catalog table has only one region
    }
}

@@ -310,8 +285,8 @@ pub fn decode_system_catalog(
        }

        EntryType::Table => {
            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_id>`
            // and the value is a JSON string with format: `{"table_name": <table_name>}`
            // As for table entry, the key is a string with format: `<catalog_name>.<schema_name>.<table_name>`
            // and the value is a JSON string with format: `{"table_id": <table_id>}`
            let table_parts = key.split('.').collect::<Vec<_>>();
            ensure!(
                table_parts.len() >= 3,
@@ -323,12 +298,11 @@ pub fn decode_system_catalog(
            debug!("Table meta value: {}", String::from_utf8_lossy(value));
            let table_meta: TableEntryValue =
                serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
            let table_id = table_parts[2].parse::<TableId>().unwrap();
            Ok(Entry::Table(TableEntry {
                catalog_name: table_parts[0].to_string(),
                schema_name: table_parts[1].to_string(),
                table_name: table_meta.table_name,
                table_id,
                table_name: table_parts[2].to_string(),
                table_id: table_meta.table_id,
            }))
        }
    }
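
The new key/value layout is easy to see in isolation. Below is a standalone round-trip sketch of the table-entry encoding described in the comments above (key `<catalog_name>.<schema_name>.<table_name>`, value `{"table_id": <table_id>}`); it assumes only `serde`/`serde_json`, and `u32` stands in for the crate's `TableId`.

use serde::{Deserialize, Serialize};

// Mirrors (does not reuse) the crate's `TableEntryValue`; `u32` stands in for `TableId`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct TableEntryValue {
    table_id: u32,
}

fn main() {
    // Encode: the key carries the fully qualified table name, the value carries the id.
    let key = format!("{}.{}.{}", "greptime", "public", "my_table");
    let value = serde_json::to_string(&TableEntryValue { table_id: 42 }).unwrap();
    assert_eq!(key, "greptime.public.my_table");
    assert_eq!(value, r#"{"table_id":42}"#);

    // Decode: split the key on '.' and deserialize the JSON value.
    let parts: Vec<&str> = key.split('.').collect();
    let meta: TableEntryValue = serde_json::from_str(&value).unwrap();
    assert_eq!(parts, ["greptime", "public", "my_table"]);
    assert_eq!(meta, TableEntryValue { table_id: 42 });
}
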
@@ -388,14 +362,12 @@ pub struct TableEntry {

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
    pub table_name: String,
    pub table_id: TableId,
}

#[cfg(test)]
mod tests {
    use common_recordbatch::RecordBatches;
    use datatypes::value::Value;
    use log_store::NoopLogStore;
    use log_store::fs::noop::NoopLogStore;
    use mito::config::EngineConfig;
    use mito::engine::MitoEngine;
    use object_store::ObjectStore;
@@ -418,7 +390,7 @@ mod tests {
        if let Entry::Catalog(e) = entry {
            assert_eq!("some_catalog", e.catalog_name);
        } else {
            panic!("Unexpected type: {entry:?}");
            panic!("Unexpected type: {:?}", entry);
        }
    }

@@ -435,7 +407,7 @@ mod tests {
            assert_eq!("some_catalog", e.catalog_name);
            assert_eq!("some_schema", e.schema_name);
        } else {
            panic!("Unexpected type: {entry:?}");
            panic!("Unexpected type: {:?}", entry);
        }
    }

@@ -443,8 +415,8 @@ mod tests {
    pub fn test_decode_table() {
        let entry = decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.42".as_bytes()),
            Some("{\"table_name\":\"some_table\"}".as_bytes()),
            Some("some_catalog.some_schema.some_table".as_bytes()),
            Some("{\"table_id\":42}".as_bytes()),
        )
        .unwrap();

@@ -454,7 +426,7 @@ mod tests {
            assert_eq!("some_table", e.table_name);
            assert_eq!(42, e.table_id);
        } else {
            panic!("Unexpected type: {entry:?}");
            panic!("Unexpected type: {:?}", entry);
        }
    }

@@ -463,7 +435,7 @@ mod tests {
    pub fn test_decode_mismatch() {
        decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.42".as_bytes()),
            Some("some_catalog.some_schema.some_table".as_bytes()),
            None,
        )
        .unwrap();
@@ -515,53 +487,4 @@ mod tests {
        assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
        assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
    }

    #[tokio::test]
    async fn test_system_catalog_table_records() {
        let (_, table_engine) = prepare_table_engine().await;
        let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();

        let table_insertion = build_table_insert_request(
            DEFAULT_CATALOG_NAME.to_string(),
            DEFAULT_SCHEMA_NAME.to_string(),
            "my_table".to_string(),
            1,
        );
        let result = catalog_table.insert(table_insertion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 1);
        let batch = batches.remove(0);
        assert_eq!(batch.num_rows(), 1);

        let row = batch.rows().next().unwrap();
        let Value::UInt8(entry_type) = row[0] else { unreachable!() };
        let Value::Binary(key) = row[1].clone() else { unreachable!() };
        let Value::Binary(value) = row[3].clone() else { unreachable!() };
        let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
        let expected = Entry::Table(TableEntry {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "my_table".to_string(),
            table_id: 1,
        });
        assert_eq!(entry, expected);

        let table_deletion = build_table_deletion_request(
            &DeregisterTableRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "my_table".to_string(),
            },
            1,
        );
        let result = catalog_table.delete(table_deletion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 0);
    }
}

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -38,13 +38,10 @@ use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};

use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
use crate::system::{
    build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
    SystemCatalogTable,
};
use crate::error::{Error, InsertCatalogRecordSnafu};
use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
use crate::{
    CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
    format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
};

/// Tables holds all tables created by user.
@@ -80,7 +77,7 @@ impl Table for Tables {

    async fn scan(
        &self,
        _projection: Option<&Vec<usize>>,
        _projection: &Option<Vec<usize>>,
        _filters: &[Expr],
        _limit: Option<usize>,
    ) -> table::error::Result<PhysicalPlanRef> {
@@ -236,10 +233,6 @@ impl SchemaProvider for InformationSchema {
        panic!("System catalog & schema does not support register table")
    }

    fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
        unimplemented!("System catalog & schema does not support rename table")
    }

    fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
        panic!("System catalog & schema does not support deregister table")
    }
@@ -276,7 +269,8 @@ impl SystemCatalog {
        table_name: String,
        table_id: TableId,
    ) -> crate::error::Result<usize> {
        let request = build_table_insert_request(catalog, schema, table_name, table_id);
        let full_table_name = format_full_table_name(&catalog, &schema, &table_name);
        let request = build_table_insert_request(full_table_name, table_id);
        self.information_schema
            .system
            .insert(request)
@@ -284,21 +278,6 @@ impl SystemCatalog {
            .context(InsertCatalogRecordSnafu)
    }

    pub(crate) async fn deregister_table(
        &self,
        request: &DeregisterTableRequest,
        table_id: TableId,
    ) -> CatalogResult<bool> {
        self.information_schema
            .system
            .delete(build_table_deletion_request(request, table_id))
            .await
            .map(|x| x == 1)
            .with_context(|_| error::DeregisterTableSnafu {
                request: request.clone(),
            })
    }

    pub async fn register_schema(
        &self,
        catalog: String,
@@ -391,7 +370,7 @@ mod tests {
            .unwrap();

        let tables = Tables::new(catalog_list, "test_engine".to_string());
        let tables_stream = tables.scan(None, &[], None).await.unwrap();
        let tables_stream = tables.scan(&None, &[], None).await.unwrap();
        let session_ctx = SessionContext::new();
        let mut tables_stream = tables_stream.execute(0, session_ctx.task_ctx()).unwrap();

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ mod tests {
    use std::sync::Arc;

    use catalog::local::LocalCatalogManager;
    use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
    use catalog::{CatalogManager, RegisterTableRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_telemetry::{error, info};
    use mito::config::EngineConfig;
@@ -38,44 +38,6 @@ mod tests {
        Ok(catalog_manager)
    }

    #[tokio::test]
    async fn test_rename_table() {
        common_telemetry::init_default_ut_logging();
        let catalog_manager = create_local_catalog_manager().await.unwrap();
        // register table
        let table_name = "test_table";
        let table_id = 42;
        let table = Arc::new(NumbersTable::new(table_id));
        let request = RegisterTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            table_id,
            table: table.clone(),
        };
        assert!(catalog_manager.register_table(request).await.unwrap());

        // rename table
        let new_table_name = "table_t";
        let rename_table_req = RenameTableRequest {
            catalog: DEFAULT_CATALOG_NAME.to_string(),
            schema: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            new_table_name: new_table_name.to_string(),
            table_id,
        };
        assert!(catalog_manager
            .rename_table(rename_table_req)
            .await
            .unwrap());

        let registered_table = catalog_manager
            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
            .unwrap()
            .unwrap();
        assert_eq!(registered_table.table_info().ident.table_id, table_id);
    }

    #[tokio::test]
    async fn test_duplicate_register() {
        let catalog_manager = create_local_catalog_manager().await.unwrap();
@@ -107,7 +69,8 @@ mod tests {
        assert!(
            err.to_string()
                .contains("Table `greptime.public.test_table` already exists"),
            "Actual error message: {err}",
            "Actual error message: {}",
            err
        );
    }

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -189,10 +189,10 @@ impl TableEngine for MockTableEngine {
        unimplemented!()
    }

    fn get_table(
    fn get_table<'a>(
        &self,
        _ctx: &EngineContext,
        table_ref: &TableReference,
        table_ref: &'a TableReference,
    ) -> table::Result<Option<TableRef>> {
        futures::executor::block_on(async {
            Ok(self
@@ -204,7 +204,7 @@ impl TableEngine for MockTableEngine {
        })
    }

    fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
    fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
        futures::executor::block_on(async {
            self.tables
                .read()

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,35 +1,32 @@
[package]
name = "client"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
api = { path = "../api" }
arrow-flight.workspace = true
async-stream.workspace = true
async-stream = "0.3"
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
datafusion.workspace = true
datafusion = "14.0.0"
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
snafu.workspace = true
tonic.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"

[dev-dependencies]
datanode = { path = "../datanode" }
substrait = { path = "../common/substrait" }
tokio.workspace = true
tokio = { version = "1.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

106
src/client/examples/insert.rs
Normal file
@@ -0,0 +1,106 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use client::{Client, Database};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let db = Database::new("greptime", client);

    let (columns, row_count) = insert_data();

    let expr = InsertExpr {
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        region_number: 0,
        columns,
        row_count,
    };
    db.insert(expr).await.unwrap();
}

fn insert_data() -> (Vec<Column>, u32) {
    const SEMANTIC_TAG: i32 = 0;
    const SEMANTIC_FIELD: i32 = 1;
    const SEMANTIC_TS: i32 = 2;

    let row_count = 4;

    let host_vals = column::Values {
        string_values: vec![
            "host1".to_string(),
            "host2".to_string(),
            "host3".to_string(),
            "host4".to_string(),
        ],
        ..Default::default()
    };
    let host_column = Column {
        column_name: "host".to_string(),
        semantic_type: SEMANTIC_TAG,
        values: Some(host_vals),
        null_mask: vec![0],
        ..Default::default()
    };

    let cpu_vals = column::Values {
        f64_values: vec![0.31, 0.41, 0.2],
        ..Default::default()
    };
    let cpu_column = Column {
        column_name: "cpu".to_string(),
        semantic_type: SEMANTIC_FIELD,
        values: Some(cpu_vals),
        null_mask: vec![2],
        ..Default::default()
    };

    let mem_vals = column::Values {
        f64_values: vec![0.1, 0.2, 0.3],
        ..Default::default()
    };
    let mem_column = Column {
        column_name: "memory".to_string(),
        semantic_type: SEMANTIC_FIELD,
        values: Some(mem_vals),
        null_mask: vec![4],
        ..Default::default()
    };

    let ts_vals = column::Values {
        i64_values: vec![100, 101, 102, 103],
        ..Default::default()
    };
    let ts_column = Column {
        column_name: "ts".to_string(),
        semantic_type: SEMANTIC_TS,
        values: Some(ts_vals),
        null_mask: vec![0],
        ..Default::default()
    };

    (
        vec![host_column, cpu_column, mem_column, ts_column],
        row_count,
    )
}
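
A note on the `null_mask` fields in this example: with four rows, `cpu` and `memory` each supply only three values, so one row of each is null. The standalone sketch below shows the bitmap reading this example appears to rely on — bit `i` of a little-endian byte mask flags row `i` as null; that interpretation is an assumption, not something stated in the diff.

// A std-only sketch of the assumed null-mask convention: bit `i` of the
// little-endian byte mask marks row `i` as null, and non-null values are
// packed densely into the values vector.
fn is_null(null_mask: &[u8], row: usize) -> bool {
    null_mask
        .get(row / 8)
        .map(|byte| byte & (1 << (row % 8)) != 0)
        .unwrap_or(false)
}

fn main() {
    let cpu_mask = [2u8]; // 0b0000_0010: row 1 ("host2") carries no cpu value
    let mem_mask = [4u8]; // 0b0000_0100: row 2 ("host3") carries no memory value
    assert!((0..4).map(|r| is_null(&cpu_mask, r)).eq([false, true, false, false]));
    assert!((0..4).map(|r| is_null(&mem_mask, r)).eq([false, false, true, false]));
}
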
@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
use client::admin::Admin;
use client::{Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
@@ -32,45 +33,46 @@ fn main() {
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);

    let create_table_expr = CreateTableExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
    let create_table_expr = CreateExpr {
        catalog_name: Some("greptime".to_string()),
        schema_name: Some("public".to_string()),
        table_name: "test_logical_dist_exec".to_string(),
        desc: "".to_string(),
        desc: None,
        column_defs: vec![
            ColumnDef {
                name: "timestamp".to_string(),
                datatype: ColumnDataType::TimestampMillisecond as i32,
                is_nullable: false,
                default_constraint: vec![],
                default_constraint: None,
            },
            ColumnDef {
                name: "key".to_string(),
                datatype: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                default_constraint: None,
            },
            ColumnDef {
                name: "value".to_string(),
                datatype: ColumnDataType::Uint64 as i32,
                is_nullable: false,
                default_constraint: vec![],
                default_constraint: None,
            },
        ],
        time_index: "timestamp".to_string(),
        primary_keys: vec!["key".to_string()],
        create_if_not_exists: false,
        table_options: Default::default(),
        table_id: Some(TableId { id: 1024 }),
        table_id: Some(1024),
        region_ids: vec![0],
    };

    let db = Database::with_client(client);
    let result = db.create(create_table_expr).await.unwrap();
    let admin = Admin::new("create table", client.clone());
    let result = admin.create(create_table_expr).await.unwrap();
    event!(Level::INFO, "create table result: {:#?}", result);

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
    let db = Database::new("greptime", client);
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);

34
src/client/examples/select.rs
Normal file
@@ -0,0 +1,34 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use client::{Client, Database, Select};
use tracing::{event, Level};

fn main() {
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
        .unwrap();

    run();
}

#[tokio::main]
async fn run() {
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let db = Database::new("greptime", client);

    let sql = Select::Sql("select * from demo".to_string());
    let result = db.select(sql).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}
137
src/client/src/admin.rs
Normal file
@@ -0,0 +1,137 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::*;
use common_error::prelude::StatusCode;
use common_query::Output;
use snafu::prelude::*;

use crate::database::PROTOCOL_VERSION;
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
pub struct Admin {
    name: String,
    client: Client,
}

impl Admin {
    pub fn new(name: impl Into<String>, client: Client) -> Self {
        Self {
            name: name.into(),
            client,
        }
    }

    pub async fn create(&self, expr: CreateExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::Create(expr)),
        };
        self.do_request(expr).await
    }

    pub async fn do_request(&self, expr: AdminExpr) -> Result<AdminResult> {
        // `remove(0)` is safe because of `do_requests`'s invariants.
        Ok(self.do_requests(vec![expr]).await?.remove(0))
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::Alter(expr)),
        };
        self.do_request(expr).await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::DropTable(expr)),
        };

        self.do_request(expr).await
    }

    /// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
    async fn do_requests(&self, exprs: Vec<AdminExpr>) -> Result<Vec<AdminResult>> {
        let expr_count = exprs.len();
        let req = AdminRequest {
            name: self.name.clone(),
            exprs,
        };

        let resp = self.client.admin(req).await?;

        let results = resp.results;
        ensure!(
            results.len() == expr_count,
            error::MissingResultSnafu {
                name: "admin_results",
                expected: expr_count,
                actual: results.len(),
            }
        );
        Ok(results)
    }

    pub async fn create_database(&self, expr: CreateDatabaseExpr) -> Result<AdminResult> {
        let header = ExprHeader {
            version: PROTOCOL_VERSION,
        };
        let expr = AdminExpr {
            header: Some(header),
            expr: Some(admin_expr::Expr::CreateDatabase(expr)),
        };
        Ok(self.do_requests(vec![expr]).await?.remove(0))
    }
}

pub fn admin_result_to_output(admin_result: AdminResult) -> Result<Output> {
    let header = admin_result.header.context(error::MissingHeaderSnafu)?;
    if !StatusCode::is_success(header.code) {
        return error::DatanodeSnafu {
            code: header.code,
            msg: header.err_msg,
        }
        .fail();
    }

    let result = admin_result.result.context(error::MissingResultSnafu {
        name: "result".to_string(),
        expected: 1_usize,
        actual: 0_usize,
    })?;
    let output = match result {
        admin_result::Result::Mutate(mutate) => {
            if mutate.failure != 0 {
                return error::MutateFailureSnafu {
                    failure: mutate.failure,
                }
                .fail();
            }
            Output::AffectedRows(mutate.success as usize)
        }
    };
    Ok(output)
}
@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,8 @@

use std::sync::Arc;

use arrow_flight::flight_service_client::FlightServiceClient;
use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
@@ -23,21 +24,6 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct FlightClient {
    addr: String,
    client: FlightServiceClient<Channel>,
}

impl FlightClient {
    pub(crate) fn addr(&self) -> &str {
        &self.addr
    }

    pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient<Channel> {
        &mut self.client
    }
}

#[derive(Clone, Debug, Default)]
pub struct Client {
    inner: Arc<Inner>,
@@ -118,23 +104,57 @@ impl Client {
        self.inner.set_peers(urls);
    }

    pub(crate) fn make_client(&self) -> Result<FlightClient> {
        let addr = self
    pub async fn admin(&self, req: AdminRequest) -> Result<AdminResponse> {
        let req = BatchRequest {
            admins: vec![req],
            ..Default::default()
        };

        let mut res = self.batch(req).await?;
        res.admins.pop().context(error::MissingResultSnafu {
            name: "admins",
            expected: 1_usize,
            actual: 0_usize,
        })
    }

    pub async fn database(&self, req: DatabaseRequest) -> Result<DatabaseResponse> {
        let req = BatchRequest {
            databases: vec![req],
            ..Default::default()
        };

        let mut res = self.batch(req).await?;
        res.databases.pop().context(error::MissingResultSnafu {
            name: "database",
            expected: 1_usize,
            actual: 0_usize,
        })
    }

    pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
        let peer = self
            .inner
            .get_peer()
            .context(error::IllegalGrpcClientStateSnafu {
                err_msg: "No available peer found",
            })?;
        let mut client = self.make_client(&peer)?;
        let result = client
            .batch(req)
            .await
            .context(error::TonicStatusSnafu { addr: peer })?;
        Ok(result.into_inner())
    }

    fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
        let addr = addr.as_ref();
        let channel = self
            .inner
            .channel_manager
            .get(&addr)
            .context(error::CreateChannelSnafu { addr: &addr })?;
        Ok(FlightClient {
            addr,
            client: FlightServiceClient::new(channel),
        })
            .get(addr)
            .context(error::CreateChannelSnafu { addr })?;
        Ok(GreptimeClient::new(channel))
    }
}

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,173 +12,237 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::FromStr;
use std::sync::Arc;

use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::codec::SelectResult as GrpcSelectResult;
use api::v1::column::SemanticType;
use api::v1::{
    AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
    QueryRequest, RequestHeader,
    object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
    MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, SelectExpr,
};
use arrow_flight::{FlightData, Ticket};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_error::status_code::StatusCode;
use common_grpc_expr::column_to_vector;
use common_query::Output;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu};
use crate::{error, Client, Result};

pub const PROTOCOL_VERSION: u32 = 1;

#[derive(Clone, Debug)]
pub struct Database {
    // The "catalog" and "schema" to be used in processing the requests at the server side.
    // They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
    // They will be carried in the request header.
    catalog: String,
    schema: String,

    name: String,
    client: Client,
}

impl Database {
    pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
    pub fn new(name: impl Into<String>, client: Client) -> Self {
        Self {
            catalog: catalog.into(),
            schema: schema.into(),
            name: name.into(),
            client,
        }
    }

    pub fn with_client(client: Client) -> Self {
        Self::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
    pub fn name(&self) -> &str {
        &self.name
    }

    pub fn set_schema(&mut self, schema: impl Into<String>) {
        self.schema = schema.into();
    }
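
The comment above frames `catalog`/`schema` as a per-request hint, much like MySQL's `USE`. Below is a short usage sketch of the newer three-argument constructor and the `sql` method from this hunk; the endpoint, catalog, schema, and query are placeholders, and the exact composition is an assumption rather than an example taken from this diff.

use client::{Client, Database};

#[tokio::main]
async fn main() {
    // The catalog/schema pair set here is carried in each request's header,
    // like the current database selected by `USE` in MySQL.
    let client = Client::with_urls(vec!["127.0.0.1:3001"]);
    let mut db = Database::new("greptime", "public", client);

    let _output = db.sql("select * from demo").await.unwrap();

    // Switch the schema "hint" without rebuilding the client.
    db.set_schema("another_schema");
}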
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
|
||||
self.do_get(Request::Insert(request)).await
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.to_string())),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_get(&self, request: Request) -> Result<Output> {
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
}),
|
||||
request: Some(request),
|
||||
pub async fn insert(&self, insert: InsertExpr) -> Result<ObjectResult> {
|
||||
let header = ExprHeader {
|
||||
version: PROTOCOL_VERSION,
|
||||
};
|
||||
let request = Ticket {
|
||||
ticket: request.encode_to_vec(),
|
||||
let expr = ObjectExpr {
|
||||
header: Some(header),
|
||||
expr: Some(object_expr::Expr::Insert(insert)),
|
||||
};
|
||||
self.object(expr).await?.try_into()
|
||||
}
|
||||
|
||||
let mut client = self.client.make_client()?;
|
||||
|
||||
// TODO(LFC): Streaming get flight data.
|
||||
let flight_data: Vec<FlightData> = client
|
||||
.mut_inner()
|
||||
.do_get(request)
|
||||
.and_then(|response| response.into_inner().try_collect())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = get_metadata_value(&e, INNER_ERROR_CODE)
|
||||
.and_then(|s| StatusCode::from_str(&s).ok())
|
||||
.unwrap_or(StatusCode::Unknown);
|
||||
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
|
||||
error::ExternalSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::FlightGetSnafu {
|
||||
tonic_code: e.code(),
|
||||
addr: client.addr(),
|
||||
})
|
||||
.unwrap_err()
|
||||
})?;
|
||||
|
||||
let decoder = &mut FlightDecoder::default();
|
||||
let flight_messages = flight_data
|
||||
pub async fn batch_insert(&self, insert_exprs: Vec<InsertExpr>) -> Result<Vec<ObjectResult>> {
|
||||
let header = ExprHeader {
|
||||
version: PROTOCOL_VERSION,
|
||||
};
|
||||
let obj_exprs = insert_exprs
|
||||
.into_iter()
|
||||
.map(|x| decoder.try_decode(x).context(ConvertFlightDataSnafu))
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
.map(|expr| ObjectExpr {
|
||||
header: Some(header.clone()),
|
||||
expr: Some(object_expr::Expr::Insert(expr)),
|
||||
})
|
||||
.collect();
|
||||
self.objects(obj_exprs)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|result| result.try_into())
|
||||
.collect()
|
||||
}
|
||||
|
||||
let output = if let Some(FlightMessage::AffectedRows(rows)) = flight_messages.get(0) {
|
||||
ensure!(
|
||||
flight_messages.len() == 1,
|
||||
IllegalFlightMessagesSnafu {
|
||||
reason: "Expect 'AffectedRows' Flight messages to be one and only!"
|
||||
pub async fn select(&self, expr: Select) -> Result<ObjectResult> {
|
||||
let select_expr = match expr {
|
||||
Select::Sql(sql) => SelectExpr {
|
||||
expr: Some(select_expr::Expr::Sql(sql)),
|
||||
},
|
||||
};
|
||||
self.do_select(select_expr).await
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
|
||||
let select_expr = SelectExpr {
|
||||
expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
|
||||
};
|
||||
self.do_select(select_expr).await
|
||||
}
|
||||
|
||||
async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
|
||||
let header = ExprHeader {
|
||||
version: PROTOCOL_VERSION,
|
||||
};
|
||||
|
||||
let expr = ObjectExpr {
|
||||
header: Some(header),
|
||||
expr: Some(object_expr::Expr::Select(select_expr)),
|
||||
};
|
||||
|
||||
let obj_result = self.object(expr).await?;
|
||||
obj_result.try_into()
|
||||
}
|
||||
|
||||
pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
|
||||
let res = self.objects(vec![expr]).await?.pop().unwrap();
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
async fn objects(&self, exprs: Vec<ObjectExpr>) -> Result<Vec<GrpcObjectResult>> {
|
||||
let expr_count = exprs.len();
|
||||
let req = DatabaseRequest {
|
||||
name: self.name.clone(),
|
||||
exprs,
|
||||
};
|
||||
|
||||
let res = self.client.database(req).await?;
|
||||
let res = res.results;
|
||||
|
||||
ensure!(
|
||||
res.len() == expr_count,
|
||||
error::MissingResultSnafu {
|
||||
name: "object_results",
|
||||
expected: expr_count,
|
||||
actual: res.len(),
|
||||
}
|
||||
);
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ObjectResult {
|
||||
Select(GrpcSelectResult),
|
||||
Mutate(GrpcMutateResult),
|
||||
}
|
||||
|
||||
impl TryFrom<api::v1::ObjectResult> for ObjectResult {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(object_result: api::v1::ObjectResult) -> std::result::Result<Self, Self::Error> {
|
||||
let header = object_result.header.context(error::MissingHeaderSnafu)?;
|
||||
if !StatusCode::is_success(header.code) {
|
||||
return DatanodeSnafu {
|
||||
code: header.code,
|
||||
msg: header.err_msg,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
let obj_result = object_result.result.context(error::MissingResultSnafu {
|
||||
name: "result".to_string(),
|
||||
expected: 1_usize,
|
||||
actual: 0_usize,
|
||||
})?;
|
||||
Ok(match obj_result {
|
||||
object_result::Result::Select(select) => {
|
||||
let result = (*select.raw_data).try_into().context(DecodeSelectSnafu)?;
|
||||
ObjectResult::Select(result)
|
||||
}
|
||||
object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub enum Select {
|
||||
Sql(String),
|
||||
}
|
||||
|
||||
impl TryFrom<ObjectResult> for Output {
    type Error = error::Error;

    fn try_from(value: ObjectResult) -> Result<Self> {
        let output = match value {
            ObjectResult::Select(select) => {
                let vectors = select
                    .columns
                    .iter()
                    .map(|column| {
                        column_to_vector(column, select.row_count).context(ColumnToVectorSnafu)
                    })
                    .collect::<Result<Vec<VectorRef>>>()?;

                let column_schemas = select
                    .columns
                    .iter()
                    .zip(vectors.iter())
                    .map(|(column, vector)| {
                        let datatype = vector.data_type();
                        // nullable or not, does not affect the output
                        let mut column_schema =
                            ColumnSchema::new(&column.column_name, datatype, true);
                        if column.semantic_type == SemanticType::Timestamp as i32 {
                            column_schema = column_schema.with_time_index(true);
                        }
                        column_schema
                    })
                    .collect::<Vec<ColumnSchema>>();

                let schema = Arc::new(Schema::try_new(column_schemas).context(ConvertSchemaSnafu)?);
                let recordbatches = if vectors.is_empty() {
                    RecordBatches::try_new(schema, vec![])
                } else {
                    RecordBatch::new(schema, vectors)
                        .and_then(|batch| RecordBatches::try_new(batch.schema.clone(), vec![batch]))
                }
            );
            Output::AffectedRows(*rows)
        } else {
            let recordbatches = flight_messages_to_recordbatches(flight_messages)
                .context(ConvertFlightDataSnafu)?;
            Output::RecordBatches(recordbatches)
                .context(error::CreateRecordBatchesSnafu)?;
                Output::RecordBatches(recordbatches)
            }
            ObjectResult::Mutate(mutate) => {
                if mutate.failure != 0 {
                    return error::MutateFailureSnafu {
                        failure: mutate.failure,
                    }
                    .fail();
                }
                Output::AffectedRows(mutate.success as usize)
            }
        };
        Ok(output)
    }
}

fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
    e.metadata()
        .get(key)
        .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

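For context, a hedged sketch of how a helper like `get_metadata_value` could be used to rebuild the `ExternalError` variant from a failed request — a hypothetical caller, assuming the `INNER_ERROR_CODE`/`INNER_ERROR_MSG` keys from `common_error::prelude` and the `EnumString`-derived `FromStr` on `StatusCode` that appear elsewhere in this diff:

use std::str::FromStr;

// Hypothetical caller: pull the code/message the server stashed in the
// gRPC trailers and turn them back into the client's ExternalError.
fn external_error_from_status(e: &tonic::Status) -> Option<Error> {
    let code = get_metadata_value(e, INNER_ERROR_CODE)
        .and_then(|s| StatusCode::from_str(&s).ok())?;
    let msg = get_metadata_value(e, INNER_ERROR_MSG)?;
    Some(Error::ExternalError { code, msg })
}
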
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::helper::ColumnDataTypeWrapper;
    use api::v1::Column;
    use common_grpc::select::{null_mask, values};
    use common_grpc_expr::column_to_vector;
    use datatypes::prelude::{Vector, VectorRef};
    use datatypes::vectors::{
        BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
        Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
        UInt32Vector, UInt64Vector, UInt8Vector,
    };

    use super::*;

    #[test]
    fn test_column_to_vector() {
        let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));

@@ -1,10 +1,10 @@
// Copyright 2023 Greptime Team
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,43 +13,67 @@
// limitations under the License.

use std::any::Any;
use std::sync::Arc;

use api::serde::DecodeError;
use common_error::prelude::*;
use tonic::Code;
use datafusion::physical_plan::ExecutionPlan;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Illegal Flight messages, reason: {}", reason))]
    IllegalFlightMessages {
        reason: String,
    #[snafu(display("Connect failed to {}, source: {}", url, source))]
    ConnectFailed {
        url: String,
        source: tonic::transport::Error,
        backtrace: Backtrace,
    },

    #[snafu(display(
        "Failed to do Flight get, addr: {}, code: {}, source: {}",
        addr,
        tonic_code,
        source
    ))]
    FlightGet {
        addr: String,
        tonic_code: Code,
        source: BoxedError,
    #[snafu(display("Missing {}, expected {}, actual {}", name, expected, actual))]
    MissingResult {
        name: String,
        expected: usize,
        actual: usize,
    },

    #[snafu(display("Failed to convert FlightData, source: {}", source))]
    ConvertFlightData {
    #[snafu(display("Missing result header"))]
    MissingHeader,

    #[snafu(display("Tonic internal error, addr: {}, source: {}", addr, source))]
    TonicStatus {
        addr: String,
        source: tonic::Status,
        backtrace: Backtrace,
    },

    #[snafu(display("Failed to decode select result, source: {}", source))]
    DecodeSelect { source: DecodeError },

    #[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
    Datanode { code: u32, msg: String },

    #[snafu(display("Failed to encode physical plan: {:?}, source: {}", physical, source))]
    EncodePhysical {
        physical: Arc<dyn ExecutionPlan>,
        #[snafu(backtrace)]
        source: common_grpc::Error,
    },

    #[snafu(display("Mutate result has failure {}", failure))]
    MutateFailure { failure: u32, backtrace: Backtrace },

    #[snafu(display("Column datatype error, source: {}", source))]
    ColumnDataType {
        #[snafu(backtrace)]
        source: api::error::Error,
    },

    #[snafu(display("Failed to create RecordBatches, source: {}", source))]
    CreateRecordBatches {
        #[snafu(backtrace)]
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Illegal GRPC client state: {}", err_msg))]
    IllegalGrpcClientState {
        err_msg: String,
@@ -59,6 +83,12 @@ pub enum Error {
    #[snafu(display("Missing required field in protobuf, field: {}", field))]
    MissingField { field: String, backtrace: Backtrace },

    #[snafu(display("Failed to convert schema, source: {}", source))]
    ConvertSchema {
        #[snafu(backtrace)]
        source: datatypes::error::Error,
    },

    #[snafu(display(
        "Failed to create gRPC channel, peer address: {}, source: {}",
        addr,
@@ -70,9 +100,11 @@ pub enum Error {
        source: common_grpc::error::Error,
    },

    /// Error deserialized from gRPC metadata
    #[snafu(display("{}", msg))]
    ExternalError { code: StatusCode, msg: String },
    #[snafu(display("Failed to convert column to vector, source: {}", source))]
    ColumnToVector {
        #[snafu(backtrace)]
        source: common_grpc_expr::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -80,15 +112,21 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::IllegalFlightMessages { .. }
            Error::ConnectFailed { .. }
            | Error::MissingResult { .. }
            | Error::MissingHeader { .. }
            | Error::TonicStatus { .. }
            | Error::DecodeSelect { .. }
            | Error::Datanode { .. }
            | Error::EncodePhysical { .. }
            | Error::MutateFailure { .. }
            | Error::ColumnDataType { .. }
            | Error::MissingField { .. } => StatusCode::Internal,
            Error::FlightGet { source, .. } => source.status_code(),
            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
                source.status_code()
            }
            Error::ConvertSchema { source } => source.status_code(),
            Error::CreateRecordBatches { source } => source.status_code(),
            Error::CreateChannel { source, .. } => source.status_code(),
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
            Error::ExternalError { code, .. } => *code,
            Error::ColumnToVector { source, .. } => source.status_code(),
        }
    }

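The error enum above follows a common snafu pattern: every variant carries its own context, and `status_code` centralizes the mapping. A self-contained sketch of that pattern, assuming only the `snafu` crate with its backtraces feature; the variant and code names are illustrative:

use snafu::{Backtrace, Snafu};

// Illustrative stand-in for the shared status-code type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StatusCode {
    Internal,
}

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Missing result header"))]
    MissingHeader { backtrace: Backtrace },

    #[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
    Datanode { code: u32, msg: String },
}

impl Error {
    pub fn status_code(&self) -> StatusCode {
        match self {
            // Leaf variants map to a fixed class; variants wrapping another
            // ErrorExt source would instead delegate via source.status_code().
            Error::MissingHeader { .. } | Error::Datanode { .. } => StatusCode::Internal,
        }
    }
}
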
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod admin;
mod client;
mod database;
mod error;
@@ -20,5 +21,5 @@ pub mod load_balance;
pub use api;

pub use self::client::Client;
pub use self::database::Database;
pub use self::database::{Database, ObjectResult, Select};
pub use self::error::{Error, Result};

@@ -1,9 +1,9 @@
[package]
name = "cmd"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
default-run = "greptime"
license = "Apache-2.0"

[[bin]]
name = "greptime"
@@ -18,17 +18,17 @@ common-telemetry = { path = "../common/telemetry", features = [
] }
datanode = { path = "../datanode" }
frontend = { path = "../frontend" }
futures.workspace = true
futures = "0.3"
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
serde.workspace = true
serde = "1.0"
servers = { path = "../servers" }
snafu.workspace = true
tokio.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.18", features = ["full"] }
toml = "0.5"

[dev-dependencies]
serde.workspace = true
serde = "1.0"
tempdir = "0.3"

[build-dependencies]

@@ -14,7 +14,7 @@

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use servers::Mode;
use snafu::ResultExt;
@@ -54,8 +54,6 @@ struct StartCommand {
    #[clap(long)]
    rpc_addr: Option<String>,
    #[clap(long)]
    rpc_hostname: Option<String>,
    #[clap(long)]
    mysql_addr: Option<String>,
    #[clap(long)]
    metasrv_addr: Option<String>,
@@ -96,11 +94,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
        if let Some(addr) = cmd.rpc_addr {
            opts.rpc_addr = addr;
        }

        if cmd.rpc_hostname.is_some() {
            opts.rpc_hostname = cmd.rpc_hostname;
        }

        if let Some(addr) = cmd.mysql_addr {
            opts.mysql_addr = addr;
        }
@@ -128,11 +121,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
        }

        if let Some(data_dir) = cmd.data_dir {
            opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
            opts.storage = ObjectStoreConfig::File { data_dir };
        }

        if let Some(wal_dir) = cmd.wal_dir {
            opts.wal.dir = wal_dir;
            opts.wal_dir = wal_dir;
        }
        Ok(opts)
    }
@@ -141,7 +134,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::time::Duration;

    use datanode::datanode::ObjectStoreConfig;
    use servers::Mode;
@@ -159,7 +151,7 @@ mod tests {
        };
        let options: DatanodeOptions = cmd.try_into().unwrap();
        assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
        assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
        assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
        assert_eq!(4, options.mysql_runtime_size);
        let MetaClientOpts {
@@ -175,11 +167,10 @@ mod tests {
        assert!(!tcp_nodelay);

        match options.storage {
            ObjectStoreConfig::File(FileConfig { data_dir }) => {
            ObjectStoreConfig::File { data_dir } => {
                assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
            }
            ObjectStoreConfig::S3 { .. } => unreachable!(),
            ObjectStoreConfig::Oss { .. } => unreachable!(),
        };
    }
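The storage hunks above switch between two shapes for the file-backed variant. A side-by-side sketch of both, assuming serde's internally-tagged representation (the `#[serde(tag = "type")]` attribute is a guess, not shown in the diff):

use serde::{Deserialize, Serialize};

// Shape A: tuple variant wrapping a dedicated FileConfig struct,
// matching `ObjectStoreConfig::File(FileConfig { data_dir })`.
#[derive(Debug, Serialize, Deserialize)]
pub struct FileConfig {
    pub data_dir: String,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ObjectStoreConfigA {
    File(FileConfig),
}

// Shape B: struct variant carrying the field inline, matching
// `ObjectStoreConfig::File { data_dir }` on the other side of the diff.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ObjectStoreConfigB {
    File { data_dir: String },
}
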

@@ -225,11 +216,6 @@ mod tests {
            ..Default::default()
        })
        .unwrap();
        assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
        assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
        assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
        assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
        assert!(!dn_opts.wal.sync_write);
        assert_eq!(Some(42), dn_opts.node_id);
        let MetaClientOpts {
            metasrv_addrs: metasrv_addr,

@@ -61,13 +61,6 @@ pub enum Error {
        #[snafu(backtrace)]
        source: servers::auth::Error,
    },

    #[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
    UnsupportedSelectorType {
        selector_type: String,
        #[snafu(backtrace)]
        source: meta_srv::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -78,7 +71,6 @@ impl ErrorExt for Error {
            Error::StartDatanode { source } => source.status_code(),
            Error::StartFrontend { source } => source.status_code(),
            Error::StartMetaServer { source } => source.status_code(),
            Error::UnsupportedSelectorType { source, .. } => source.status_code(),
            Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
                StatusCode::InvalidArguments
            }

@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use anymap::AnyMap;
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -22,7 +21,6 @@ use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::Plugins;
use meta_client::MetaClientOpts;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
@@ -88,21 +86,21 @@ pub struct StartCommand {

impl StartCommand {
    async fn run(self) -> Result<()> {
        let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
        let plugins = load_frontend_plugins(&self.user_provider)?;
        let opts: FrontendOptions = self.try_into()?;

        let mut instance = Instance::try_new_distributed(&opts)
            .await
            .context(error::StartFrontendSnafu)?;
        instance.set_plugins(plugins.clone());

        let mut frontend = Frontend::new(opts, instance, plugins);
        let mut frontend = Frontend::new(
            opts.clone(),
            Instance::try_new_distributed(&opts)
                .await
                .context(error::StartFrontendSnafu)?,
            plugins,
        );
        frontend.start().await.context(error::StartFrontendSnafu)
    }
}

pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
    let mut plugins = Plugins::new();
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<AnyMap> {
    let mut plugins = AnyMap::new();

    if let Some(provider) = user_provider {
        let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
@@ -287,7 +285,7 @@ mod tests {

        let provider = provider.unwrap();
        let result = provider
            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
            .auth(Identity::UserId("test", None), Password::PlainText("test"))
            .await;
        assert!(result.is_ok());
    }

@@ -13,7 +13,7 @@
// limitations under the License.

use clap::Parser;
use common_telemetry::{info, logging, warn};
use common_telemetry::logging;
use meta_srv::bootstrap;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
@@ -56,10 +56,6 @@ struct StartCommand {
    store_addr: Option<String>,
    #[clap(short, long)]
    config_file: Option<String>,
    #[clap(short, long)]
    selector: Option<String>,
    #[clap(long)]
    use_memory_store: bool,
}

impl StartCommand {
@@ -95,17 +91,6 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
        if let Some(addr) = cmd.store_addr {
            opts.store_addr = addr;
        }
        if let Some(selector_type) = &cmd.selector {
            opts.selector = selector_type[..]
                .try_into()
                .context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
            info!("Using {} selector", selector_type);
        }

        if cmd.use_memory_store {
            warn!("Using memory store for Meta. Make sure you are running tests.");
            opts.use_memory_store = true;
        }

        Ok(opts)
    }
@@ -113,8 +98,6 @@ impl TryFrom<StartCommand> for MetaSrvOptions {

#[cfg(test)]
mod tests {
    use meta_srv::selector::SelectorType;

    use super::*;

    #[test]
@@ -124,14 +107,11 @@ mod tests {
            server_addr: Some("127.0.0.1:3002".to_string()),
            store_addr: Some("127.0.0.1:2380".to_string()),
            config_file: None,
            selector: Some("LoadBased".to_string()),
            use_memory_store: false,
        };
        let options: MetaSrvOptions = cmd.try_into().unwrap();
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
        assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
        assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
        assert_eq!(SelectorType::LoadBased, options.selector);
    }

    #[test]
@@ -140,18 +120,15 @@ mod tests {
            bind_addr: None,
            server_addr: None,
            store_addr: None,
            selector: None,
            config_file: Some(format!(
                "{}/../../config/metasrv.example.toml",
                std::env::current_dir().unwrap().as_path().to_str().unwrap()
            )),
            use_memory_store: false,
        };
        let options: MetaSrvOptions = cmd.try_into().unwrap();
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
        assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
        assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
        assert_eq!(15, options.datanode_lease_secs);
        assert_eq!(SelectorType::LeaseBased, options.selector);
    }
}

@@ -12,11 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use anymap::AnyMap;
use clap::Parser;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -26,8 +25,6 @@ use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
use frontend::promql::PromqlOptions;
use frontend::Plugins;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -64,7 +61,6 @@ impl SubCommand {
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StandaloneOptions {
    pub http_options: Option<HttpOptions>,
    pub grpc_options: Option<GrpcOptions>,
@@ -73,9 +69,8 @@ pub struct StandaloneOptions {
    pub opentsdb_options: Option<OpentsdbOptions>,
    pub influxdb_options: Option<InfluxdbOptions>,
    pub prometheus_options: Option<PrometheusOptions>,
    pub promql_options: Option<PromqlOptions>,
    pub mode: Mode,
    pub wal: WalConfig,
    pub wal_dir: String,
    pub storage: ObjectStoreConfig,
    pub enable_memory_catalog: bool,
}
@@ -90,9 +85,8 @@ impl Default for StandaloneOptions {
            opentsdb_options: Some(OpentsdbOptions::default()),
            influxdb_options: Some(InfluxdbOptions::default()),
            prometheus_options: Some(PrometheusOptions::default()),
            promql_options: Some(PromqlOptions::default()),
            mode: Mode::Standalone,
            wal: WalConfig::default(),
            wal_dir: "/tmp/greptimedb/wal".to_string(),
            storage: ObjectStoreConfig::default(),
            enable_memory_catalog: false,
        }
@@ -109,7 +103,6 @@ impl StandaloneOptions {
            opentsdb_options: self.opentsdb_options,
            influxdb_options: self.influxdb_options,
            prometheus_options: self.prometheus_options,
            promql_options: self.promql_options,
            mode: self.mode,
            meta_client_opts: None,
        }
@@ -117,7 +110,7 @@ impl StandaloneOptions {

    fn datanode_options(self) -> DatanodeOptions {
        DatanodeOptions {
            wal: self.wal,
            wal_dir: self.wal_dir,
            storage: self.storage,
            enable_memory_catalog: self.enable_memory_catalog,
            ..Default::default()
@@ -157,7 +150,7 @@ impl StartCommand {
    async fn run(self) -> Result<()> {
        let enable_memory_catalog = self.enable_memory_catalog;
        let config_file = self.config_file.clone();
        let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
        let plugins = load_frontend_plugins(&self.user_provider)?;
        let fe_opts = FrontendOptions::try_from(self)?;
        let dn_opts: DatanodeOptions = {
            let mut opts: StandaloneOptions = if let Some(path) = config_file {
@@ -194,12 +187,11 @@ impl StartCommand {
/// Build frontend instance in standalone mode
async fn build_frontend(
    fe_opts: FrontendOptions,
    plugins: Arc<Plugins>,
    plugins: AnyMap,
    datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
    let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
    frontend_instance.set_script_handler(datanode_instance);
    frontend_instance.set_plugins(plugins.clone());
    Ok(Frontend::new(fe_opts, frontend_instance, plugins))
}

@@ -229,7 +221,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
        if addr == datanode_grpc_addr {
            return IllegalConfigSnafu {
                msg: format!(
                    "gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
                    "gRPC listen address conflicts with datanode reserved gRPC addr: {}",
                    datanode_grpc_addr
                ),
            }
            .fail();
@@ -327,10 +320,6 @@ mod tests {
            fe_opts.mysql_options.as_ref().unwrap().addr
        );
        assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
        assert_eq!(
            None,
            fe_opts.mysql_options.as_ref().unwrap().reject_no_database
        );
        assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
    }

@@ -358,7 +347,7 @@ mod tests {
        assert!(provider.is_some());
        let provider = provider.unwrap();
        let result = provider
            .authenticate(Identity::UserId("test", None), Password::PlainText("test"))
            .auth(Identity::UserId("test", None), Password::PlainText("test"))
            .await;
        assert!(result.is_ok());
    }

@@ -37,23 +37,12 @@ mod tests {
    use crate::error::Result;

    #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
    #[serde(default)]
    struct MockConfig {
        path: String,
        port: u32,
        host: String,
    }

    impl Default for MockConfig {
        fn default() -> Self {
            Self {
                path: "test".to_string(),
                port: 0,
                host: "localhost".to_string(),
            }
        }
    }

    #[test]
    fn test_from_file() -> Result<()> {
        let config = MockConfig {
@@ -74,21 +63,6 @@ mod tests {
        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config, config);

        // Only host in file
        let mut file = File::create(&test_file).unwrap();
        file.write_all("host='greptime.test'\n".as_bytes()).unwrap();

        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config.host, "greptime.test");
        assert_eq!(loaded_config.port, 0);
        assert_eq!(loaded_config.path, "test");

        // Truncate the file.
        let file = File::create(&test_file).unwrap();
        file.set_len(0).unwrap();
        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config, MockConfig::default());

        Ok(())
    }
}

@@ -1,8 +1,8 @@
[package]
name = "common-base"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"

[dependencies]
bitvec = "1.0"
@@ -10,7 +10,4 @@ bytes = { version = "1.1", features = ["serde"] }
common-error = { path = "../error" }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

[dev-dependencies]
toml = "0.5"
snafu = { version = "0.7", features = ["backtraces"] }

@@ -15,7 +15,5 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
#[allow(clippy::all)]
pub mod readable_size;

pub use bit_vec::BitVec;

@@ -1,321 +0,0 @@
|
||||
// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.
|
||||
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::{Display, Write};
|
||||
use std::ops::{Div, Mul};
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::de::{Unexpected, Visitor};
|
||||
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
const UNIT: u64 = 1;
|
||||
|
||||
const BINARY_DATA_MAGNITUDE: u64 = 1024;
|
||||
pub const B: u64 = UNIT;
|
||||
pub const KIB: u64 = B * BINARY_DATA_MAGNITUDE;
|
||||
pub const MIB: u64 = KIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
|
||||
|
||||
#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
|
||||
pub struct ReadableSize(pub u64);
|
||||
|
||||
impl ReadableSize {
|
||||
pub const fn kb(count: u64) -> ReadableSize {
|
||||
ReadableSize(count * KIB)
|
||||
}
|
||||
|
||||
pub const fn mb(count: u64) -> ReadableSize {
|
||||
ReadableSize(count * MIB)
|
||||
}
|
||||
|
||||
pub const fn gb(count: u64) -> ReadableSize {
|
||||
ReadableSize(count * GIB)
|
||||
}
|
||||
|
||||
pub const fn as_mb(self) -> u64 {
|
||||
self.0 / MIB
|
||||
}
|
||||
}
|
||||
|
||||
impl Div<u64> for ReadableSize {
|
||||
type Output = ReadableSize;
|
||||
|
||||
fn div(self, rhs: u64) -> ReadableSize {
|
||||
ReadableSize(self.0 / rhs)
|
||||
}
|
||||
}
|
||||
|
||||
impl Div<ReadableSize> for ReadableSize {
|
||||
type Output = u64;
|
||||
|
||||
fn div(self, rhs: ReadableSize) -> u64 {
|
||||
self.0 / rhs.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Mul<u64> for ReadableSize {
|
||||
type Output = ReadableSize;
|
||||
|
||||
fn mul(self, rhs: u64) -> ReadableSize {
|
||||
ReadableSize(self.0 * rhs)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ReadableSize {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let size = self.0;
|
||||
let mut buffer = String::new();
|
||||
if size == 0 {
|
||||
write!(buffer, "{}KiB", size).unwrap();
|
||||
} else if size % PIB == 0 {
|
||||
write!(buffer, "{}PiB", size / PIB).unwrap();
|
||||
} else if size % TIB == 0 {
|
||||
write!(buffer, "{}TiB", size / TIB).unwrap();
|
||||
} else if size % GIB as u64 == 0 {
|
||||
write!(buffer, "{}GiB", size / GIB).unwrap();
|
||||
} else if size % MIB as u64 == 0 {
|
||||
write!(buffer, "{}MiB", size / MIB).unwrap();
|
||||
} else if size % KIB as u64 == 0 {
|
||||
write!(buffer, "{}KiB", size / KIB).unwrap();
|
||||
} else {
|
||||
return serializer.serialize_u64(size);
|
||||
}
|
||||
serializer.serialize_str(&buffer)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for ReadableSize {
|
||||
type Err = String;
|
||||
|
||||
// This method parses value in binary unit.
|
||||
fn from_str(s: &str) -> Result<ReadableSize, String> {
|
||||
let size_str = s.trim();
|
||||
if size_str.is_empty() {
|
||||
return Err(format!("{:?} is not a valid size.", s));
|
||||
}
|
||||
|
||||
if !size_str.is_ascii() {
|
||||
return Err(format!("ASCII string is expected, but got {:?}", s));
|
||||
}
|
||||
|
||||
// size: digits and '.' as decimal separator
|
||||
let size_len = size_str
|
||||
.to_string()
|
||||
.chars()
|
||||
.take_while(|c| char::is_ascii_digit(c) || ['.', 'e', 'E', '-', '+'].contains(c))
|
||||
.count();
|
||||
|
||||
// unit: alphabetic characters
|
||||
let (size, unit) = size_str.split_at(size_len);
|
||||
|
||||
let unit = match unit.trim() {
|
||||
"K" | "KB" | "KiB" => KIB,
|
||||
"M" | "MB" | "MiB" => MIB,
|
||||
"G" | "GB" | "GiB" => GIB,
|
||||
"T" | "TB" | "TiB" => TIB,
|
||||
"P" | "PB" | "PiB" => PIB,
|
||||
"B" | "" => B,
|
||||
_ => {
|
||||
return Err(format!(
|
||||
"only B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, and PiB are supported: {:?}",
|
||||
s
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
match size.parse::<f64>() {
|
||||
Ok(n) => Ok(ReadableSize((n * unit as f64) as u64)),
|
||||
Err(_) => Err(format!("invalid size string: {:?}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ReadableSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.0 >= PIB {
|
||||
write!(f, "{:.1}PiB", self.0 as f64 / PIB as f64)
|
||||
} else if self.0 >= TIB {
|
||||
write!(f, "{:.1}TiB", self.0 as f64 / TIB as f64)
|
||||
} else if self.0 >= GIB {
|
||||
write!(f, "{:.1}GiB", self.0 as f64 / GIB as f64)
|
||||
} else if self.0 >= MIB {
|
||||
write!(f, "{:.1}MiB", self.0 as f64 / MIB as f64)
|
||||
} else if self.0 >= KIB {
|
||||
write!(f, "{:.1}KiB", self.0 as f64 / KIB as f64)
|
||||
} else {
|
||||
write!(f, "{}B", self.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ReadableSize {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
struct SizeVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for SizeVisitor {
|
||||
type Value = ReadableSize;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
formatter.write_str("valid size")
|
||||
}
|
||||
|
||||
fn visit_i64<E>(self, size: i64) -> Result<ReadableSize, E>
|
||||
where
|
||||
E: de::Error,
|
||||
{
|
||||
if size >= 0 {
|
||||
self.visit_u64(size as u64)
|
||||
} else {
|
||||
Err(E::invalid_value(Unexpected::Signed(size), &self))
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_u64<E>(self, size: u64) -> Result<ReadableSize, E>
|
||||
where
|
||||
E: de::Error,
|
||||
{
|
||||
Ok(ReadableSize(size))
|
||||
}
|
||||
|
||||
fn visit_str<E>(self, size_str: &str) -> Result<ReadableSize, E>
|
||||
where
|
||||
E: de::Error,
|
||||
{
|
||||
size_str.parse().map_err(E::custom)
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_any(SizeVisitor)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_readable_size() {
|
||||
let s = ReadableSize::kb(2);
|
||||
assert_eq!(s.0, 2048);
|
||||
assert_eq!(s.as_mb(), 0);
|
||||
let s = ReadableSize::mb(2);
|
||||
assert_eq!(s.0, 2 * 1024 * 1024);
|
||||
assert_eq!(s.as_mb(), 2);
|
||||
let s = ReadableSize::gb(2);
|
||||
assert_eq!(s.0, 2 * 1024 * 1024 * 1024);
|
||||
assert_eq!(s.as_mb(), 2048);
|
||||
|
||||
assert_eq!((ReadableSize::mb(2) / 2).0, MIB);
|
||||
assert_eq!((ReadableSize::mb(1) / 2).0, 512 * KIB);
|
||||
assert_eq!(ReadableSize::mb(2) / ReadableSize::kb(1), 2048);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_readable_size() {
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct SizeHolder {
|
||||
s: ReadableSize,
|
||||
}
|
||||
|
||||
let legal_cases = vec![
|
||||
(0, "0KiB"),
|
||||
(2 * KIB, "2KiB"),
|
||||
(4 * MIB, "4MiB"),
|
||||
(5 * GIB, "5GiB"),
|
||||
(7 * TIB, "7TiB"),
|
||||
(11 * PIB, "11PiB"),
|
||||
];
|
||||
for (size, exp) in legal_cases {
|
||||
let c = SizeHolder {
|
||||
s: ReadableSize(size),
|
||||
};
|
||||
let res_str = toml::to_string(&c).unwrap();
|
||||
let exp_str = format!("s = {:?}\n", exp);
|
||||
assert_eq!(res_str, exp_str);
|
||||
let res_size: SizeHolder = toml::from_str(&exp_str).unwrap();
|
||||
assert_eq!(res_size.s.0, size);
|
||||
}
|
||||
|
||||
let c = SizeHolder {
|
||||
s: ReadableSize(512),
|
||||
};
|
||||
let res_str = toml::to_string(&c).unwrap();
|
||||
assert_eq!(res_str, "s = 512\n");
|
||||
let res_size: SizeHolder = toml::from_str(&res_str).unwrap();
|
||||
assert_eq!(res_size.s.0, c.s.0);
|
||||
|
||||
let decode_cases = vec![
|
||||
(" 0.5 PB", PIB / 2),
|
||||
("0.5 TB", TIB / 2),
|
||||
("0.5GB ", GIB / 2),
|
||||
("0.5MB", MIB / 2),
|
||||
("0.5KB", KIB / 2),
|
||||
("0.5P", PIB / 2),
|
||||
("0.5T", TIB / 2),
|
||||
("0.5G", GIB / 2),
|
||||
("0.5M", MIB / 2),
|
||||
("0.5K", KIB / 2),
|
||||
("23", 23),
|
||||
("1", 1),
|
||||
("1024B", KIB),
|
||||
// units with binary prefixes
|
||||
(" 0.5 PiB", PIB / 2),
|
||||
("1PiB", PIB),
|
||||
("0.5 TiB", TIB / 2),
|
||||
("2 TiB", TIB * 2),
|
||||
("0.5GiB ", GIB / 2),
|
||||
("787GiB ", GIB * 787),
|
||||
("0.5MiB", MIB / 2),
|
||||
("3MiB", MIB * 3),
|
||||
("0.5KiB", KIB / 2),
|
||||
("1 KiB", KIB),
|
||||
// scientific notation
|
||||
("0.5e6 B", B * 500000),
|
||||
("0.5E6 B", B * 500000),
|
||||
("1e6B", B * 1000000),
|
||||
("8E6B", B * 8000000),
|
||||
("8e7", B * 80000000),
|
||||
("1e-1MB", MIB / 10),
|
||||
("1e+1MB", MIB * 10),
|
||||
("0e+10MB", 0),
|
||||
];
|
||||
for (src, exp) in decode_cases {
|
||||
let src = format!("s = {:?}", src);
|
||||
let res: SizeHolder = toml::from_str(&src).unwrap();
|
||||
assert_eq!(res.s.0, exp);
|
||||
}
|
||||
|
||||
let illegal_cases = vec![
|
||||
"0.5kb", "0.5kB", "0.5Kb", "0.5k", "0.5g", "b", "gb", "1b", "B", "1K24B", " 5_KB",
|
||||
"4B7", "5M_",
|
||||
];
|
||||
for src in illegal_cases {
|
||||
let src_str = format!("s = {:?}", src);
|
||||
assert!(toml::from_str::<SizeHolder>(&src_str).is_err(), "{}", src);
|
||||
}
|
||||
}
|
||||
}
|
||||
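As a usage note for the `ReadableSize` type deleted above: its custom `Serialize`/`Deserialize` lets configs accept either human-readable strings or raw byte counts. A minimal sketch, assuming only `serde` and `toml` as dependencies; the struct and field names are illustrative:

use serde::Deserialize;

// Hypothetical config struct: human-readable sizes parse straight from
// TOML thanks to ReadableSize's string-or-integer Deserialize.
#[derive(Deserialize)]
struct ExampleConfig {
    file_size: ReadableSize,
    purge_threshold: ReadableSize,
}

fn main() {
    let cfg: ExampleConfig =
        toml::from_str("file_size = '1GB'\npurge_threshold = '50GB'").unwrap();
    // Raw byte counts are exposed via the public tuple field.
    assert_eq!(cfg.file_size.0, 1024 * 1024 * 1024);
    // Div<ReadableSize> yields the plain ratio.
    assert_eq!(cfg.purge_threshold / cfg.file_size, 50);
}
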
@@ -1,8 +1,8 @@
[package]
name = "common-catalog"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"

[dependencies]
async-trait = "0.1"
@@ -11,11 +11,11 @@ common-telemetry = { path = "../telemetry" }
datatypes = { path = "../../datatypes" }
lazy_static = "1.4"
regex = "1.6"
serde.workspace = true
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
chrono = "0.4"
tempdir = "0.3"
tokio.workspace = true
tokio = { version = "1.0", features = ["full"] }

@@ -14,9 +14,3 @@

pub mod consts;
pub mod error;

/// Formats a table's fully-qualified name.
#[inline]
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
    format!("{catalog}.{schema}.{table}")
}

@@ -1,9 +1,8 @@
[package]
name = "common-error"
version.workspace = true
edition.workspace = true
license.workspace = true
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"

[dependencies]
snafu = { version = "0.7", features = ["backtraces"] }
strum = { version = "0.24", features = ["std", "derive"] }

@@ -33,99 +33,72 @@ pub trait ErrorExt: std::error::Error {
    fn as_any(&self) -> &dyn Any;
}

/// An opaque boxed error based on errors that implement [ErrorExt] trait.
pub struct BoxedError {
    inner: Box<dyn crate::ext::ErrorExt + Send + Sync>,
}

impl BoxedError {
    pub fn new<E: crate::ext::ErrorExt + Send + Sync + 'static>(err: E) -> Self {
        Self {
            inner: Box::new(err),
/// A helper macro to define an opaque boxed error based on errors that implement [ErrorExt] trait.
#[macro_export]
macro_rules! define_opaque_error {
    ($Error:ident) => {
        /// An error that behaves like `Box<dyn Error>`.
        ///
        /// Define this error as a new type instead of using `Box<dyn Error>` directly so we can implement
        /// more methods or traits for it.
        pub struct $Error {
            inner: Box<dyn $crate::ext::ErrorExt + Send + Sync>,
        }
    }

        impl $Error {
            pub fn new<E: $crate::ext::ErrorExt + Send + Sync + 'static>(err: E) -> Self {
                Self {
                    inner: Box::new(err),
                }
            }
        }

        impl std::fmt::Debug for $Error {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                // Use the pretty debug format of inner error for opaque error.
                let debug_format = $crate::format::DebugFormat::new(&*self.inner);
                debug_format.fmt(f)
            }
        }

        impl std::fmt::Display for $Error {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{}", self.inner)
            }
        }

        impl std::error::Error for $Error {
            fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
                self.inner.source()
            }
        }

        impl $crate::ext::ErrorExt for $Error {
            fn status_code(&self) -> $crate::status_code::StatusCode {
                self.inner.status_code()
            }

            fn backtrace_opt(&self) -> Option<&$crate::snafu::Backtrace> {
                self.inner.backtrace_opt()
            }

            fn as_any(&self) -> &dyn std::any::Any {
                self.inner.as_any()
            }
        }

        // Implement ErrorCompat for this opaque error so the backtrace is also available
        // via `ErrorCompat::backtrace()`.
        impl $crate::snafu::ErrorCompat for $Error {
            fn backtrace(&self) -> Option<&$crate::snafu::Backtrace> {
                self.inner.backtrace_opt()
            }
        }
    };
}

impl std::fmt::Debug for BoxedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Use the pretty debug format of inner error for opaque error.
        let debug_format = crate::format::DebugFormat::new(&*self.inner);
        debug_format.fmt(f)
    }
}

impl std::fmt::Display for BoxedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.inner)
    }
}

impl std::error::Error for BoxedError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        self.inner.source()
    }
}

impl crate::ext::ErrorExt for BoxedError {
    fn status_code(&self) -> crate::status_code::StatusCode {
        self.inner.status_code()
    }

    fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
        self.inner.backtrace_opt()
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self.inner.as_any()
    }
}

// Implement ErrorCompat for this opaque error so the backtrace is also available
// via `ErrorCompat::backtrace()`.
impl crate::snafu::ErrorCompat for BoxedError {
    fn backtrace(&self) -> Option<&crate::snafu::Backtrace> {
        self.inner.backtrace_opt()
    }
}

/// Error type with a plain error message.
#[derive(Debug)]
pub struct PlainError {
    msg: String,
    status_code: StatusCode,
}

impl PlainError {
    pub fn new(msg: String, status_code: StatusCode) -> Self {
        Self { msg, status_code }
    }
}

impl std::fmt::Display for PlainError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.msg)
    }
}

impl std::error::Error for PlainError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        None
    }
}

impl crate::ext::ErrorExt for PlainError {
    fn status_code(&self) -> crate::status_code::StatusCode {
        self.status_code
    }

    fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
        None
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self as _
    }
}
// Define a general boxed error.
define_opaque_error!(BoxedError);

#[cfg(test)]
mod tests {
@@ -158,7 +131,7 @@ mod tests {

        assert!(ErrorCompat::backtrace(&err).is_some());

        let msg = format!("{err:?}");
        let msg = format!("{:?}", err);
        assert!(msg.contains("\nBacktrace:\n"));
        let fmt_msg = format!("{:?}", DebugFormat::new(&err));
        assert_eq!(msg, fmt_msg);
@@ -178,7 +151,7 @@ mod tests {
        assert!(err.as_any().downcast_ref::<MockError>().is_some());
        assert!(err.source().is_some());

        let msg = format!("{err:?}");
        let msg = format!("{:?}", err);
        assert!(msg.contains("\nBacktrace:\n"));
        assert!(msg.contains("Caused by"));

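On the macro side of this hunk, each call site mints its own opaque error type. A short sketch of such a call site, assuming the `common_error` crate as shown; `ServerError` is an illustrative name:

use common_error::define_opaque_error;
use common_error::prelude::*;

// Expanding the macro mints a new opaque error type with the same
// Debug/Display/ErrorExt plumbing as BoxedError.
define_opaque_error!(ServerError);

fn wrap<E: ErrorExt + Send + Sync + 'static>(err: E) -> ServerError {
    // Any concrete ErrorExt error can be boxed behind the opaque type.
    ServerError::new(err)
}
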
@@ -31,11 +31,11 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
        write!(f, "{}.", self.0)?;
        if let Some(source) = self.0.source() {
            // The source error uses debug format for more verbose info.
            write!(f, " Caused by: {source:?}")?;
            write!(f, " Caused by: {:?}", source)?;
        }
        if let Some(backtrace) = self.0.backtrace_opt() {
            // Add a newline to separate causes and backtrace.
            write!(f, "\nBacktrace:\n{backtrace}")?;
            write!(f, "\nBacktrace:\n{}", backtrace)?;
        }

        Ok(())

@@ -24,9 +24,6 @@ pub mod prelude {
|
||||
pub use crate::ext::{BoxedError, ErrorExt};
|
||||
pub use crate::format::DebugFormat;
|
||||
pub use crate::status_code::StatusCode;
|
||||
|
||||
pub const INNER_ERROR_CODE: &str = "INNER_ERROR_CODE";
|
||||
pub const INNER_ERROR_MSG: &str = "INNER_ERROR_MSG";
|
||||
}
|
||||
|
||||
pub use snafu;
|
||||
|
||||
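The `@@ -24,9 +24,6 @@` hunk trims two constants out of the crate's prelude. For context, the prelude pattern itself looks like the sketch below; the module layout is hypothetical and `ErrorExt` is trimmed to a simplified signature, shown only to illustrate the re-export trick:

// lib.rs of a hypothetical crate
pub mod ext {
    pub trait ErrorExt {
        fn status_code(&self) -> u32;
    }
}

// The prelude gathers the most-used items so downstream code can pull
// them all in with a single glob import.
pub mod prelude {
    pub use crate::ext::ErrorExt;
}

mod consumer {
    // One import brings every prelude item into scope.
    use crate::prelude::*;

    pub fn code_of(e: &dyn ErrorExt) -> u32 {
        e.status_code()
    }
}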
@@ -1,10 +1,10 @@
-// Copyright 2023 Greptime Team
+// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-//     http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -1,10 +1,10 @@
-// Copyright 2023 Greptime Team
+// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-//     http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,8 @@

use std::fmt;

-use strum::EnumString;
-
/// Common status code for public API.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StatusCode {
    // ====== Begin of common status code ==============
    /// Success.
@@ -53,7 +51,6 @@ pub enum StatusCode {
    TableNotFound = 4001,
    TableColumnNotFound = 4002,
    TableColumnExists = 4003,
-    DatabaseNotFound = 4004,
    // ====== End of catalog related status code =======

    // ====== Begin of storage related status code =====
@@ -77,8 +74,6 @@ pub enum StatusCode {
    AuthHeaderNotFound = 7003,
    /// Invalid http authorization header
    InvalidAuthHeader = 7004,
-    /// Illegal request to connect catalog-schema
-    AccessDenied = 7005,
    // ====== End of auth related status code =====
}

@@ -91,7 +86,7 @@ impl StatusCode {
impl fmt::Display for StatusCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The current debug format is suitable to display.
-        write!(f, "{self:?}")
+        write!(f, "{:?}", self)
    }
}

@@ -100,7 +95,7 @@ mod tests {
    use super::*;

    fn assert_status_code_display(code: StatusCode, msg: &str) {
-        let code_msg = format!("{code}");
+        let code_msg = format!("{}", code);
        assert_eq!(msg, code_msg);
    }
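The `StatusCode` hunks are mostly removals (the `strum::EnumString` derive and the `DatabaseNotFound` and `AccessDenied` variants), plus the usual format-string rewrite. Below is a small runnable sketch of the Display-via-Debug trick used here, on a trimmed stand-in enum; it also shows that `write!(f, "{self:?}")` and `write!(f, "{:?}", self)` are the same formatting, spelled with and without Rust 1.58's inline captured identifiers:

use std::fmt;

// Trimmed stand-in for the StatusCode enum in the diff.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum StatusCode {
    Success,
    TableNotFound,
}

impl fmt::Display for StatusCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The derived Debug output is just the variant name, which is
        // already readable, so Display defers to it.
        write!(f, "{:?}", self)
    }
}

fn main() {
    assert_eq!(format!("{}", StatusCode::Success), "Success");
    assert_eq!(format!("{}", StatusCode::TableNotFound), "TableNotFound");
}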
@@ -1,8 +1,8 @@
[package]
name = "common-function-macro"
-version.workspace = true
-edition.workspace = true
-license.workspace = true
+version = "0.1.0"
+edition = "2021"
+license = "Apache-2.0"

[lib]
proc-macro = true
@@ -15,5 +15,5 @@ syn = "1.0"
arc-swap = "1.0"
common-query = { path = "../query" }
datatypes = { path = "../../datatypes" }
-snafu.workspace = true
+snafu = { version = "0.7", features = ["backtraces"] }
static_assertions = "1.1.0"
@@ -1,10 +1,10 @@
-// Copyright 2023 Greptime Team
+// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-//     http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@@ -1,10 +1,10 @@
-// Copyright 2023 Greptime Team
+// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-//     http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -1,8 +1,8 @@
[package]
+edition = "2021"
name = "common-function"
-edition.workspace = true
-version.workspace = true
-license.workspace = true
+version = "0.1.0"
+license = "Apache-2.0"

[dependencies]
arc-swap = "1.0"
@@ -11,14 +11,14 @@ common-error = { path = "../error" }
common-function-macro = { path = "../function-macro" }
common-query = { path = "../query" }
common-time = { path = "../time" }
-datafusion.workspace = true
+datafusion-common = "14.0.0"
datatypes = { path = "../../datatypes" }
libc = "0.2"
num = "0.4"
num-traits = "0.2"
once_cell = "1.10"
paste = "1.0"
-snafu.workspace = true
+snafu = { version = "0.7", features = ["backtraces"] }
statrs = "0.15"

[dev-dependencies]
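Both Cargo.toml hunks replace workspace-inherited dependencies with explicit pins, notably snafu = { version = "0.7", features = ["backtraces"] }. Here is a minimal sketch of what that feature is for, assuming snafu 0.7's derive and ErrorCompat API; the error type and lookup function are hypothetical:

use snafu::{Backtrace, ErrorCompat, Snafu};

// Hypothetical error type. The derive generates a `TableNotFoundSnafu`
// context selector that captures `backtrace` automatically on construction.
#[derive(Debug, Snafu)]
#[snafu(display("table {name} not found"))]
struct TableNotFoundError {
    name: String,
    backtrace: Backtrace,
}

fn lookup(name: &str) -> Result<(), TableNotFoundError> {
    TableNotFoundSnafu { name }.fail()
}

fn main() {
    let err = lookup("demo").unwrap_err();
    // With the "backtraces" feature enabled, the captured stack is reachable
    // through ErrorCompat; the assertion in the ext.rs tests relies on this.
    assert!(ErrorCompat::backtrace(&err).is_some());
}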
Some files were not shown because too many files have changed in this diff.