Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-07 13:52:59 +00:00)

Compare commits: v0.2.0-pre ... v0.3.0-nig (108 commits)
The 108 commits in this range, listed by SHA1:

fbb7db42aa, a1587595d9, abd5a8ecbb, 4ddab8e982, 1833e487a4, c93b5743e8, 550c494d25, 2ab0e42d6f, 05e6ca1e14,
b9661818f2, f86390345c, 7191bb9652, 34c7f78861, 610651fa8f, c48067f88d, ec1b95c250, fbf1ddd006, d679cfcb53,
2c82ded975, d4f3f617e4, 6fe117d7d5, b0ab641602, 224ec9bd25, d86b3386dc, c8301feed7, b1920c41a4, c471007edd,
2818f466d3, d7a906e0bd, 6e1bb9e458, 494ad570c5, 12d59e6341, 479ef9d379, 93ffe1ff33, d461328238, 6aae5b7286,
7dbac89000, 0b0b5a10da, 51be35a7b1, 9e4887f29f, cca34aa914, 0ac50632aa, b1f7ad097a, a77a4a4bd1, 47f1cbaaed,
8e3c3cbc40, 9f0efc748d, 939a51aea9, bf35620904, 09f55e3cd8, b88d8e5b82, a709a5c842, fb9978e95d, ef4e473e6d,
1a245f35b9, 8d8a480dc1, 197c34bc17, 4d9afee8ef, 7f14d40798, eb50cee601, 92c0808766, f9ea6b63bf, 2287db7ff7,
69acf32914, b9db2cfd83, 6d247f73fd, 2cf828da3c, f2167663b2, 17daf4cdff, 7c6754d03e, e64fea3a15, 22b5a94d02,
d374859e24, c5dba29f9e, 9f442dedf9, 5d77ed00bb, c75845c570, 1ee9ad4ca1, f2cc912c87, 2a9f482bc7, d5e4662181,
9cd2cf630d, 7152a1b79e, f2cfd8e608, e8cd2f0e48, 830367b8f4, 37678e2e02, b6647af2e3, d2c90b4c59, 5a05e3107c,
e4cd08c750, e8bb00f0be, ff2784da0f, 4652b62481, 0e4d4f0300, 145f8eb5a7, de8b889701, 1c65987026, c6f024a171,
0c88bb09e3, f4190cfca6, b933ffddd0, 1214b5b43e, a47134a971, dc85a4b5bb, 0937ccdb61, 408de51be8, f7b7a9c801
@@ -12,4 +12,5 @@ rustflags = [
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
"-Aclippy::items_after_test_module",
]

@@ -3,6 +3,7 @@ GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
GT_S3_REGION=S3 region
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id

1 .github/ISSUE_TEMPLATE/bug_report.yml vendored
@@ -81,6 +81,5 @@ body:
Please walk us through and provide steps and details on how
to reproduce the issue. If possible, provide scripts that we
can run to trigger the bug.
render: bash
validations:
required: true

2 .github/workflows/apidoc.yml vendored
@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2023-02-26
RUST_TOOLCHAIN: nightly-2023-05-03

jobs:
apidoc:

5 .github/workflows/develop.yml vendored
@@ -24,7 +24,7 @@ on:
name: CI

env:
RUST_TOOLCHAIN: nightly-2023-02-26
RUST_TOOLCHAIN: nightly-2023-05-03

jobs:
typos:
@@ -216,7 +216,7 @@ jobs:
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
@@ -224,6 +224,7 @@ jobs:
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v2
197 .github/workflows/release.yml vendored
@@ -11,34 +11,24 @@ on:
name: Release

env:
RUST_TOOLCHAIN: nightly-2023-02-26
RUST_TOOLCHAIN: nightly-2023-05-03

SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
SCHEDULED_BUILD_VERSION_PREFIX: v0.3.0

SCHEDULED_PERIOD: nightly

CARGO_PROFILE: nightly

## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: false

jobs:
build:
name: Build binary
build-macos:
name: Build macOS binary
strategy:
matrix:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
@@ -49,16 +39,6 @@ jobs:
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
@@ -72,6 +52,99 @@ jobs:
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3

- name: Cache cargo assets
id: cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}

- name: Install Protoc for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install protobuf

- name: Install etcd for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install etcd
brew services start etcd

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}

- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

# - name: Run tests
#   if: env.DISABLE_RUN_TESTS == 'false'
#   run: make unit-test integration-test sqlness-test

- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum

- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz

- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum

build-linux:
name: Build linux binary
strategy:
matrix:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -96,11 +169,6 @@ jobs:
sudo cp protoc/bin/protoc /usr/local/bin/
sudo cp -r protoc/include/google /usr/local/include/

- name: Install Protoc for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install protobuf

- name: Install etcd for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
@@ -114,12 +182,6 @@ jobs:
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &

- name: Install etcd for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install etcd
brew services start etcd

- name: Install dependencies for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
@@ -146,6 +208,10 @@ jobs:
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test

- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
@@ -189,10 +255,6 @@ jobs:

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
shell: bash
run: |
@@ -215,7 +277,7 @@ jobs:

docker:
name: Build docker image
needs: [build]
needs: [build-linux, build-macos]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
@@ -301,7 +363,7 @@ jobs:
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
needs: [build-linux, build-macos, docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
@@ -319,35 +381,50 @@ jobs:
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV

# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
- name: Set whether it is the latest release
run: |
if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "prerelease=false" >> $GITHUB_ENV
echo "makeLatest=true" >> $GITHUB_ENV
else
echo "prerelease=true" >> $GITHUB_ENV
echo "makeLatest=false" >> $GITHUB_ENV
fi

- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}

- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
uses: ncipollo/release-action@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
tag: ${{ env.SCHEDULED_BUILD_VERSION }}
generateReleaseNotes: true
artifacts: |
**/greptime-*

- name: Publish release
uses: softprops/action-gh-release@v1
uses: ncipollo/release-action@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
generateReleaseNotes: true
artifacts: |
**/greptime-*

docker-push-uhub:
name: Push docker image to UCloud Container Registry
docker-push-acr:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
@@ -359,12 +436,12 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to UCloud Container Registry
- name: Login to alibaba cloud container registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
registry: registry.cn-hangzhou.aliyuncs.com
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}

- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
@@ -381,9 +458,9 @@ jobs:
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
- name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}
@@ -107,6 +107,6 @@ The core team will be thrilled if you participate in any way you like. When you

Also, see some extra GreptimeDB content:

- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [GreptimeDB Docs](https://docs.greptime.com/)
- [Learn GreptimeDB](https://greptime.com/product/db)
- [Greptime Inc. Website](https://greptime.com)

1785 Cargo.lock generated
File diff suppressed because it is too large.

25 Cargo.toml
@@ -15,6 +15,7 @@ members = [
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/procedure",
"src/common/procedure-test",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -47,7 +48,7 @@ members = [
]

[workspace.package]
version = "0.1.1"
version = "0.2.0"
edition = "2021"
license = "Apache-2.0"

@@ -59,12 +60,14 @@ arrow-schema = { version = "37.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
futures = "0.3"
futures-util = "0.3"
parquet = "37.0"
@@ -77,9 +80,15 @@ snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.33"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "f0798c4c648d89f51abe63e870919c75dd463199" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "f0798c4c648d89f51abe63e870919c75dd463199"

[profile.release]
debug = true
28 README.md
@@ -47,6 +47,14 @@ for years. Based on their best-practices, GreptimeDB is born to give you:

## Quick Start

### GreptimePlay

Try out the features of GreptimeDB right from your browser.

<a href="https://greptime.com/playground" target="_blank"><img
src="https://www.greptime.com/assets/greptime_play_button_colorful.1bbe2746.png"
alt="GreptimePlay" width="200px" /></a>

### Build

#### Build from Source
@@ -131,16 +139,16 @@ about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
SELECT * FROM monitor;
```

```TEXT
+-------+---------------------+------+--------+
| host | ts | cpu | memory |
+-------+---------------------+------+--------+
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
+-------+---------------------+------+--------+
3 rows in set (0.01 sec)
```
```TEXT
+-------+--------------------------+------+--------+
| host | ts | cpu | memory |
+-------+--------------------------+------+--------+
| host1 | 2022-08-19 16:32:35+0800 | 66.6 | 1024 |
| host2 | 2022-08-19 16:32:36+0800 | 77.7 | 2048 |
| host3 | 2022-08-19 16:32:37+0800 | 88.8 | 4096 |
+-------+--------------------------+------+--------+
3 rows in set (0.03 sec)
```

You can always cleanup test database by removing `/tmp/greptimedb`.
@@ -53,8 +53,11 @@ gc_duration = '30s'
checkpoint_on_startup = false

# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"
[procedure]
max_retry_times = 3
retry_delay = "500ms"

# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"

@@ -56,3 +56,8 @@ metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true

# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"

@@ -13,3 +13,8 @@ datanode_lease_secs = 15
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false

# Log options, see `standalone.example.toml`
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"

@@ -118,13 +118,15 @@ gc_duration = '30s'
checkpoint_on_startup = false

# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"
[procedure]
# Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"

# Log options
[logging]
# Specify logs directory.
dir = "/tmp/greptimedb/logs"
# Specify the log level [info | debug | error | warn]
level = "debug"
527 docs/schema-structs.md Normal file
@@ -0,0 +1,527 @@
# Schema Structs

# Common Schemas
The `datatypes` crate defines the elementary schema structs used to describe metadata.

## ColumnSchema
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as the default constraint and whether the column is the time index. The time index is the table column declared with the `TIME INDEX` constraint. We can convert a `ColumnSchema` into an arrow `Field` and convert the `Field` back into the `ColumnSchema` without losing metadata.

```rust
pub struct ColumnSchema {
    pub name: String,
    pub data_type: ConcreteDataType,
    is_nullable: bool,
    is_time_index: bool,
    default_constraint: Option<ColumnDefaultConstraint>,
    metadata: Metadata,
}
```

## Schema
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata, including the index of the time index column and the version of this schema. As with `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.

```rust
use arrow::datatypes::Schema as ArrowSchema;

pub struct Schema {
    column_schemas: Vec<ColumnSchema>,
    name_to_index: HashMap<String, usize>,
    arrow_schema: Arc<ArrowSchema>,
    timestamp_index: Option<usize>,
    version: u32,
}

pub type SchemaRef = Arc<Schema>;
```

We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Most of the time we use our `ColumnSchema` and `Schema` structs instead of arrow's `Field` and `Schema`, unless we need to invoke third-party libraries (like DataFusion or Arrow Flight) that rely on arrow.

## RawSchema
`Schema` contains derived fields such as the map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. Since these fields can be reconstructed from the `ColumnSchema` sequence, we don't want to serialize them, which is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead we introduce a new struct, [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24), which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we first convert it into a `RawSchema` and then serialize the `RawSchema`.

```rust
pub struct RawSchema {
    pub column_schemas: Vec<ColumnSchema>,
    pub timestamp_index: Option<usize>,
    pub version: u32,
}
```
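
As a minimal sketch of that round trip (the conversion helpers and error handling here are assumptions for illustration, not the exact API):

```rust
// Hypothetical sketch: serialize a Schema by converting it to RawSchema first.
// Assumes `RawSchema: From<&Schema>` and `Schema: TryFrom<RawSchema>` style
// conversions and serde support on RawSchema; names are illustrative.
fn schema_to_json(schema: &Schema) -> serde_json::Result<String> {
    let raw = RawSchema::from(schema); // derived fields are dropped here
    serde_json::to_string(&raw)
}

fn schema_from_json(json: &str) -> Result<Schema, Box<dyn std::error::Error>> {
    let raw: RawSchema = serde_json::from_str(json)?;
    // Rebuilds name_to_index and the cached arrow schema during the conversion.
    Ok(Schema::try_from(raw)?)
}
```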

We want to keep `Schema` simple and avoid putting too much business-related metadata in it, since many different structs and traits rely on it.

# Schema of the Table
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
```rust
pub struct TableMeta {
    pub schema: SchemaRef,
    pub primary_key_indices: Vec<usize>,
    pub value_indices: Vec<usize>,
    // ...
}
```

The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement that created the table.

The field `primary_key_indices` stores the indices of the primary key columns. The field `value_indices` records the indices of the value columns, i.e. columns that are neither part of the primary key nor the time index (we sometimes call them field columns).

Suppose we create a table with the following SQL:
```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
```

Then the table's `TableMeta` may look like this:
```json
{
  "schema":{
    "column_schemas":[
      "ts",
      "host",
      "usage_user",
      "usage_system",
      "datacenter"
    ],
    "time_index":0,
    "version":0
  },
  "primary_key_indices":[
    4,
    1
  ],
  "value_indices":[
    2,
    3
  ]
}
```
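
To make the relationship concrete, here is a small illustrative helper (not taken from the codebase) that derives `value_indices` the way the example above implies:

```rust
// Illustrative only: value columns are the columns that are neither part of
// the primary key nor the time index.
fn value_indices(num_columns: usize, primary_key_indices: &[usize], time_index: usize) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key_indices.contains(i))
        .collect()
}

// For the `cpu` table above: 5 columns, primary key indices [4, 1], time index 0.
// value_indices(5, &[4, 1], 0) == vec![2, 3], matching `value_indices` in the JSON.
```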

# Schemas of the storage engine
We split a table into one or more units that share the same schema and store these units in the storage engine. Each such unit is a region.

The storage engine maintains region schemas in a more complicated way because it:
- adds internal columns that are invisible to users to store additional metadata for each row
- provides a data model similar to the key-value model, so it organizes columns in a different order
- maintains additional metadata like column id or column family

So the storage engine defines several schema structs:
- RegionSchema
- StoreSchema
- ProjectedSchema

## RegionSchema
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.

```rust
pub struct RegionSchema {
    user_schema: SchemaRef,
    store_schema: StoreSchemaRef,
    columns: ColumnsMetadataRef,
}
```

Each region reserves some columns, called `internal columns`, for internal usage:
- `__sequence`, the sequence number of a row
- `__op_type`, the operation type of a row, such as `PUT` or `DELETE`
- `__version`, a user-specified version of a row, reserved but not used. We might remove this in the future

The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
- User schema, a `Schema` struct that doesn't have internal columns
- Store schema, a `StoreSchema` struct that has internal columns

The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we ignore it here. We may remove this struct in the future.

`RegionSchema` organizes columns in the following order:
```
key columns, timestamp, [__version,] value columns, __sequence, __op_type
```

We can ignore the `__version` column because it is disabled now:

```
key columns, timestamp, value columns, __sequence, __op_type
```

Key columns are the columns of the table's primary key, and the timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
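
As a rough illustration (not the engine's actual code), the sort key of a row in the `cpu` region can be thought of as a tuple compared lexicographically; how the internal columns break ties is an implementation detail and is shown here only as an assumption:

```rust
// Illustrative sort key for the `cpu` region: primary key columns, then the
// timestamp, then the internal __sequence and __op_type columns.
fn sort_key(datacenter: &str, host: &str, ts_millis: i64, sequence: u64, op_type: u8)
    -> (String, String, i64, u64, u8) {
    // Tuples of Ord types compare lexicographically, matching
    // "key columns, timestamp, __sequence, __op_type".
    (datacenter.to_owned(), host.to_owned(), ts_millis, sequence, op_type)
}
```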

So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
  "user_schema":[
    "datacenter",
    "host",
    "ts",
    "usage_user",
    "usage_system"
  ],
  "store_schema":[
    "datacenter",
    "host",
    "ts",
    "usage_user",
    "usage_system",
    "__sequence",
    "__op_type"
  ]
}
```

## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows about all the internal columns.
```rust
struct StoreSchema {
    columns: Vec<ColumnMetadata>,
    schema: SchemaRef,
    row_key_end: usize,
    user_column_end: usize,
}
```

The columns in the `columns` and `schema` fields have the same order. `ColumnMetadata` holds metadata like the column id, the column family id, and the comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between a `StoreSchema` and an arrow `Schema`. We use this to persist the `StoreSchema` in SST files, since our SST format is Parquet, which can take an arrow `Schema` as its schema.

The `StoreSchema` of the region above is similar to this:
```json
{
  "schema":{
    "column_schemas":[
      "datacenter",
      "host",
      "ts",
      "usage_user",
      "usage_system",
      "__sequence",
      "__op_type"
    ],
    "time_index":2,
    "version":0
  },
  "row_key_end":3,
  "user_column_end":5
}
```

The key columns and the timestamp column form the row key of a row. We put them together so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
    #[inline]
    pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
        0..self.row_key_end
    }

    #[inline]
    pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
        self.row_key_end..self.user_column_end
    }
}
```

Another useful property of `StoreSchema` is that it always contains the key columns, the timestamp column, and the internal columns, because we need them to perform merge, deduplication, and delete. Projection on a `StoreSchema` only projects the value columns.
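
A minimal sketch of that behaviour, using the index layout from the example above (this helper is illustrative, not the actual implementation):

```rust
// Illustrative only: a projection on a StoreSchema keeps every row key column
// (keys + timestamp) and the trailing internal columns, and only filters the
// value columns in between.
fn projected_store_columns(
    row_key_end: usize,
    user_column_end: usize,
    total_columns: usize,
    projected_value_columns: &[usize],
) -> Vec<usize> {
    let mut cols: Vec<usize> = (0..row_key_end).collect(); // keys + timestamp
    cols.extend_from_slice(projected_value_columns);        // selected value columns
    cols.extend(user_column_end..total_columns);            // __sequence, __op_type
    cols
}

// For the cpu region and `select ts, usage_system`:
// projected_store_columns(3, 5, 7, &[4]) == vec![0, 1, 2, 4, 5, 6]
```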

## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
    projection: Option<Projection>,
    schema_to_read: StoreSchemaRef,
    projected_user_schema: SchemaRef,
}
```

We need to handle many cases while doing projection:
- The column order of the table and the region is different
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection orders
- We support `ALTER TABLE`, so data files may have different schemas

### Projection
Let's take an example to see how projection works. Suppose we want to select `ts` and `usage_system` from the `cpu` table.

```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);

select ts, usage_system from cpu;
```

The query engine uses the projection `[0, 3]` to scan the table. However, the columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
  "user_schema":[
    "datacenter",
    "host",
    "ts",
    "usage_user",
    "usage_system"
  ]
}
```
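
A small illustrative sketch of that index adjustment (the function name and signature are assumptions, not the real table engine API):

```rust
// Illustrative only: map a projection expressed against the table schema onto
// the region schema by matching column names.
fn adjust_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    projection: &[usize],
) -> Vec<usize> {
    projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("every table column exists in the region schema")
        })
        .collect()
}

// adjust_projection(
//     &["ts", "host", "usage_user", "usage_system", "datacenter"], // table order
//     &["datacenter", "host", "ts", "usage_user", "usage_system"], // region order
//     &[0, 3],
// ) == vec![2, 4]
```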

As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection, so we call it the `projected user schema`.

But the storage engine also needs to read the key columns, the timestamp column, and the internal columns, so the `ProjectedSchema` also maintains a projected `StoreSchema`.

The `Projection` struct is a helper used to compute the projected user schema and store schema.

So we can construct the following `ProjectedSchema`:
```json
{
  "schema_to_read":{
    "schema":{
      "column_schemas":[
        "datacenter",
        "host",
        "ts",
        "usage_system",
        "__sequence",
        "__op_type"
      ],
      "time_index":2,
      "version":0
    },
    "row_key_end":3,
    "user_column_end":4
  },
  "projected_user_schema":{
    "column_schemas":[
      "ts",
      "usage_system"
    ],
    "time_index":0
  }
}
```

As you can see, `schema_to_read` doesn't contain the column `usage_user`, which is not in the projection and therefore not intended to be read.

### ReadAdapter
As mentioned above, we can alter a table, so the underlying files (SSTs) and memtables in the storage engine may have different schemas.

To simplify the logic of `ProjectedSchema`, we handle the differences between schemas before projection (before constructing the `ProjectedSchema`). We introduce a [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.

So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
    source_schema: StoreSchemaRef,
    dest_schema: ProjectedSchemaRef,
    indices_in_result: Vec<Option<usize>>,
    is_source_needed: Vec<bool>,
}
```

For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.

The field `is_source_needed` stores whether a column in the source memtable or SST is needed.

Suppose we add a new column `usage_idle` to the table `cpu`:
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```

The new `StoreSchema` becomes:
```json
{
  "schema":{
    "column_schemas":[
      "datacenter",
      "host",
      "ts",
      "usage_user",
      "usage_system",
      "usage_idle",
      "__sequence",
      "__op_type"
    ],
    "time_index":2,
    "version":1
  },
  "row_key_end":3,
  "user_column_end":6
}
```

Note that we bump the version of the schema to 1.

Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading data written under the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
  "source_schema":{
    "schema":{
      "column_schemas":[
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system",
        "__sequence",
        "__op_type"
      ],
      "time_index":2,
      "version":0
    },
    "row_key_end":3,
    "user_column_end":5
  },
  "dest_schema":{
    "schema_to_read":{
      "schema":{
        "column_schemas":[
          "datacenter",
          "host",
          "ts",
          "usage_system",
          "usage_idle",
          "__sequence",
          "__op_type"
        ],
        "time_index":2,
        "version":1
      },
      "row_key_end":3,
      "user_column_end":5
    },
    "projected_user_schema":{
      "column_schemas":[
        "ts",
        "usage_system",
        "usage_idle"
      ],
      "time_index":0
    }
  },
  "indices_in_result":[
    0,
    1,
    2,
    3,
    null,
    4,
    5
  ],
  "is_source_needed":[
    true,
    true,
    true,
    false,
    true,
    true,
    true
  ]
}
```

We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null` and the `ReadAdapter` inserts a null column into the output row so that the output schema still contains `usage_idle`.
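
Conceptually, adapting a single row looks like the following sketch (illustrative only; the real adapter works on batches and the engine's value/vector types):

```rust
// Illustrative only: for every column the destination schema expects, either
// copy it from the source row by index or fill a null when the source schema
// predates the column (e.g. `usage_idle` in SSTs written before ALTER TABLE).
fn adapt_row<V: Clone>(
    source_row: &[V],
    indices_in_result: &[Option<usize>],
    null_value: V,
) -> Vec<V> {
    indices_in_result
        .iter()
        .map(|idx| match idx {
            Some(i) => source_row[*i].clone(),
            None => null_value.clone(),
        })
        .collect()
}
```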

The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.

```text
              RegionSchema
  ┌─────────────────────────────────┐
  │ user_schema:  Schema            │
  │ store_schema: StoreSchema (v1)  │
  └───────────────┬─────────────────┘
                  │ ProjectedSchema::new()
  ┌───────────────▼─────────────────┐
  │ ProjectedSchema                 │
  │  schema_to_read:                │
  │    StoreSchema (projected, v1)  │
  │  projected_user_schema: Schema  │
  └───────┬───────────────┬─────────┘
          │ dest schema   │ dest schema
  ┌───────▼──────┐  ┌─────▼────────┐
  │ ReadAdapter  │  │ ReadAdapter  │
  └───────▲──────┘  └─────▲────────┘
          │ source schema │ source schema
  ┌───────┴──────┐  ┌─────┴────────┐
  │ SST 0        │  │ SST 1        │
  │ StoreSchema  │  │ StoreSchema  │
  │ (version 0)  │  │ (version 1)  │
  └──────────────┘  └──────────────┘
```

# Conversion
This figure shows the conversion between schemas:
```text
TableMeta        ──schema────────────────────► Schema
Schema           ──From──────────────────────► RawSchema
RawSchema        ──TryFrom───────────────────► Schema
Schema           ──arrow_schema()────────────► ArrowSchema
ArrowSchema      ──TryFrom───────────────────► Schema
ArrowSchema      ──TryFrom───────────────────► StoreSchema
RegionSchema     ──user_schema()─────────────► Schema
RegionSchema     ──store_schema()────────────► StoreSchema
RegionSchema     ──columns───────────────────► ColumnsMetadata
RegionSchema     ──ProjectedSchema::new()────► ProjectedSchema
ProjectedSchema  ──projected_user_schema()───► Schema
ProjectedSchema  ──store_schema()────────────► StoreSchema
StoreSchema      ──schema()──────────────────► Schema
StoreSchema      ──columns───────────────────► Vec<ColumnMetadata>
ColumnsMetadata  ──columns───────────────────► Vec<ColumnMetadata>
```
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-02-26"
channel = "nightly-2023-05-03"

@@ -7,9 +7,12 @@ set -e
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
OUT_DIR="${1:-$SCRIPT_DIR}"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"

echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR
# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets.

@@ -51,13 +51,17 @@ get_os_type
get_arch_type

if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"

# Use the latest nightly version.
if [ "${VERSION}" = "latest" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest/download/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
else
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
if [ -z "${VERSION}" ]; then
echo "Failed to get the latest version."
exit 1
fi
fi

echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"

wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0bebe5f69c91cdfbce85cb8f45f9fcd28185261c" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e8abf8241c908448dce595399e89c89a40d048bd" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

@@ -24,8 +24,10 @@ datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util.workspace = true
key-lock = "0.1"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
metrics.workspace = true
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
@@ -1,318 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Catalog adapter between datafusion and greptime query engine.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use datafusion::catalog::catalog::{
|
||||
CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider,
|
||||
};
|
||||
use datafusion::catalog::schema::SchemaProvider as DfSchemaProvider;
|
||||
use datafusion::datasource::TableProvider as DfTableProvider;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use snafu::ResultExt;
|
||||
use table::table::adapter::{DfTableProviderAdapter, TableAdapter};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, Result, SchemaProviderOperationSnafu};
|
||||
use crate::{
|
||||
CatalogListRef, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
pub struct DfCatalogListAdapter {
|
||||
catalog_list: CatalogListRef,
|
||||
}
|
||||
|
||||
impl DfCatalogListAdapter {
|
||||
pub fn new(catalog_list: CatalogListRef) -> DfCatalogListAdapter {
|
||||
DfCatalogListAdapter { catalog_list }
|
||||
}
|
||||
}
|
||||
|
||||
impl DfCatalogList for DfCatalogListAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: Arc<dyn DfCatalogProvider>,
|
||||
) -> Option<Arc<dyn DfCatalogProvider>> {
|
||||
let catalog_adapter = Arc::new(CatalogProviderAdapter {
|
||||
df_catalog_provider: catalog,
|
||||
});
|
||||
self.catalog_list
|
||||
.register_catalog(name, catalog_adapter)
|
||||
.expect("datafusion does not accept fallible catalog access") // TODO(hl): datafusion register catalog does not handles errors
|
||||
.map(|catalog_provider| Arc::new(DfCatalogProviderAdapter { catalog_provider }) as _)
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Vec<String> {
|
||||
// TODO(hl): datafusion register catalog does not handles errors
|
||||
self.catalog_list
|
||||
.catalog_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Option<Arc<dyn DfCatalogProvider>> {
|
||||
self.catalog_list
|
||||
.catalog(name)
|
||||
.expect("datafusion does not accept fallible catalog access") // TODO(hl): datafusion register catalog does not handles errors
|
||||
.map(|catalog_provider| Arc::new(DfCatalogProviderAdapter { catalog_provider }) as _)
|
||||
}
|
||||
}
|
||||
|
||||
/// Datafusion's CatalogProvider -> greptime CatalogProvider
|
||||
struct CatalogProviderAdapter {
|
||||
df_catalog_provider: Arc<dyn DfCatalogProvider>,
|
||||
}
|
||||
|
||||
impl CatalogProvider for CatalogProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.df_catalog_provider.schema_names())
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
&self,
|
||||
_name: String,
|
||||
_schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
todo!("register_schema is not supported in Datafusion catalog provider")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
Ok(self
|
||||
.df_catalog_provider
|
||||
.schema(name)
|
||||
.map(|df_schema_provider| Arc::new(SchemaProviderAdapter { df_schema_provider }) as _))
|
||||
}
|
||||
}
|
||||
|
||||
///Greptime CatalogProvider -> datafusion's CatalogProvider
|
||||
struct DfCatalogProviderAdapter {
|
||||
catalog_provider: CatalogProviderRef,
|
||||
}
|
||||
|
||||
impl DfCatalogProvider for DfCatalogProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
self.catalog_provider
|
||||
.schema_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Option<Arc<dyn DfSchemaProvider>> {
|
||||
self.catalog_provider
|
||||
.schema(name)
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
.map(|schema_provider| Arc::new(DfSchemaProviderAdapter { schema_provider }) as _)
|
||||
}
|
||||
}
|
||||
|
||||
/// Greptime SchemaProvider -> datafusion SchemaProvider
|
||||
struct DfSchemaProviderAdapter {
|
||||
schema_provider: Arc<dyn SchemaProvider>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl DfSchemaProvider for DfSchemaProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
self.schema_provider
|
||||
.table_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Option<Arc<dyn DfTableProvider>> {
|
||||
self.schema_provider
|
||||
.table(name)
|
||||
.await
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
.map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
|
||||
}
|
||||
|
||||
fn register_table(
|
||||
&self,
|
||||
name: String,
|
||||
table: Arc<dyn DfTableProvider>,
|
||||
) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
|
||||
let table = Arc::new(TableAdapter::new(table)?);
|
||||
match self.schema_provider.register_table(name, table)? {
|
||||
Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
|
||||
match self.schema_provider.deregister_table(name)? {
|
||||
Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> bool {
|
||||
self.schema_provider
|
||||
.table_exist(name)
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
}
|
||||
|
||||
/// Datafusion SchemaProviderAdapter -> greptime SchemaProviderAdapter
struct SchemaProviderAdapter {
    df_schema_provider: Arc<dyn DfSchemaProvider>,
}

#[async_trait]
impl SchemaProvider for SchemaProviderAdapter {
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Retrieves the list of available table names in this schema.
    fn table_names(&self) -> Result<Vec<String>> {
        Ok(self.df_schema_provider.table_names())
    }

    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
        let table = self.df_schema_provider.table(name).await;
        let table = table.map(|table_provider| {
            match table_provider
                .as_any()
                .downcast_ref::<DfTableProviderAdapter>()
            {
                Some(adapter) => adapter.table(),
                None => {
                    // TODO(yingwen): Avoid panic here.
                    let adapter =
                        TableAdapter::new(table_provider).expect("convert datafusion table");
                    Arc::new(adapter) as _
                }
            }
        });
        Ok(table)
    }

    fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
        let table_provider = Arc::new(DfTableProviderAdapter::new(table.clone()));
        Ok(self
            .df_schema_provider
            .register_table(name, table_provider)
            .context(error::DatafusionSnafu {
                msg: "Fail to register table to datafusion",
            })
            .map_err(BoxedError::new)
            .context(SchemaProviderOperationSnafu)?
            .map(|_| table))
    }

    fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
        todo!()
    }

    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        self.df_schema_provider
            .deregister_table(name)
            .context(error::DatafusionSnafu {
                msg: "Fail to deregister table from datafusion",
            })
            .map_err(BoxedError::new)
            .context(SchemaProviderOperationSnafu)?
            .map(|table| {
                let adapter = TableAdapter::new(table)
                    .context(error::TableSchemaMismatchSnafu)
                    .map_err(BoxedError::new)
                    .context(SchemaProviderOperationSnafu)?;
                Ok(Arc::new(adapter) as _)
            })
            .transpose()
    }

    fn table_exist(&self, name: &str) -> Result<bool> {
        Ok(self.df_schema_provider.table_exist(name))
    }
}

#[cfg(test)]
mod tests {
    use table::table::numbers::NumbersTable;

    use super::*;
    use crate::local::{new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider};

    #[test]
    #[should_panic]
    pub fn test_register_schema() {
        let adapter = CatalogProviderAdapter {
            df_catalog_provider: Arc::new(
                datafusion::catalog::catalog::MemoryCatalogProvider::new(),
            ),
        };

        adapter
            .register_schema(
                "whatever".to_string(),
                Arc::new(MemorySchemaProvider::new()),
            )
            .unwrap();
    }

    #[tokio::test]
    async fn test_register_table() {
        let adapter = DfSchemaProviderAdapter {
            schema_provider: Arc::new(MemorySchemaProvider::new()),
        };

        adapter
            .register_table(
                "test_table".to_string(),
                Arc::new(DfTableProviderAdapter::new(Arc::new(
                    NumbersTable::default(),
                ))),
            )
            .unwrap();
        adapter.table("test_table").await.unwrap();
    }

    #[test]
    pub fn test_register_catalog() {
        let catalog_list = DfCatalogListAdapter {
            catalog_list: new_memory_catalog_list().unwrap(),
        };
        assert!(catalog_list
            .register_catalog(
                "test_catalog".to_string(),
                Arc::new(DfCatalogProviderAdapter {
                    catalog_provider: Arc::new(MemoryCatalogProvider::new()),
                }),
            )
            .is_none());

        catalog_list.catalog("test_catalog").unwrap();
    }
}
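Note on the adapter above: `table()` still panics when a foreign DataFusion `TableProvider` cannot be converted (the `TODO(yingwen)` comment). A hedged sketch of one way to propagate the failure instead, reusing the same error chain that `deregister_table` already uses in this impl; this is an illustration, not part of the diff:

async fn table(&self, name: &str) -> Result<Option<TableRef>> {
    let Some(table_provider) = self.df_schema_provider.table(name).await else {
        return Ok(None);
    };
    let table = match table_provider
        .as_any()
        .downcast_ref::<DfTableProviderAdapter>()
    {
        Some(adapter) => adapter.table(),
        None => {
            // Propagate the conversion error instead of panicking.
            let adapter = TableAdapter::new(table_provider)
                .context(error::TableSchemaMismatchSnafu)
                .map_err(BoxedError::new)
                .context(SchemaProviderOperationSnafu)?;
            Arc::new(adapter) as _
        }
    };
    Ok(Some(table))
}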
@@ -20,12 +20,21 @@ use common_error::prelude::{Snafu, StatusCode};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::Location;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display(
|
||||
"Failed to re-compile script due to internal error, source: {}",
|
||||
source
|
||||
))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table, source: {}", source))]
|
||||
OpenSystemCatalog {
|
||||
#[snafu(backtrace)]
|
||||
@@ -105,7 +114,7 @@ pub enum Error {
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Table `{}` not exist", table))]
|
||||
#[snafu(display("Table not found: {}", table))]
|
||||
TableNotExist { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
@@ -127,6 +136,9 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel, source: {}", source))]
|
||||
ParallelOpenTable { source: JoinError },
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
@@ -261,7 +273,8 @@ impl ErrorExt for Error {
|
||||
| Error::IllegalManagerState { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,
|
||||
| Error::InvalidSystemTableDef { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
@@ -295,9 +308,10 @@ impl ErrorExt for Error {
|
||||
Error::SystemCatalogTableScan { source } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
|
||||
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::CompileScriptInternal { source }
|
||||
| Error::SchemaProviderOperation { source }
|
||||
| Error::Internal { source } => source.status_code(),
|
||||
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
|
||||
@@ -191,6 +191,7 @@ impl TableRegionalKey {
|
||||
pub struct TableRegionalValue {
|
||||
pub version: TableVersion,
|
||||
pub regions_ids: Vec<u32>,
|
||||
pub engine_name: Option<String>,
|
||||
}
|
||||
|
||||
pub struct CatalogKey {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod columns;
|
||||
mod tables;
|
||||
|
||||
use std::any::Any;
|
||||
@@ -23,15 +24,18 @@ use snafu::ResultExt;
|
||||
use table::table::adapter::TableAdapter;
|
||||
use table::TableRef;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::{DatafusionSnafu, Result, TableSchemaMismatchSnafu};
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::{CatalogProviderRef, SchemaProvider};
|
||||
|
||||
const TABLES: &str = "tables";
|
||||
const COLUMNS: &str = "columns";
|
||||
|
||||
pub(crate) struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
tables: Vec<String>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
@@ -39,6 +43,7 @@ impl InformationSchemaProvider {
|
||||
Self {
|
||||
catalog_name,
|
||||
catalog_provider,
|
||||
tables: vec![TABLES.to_string(), COLUMNS.to_string()],
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -49,32 +54,49 @@ impl SchemaProvider for InformationSchemaProvider {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
Ok(vec![TABLES.to_string()])
|
||||
async fn table_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.tables.clone())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let table = if name.eq_ignore_ascii_case(TABLES) {
|
||||
Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
))
|
||||
} else {
|
||||
return Ok(None);
|
||||
let table = match name.to_ascii_lowercase().as_ref() {
|
||||
TABLES => {
|
||||
let inner = Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
));
|
||||
Arc::new(
|
||||
StreamingTable::try_new(inner.schema().clone(), vec![inner]).with_context(
|
||||
|_| DatafusionSnafu {
|
||||
msg: format!("Failed to get InformationSchema table '{name}'"),
|
||||
},
|
||||
)?,
|
||||
)
|
||||
}
|
||||
COLUMNS => {
|
||||
let inner = Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
));
|
||||
Arc::new(
|
||||
StreamingTable::try_new(inner.schema().clone(), vec![inner]).with_context(
|
||||
|_| DatafusionSnafu {
|
||||
msg: format!("Failed to get InformationSchema table '{name}'"),
|
||||
},
|
||||
)?,
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let table = Arc::new(
|
||||
StreamingTable::try_new(table.schema().clone(), vec![table]).with_context(|_| {
|
||||
DatafusionSnafu {
|
||||
msg: format!("Failed to get InformationSchema table '{name}'"),
|
||||
}
|
||||
})?,
|
||||
);
|
||||
let table = TableAdapter::new(table).context(TableSchemaMismatchSnafu)?;
|
||||
Ok(Some(Arc::new(table)))
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
Ok(matches!(name.to_ascii_lowercase().as_str(), TABLES))
|
||||
async fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
let normalized_name = name.to_ascii_lowercase();
|
||||
Ok(self.tables.contains(&normalized_name))
|
||||
}
|
||||
}
|
||||
|
||||
184
src/catalog/src/information_schema/columns.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::{
|
||||
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, DataType};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{CreateRecordBatchSnafu, Result};
|
||||
use crate::CatalogProviderRef;
|
||||
|
||||
pub(super) struct InformationSchemaColumns {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
}
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const COLUMN_NAME: &str = "column_name";
|
||||
const DATA_TYPE: &str = "data_type";
|
||||
const SEMANTIC_TYPE: &str = "semantic_type";
|
||||
|
||||
impl InformationSchemaColumns {
|
||||
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
]));
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_provider,
|
||||
}
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaColumnsBuilder {
|
||||
InformationSchemaColumnsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaColumnsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
column_names: StringVectorBuilder,
|
||||
data_types: StringVectorBuilder,
|
||||
semantic_types: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaColumnsBuilder {
|
||||
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_provider,
|
||||
catalog_names: StringVectorBuilder::with_capacity(42),
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
column_names: StringVectorBuilder::with_capacity(42),
|
||||
data_types: StringVectorBuilder::with_capacity(42),
|
||||
semantic_types: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.tables` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
|
||||
for schema_name in self.catalog_provider.schema_names().await? {
|
||||
let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
|
||||
for table_name in schema.table_names().await? {
|
||||
let Some(table) = schema.table(&table_name).await? else { continue };
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let semantic_type = if column.is_time_index() {
|
||||
SEMANTIC_TYPE_TIME_INDEX
|
||||
} else if keys.contains(&idx) {
|
||||
SEMANTIC_TYPE_PRIMARY_KEY
|
||||
} else {
|
||||
SEMANTIC_TYPE_FIELD
|
||||
};
|
||||
self.add_column(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_column(
|
||||
&mut self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
column_name: &str,
|
||||
data_type: &str,
|
||||
semantic_type: &str,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.column_names.push(Some(column_name));
|
||||
self.data_types.push(Some(data_type));
|
||||
self.semantic_types.push(Some(semantic_type));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.column_names.finish()),
|
||||
Arc::new(self.data_types.finish()),
|
||||
Arc::new(self.semantic_types.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaColumns {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
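`InformationSchemaColumns` above is only a `DfPartitionStream`; it becomes queryable once wrapped in a `StreamingTable` (see the provider change earlier). As a rough illustration of what its `execute` yields, a caller could drain the returned stream like this; the helper function is an assumption for illustration, not code from the diff:

use futures::TryStreamExt;

// Hypothetical helper: counts the rows reported by the `columns` stream.
async fn count_column_rows(stream: DfSendableRecordBatchStream) -> usize {
    let batches: Vec<_> = stream
        .try_collect()
        .await
        .expect("stream should not fail in this sketch");
    batches.iter().map(|batch| batch.num_rows()).sum()
}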
@@ -23,12 +23,11 @@ use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatch
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::StringVectorBuilder;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
|
||||
use snafu::ResultExt;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use crate::error::{CreateRecordBatchSnafu, Result};
|
||||
use crate::information_schema::TABLES;
|
||||
use crate::CatalogProviderRef;
|
||||
|
||||
pub(super) struct InformationSchemaTables {
|
||||
@@ -44,6 +43,8 @@ impl InformationSchemaTables {
|
||||
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
|
||||
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
|
||||
]));
|
||||
Self {
|
||||
schema,
|
||||
@@ -73,6 +74,8 @@ struct InformationSchemaTablesBuilder {
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
table_types: StringVectorBuilder,
|
||||
table_ids: UInt32VectorBuilder,
|
||||
engines: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaTablesBuilder {
|
||||
@@ -85,6 +88,8 @@ impl InformationSchemaTablesBuilder {
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
table_types: StringVectorBuilder::with_capacity(42),
|
||||
table_ids: UInt32VectorBuilder::with_capacity(42),
|
||||
engines: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,26 +97,26 @@ impl InformationSchemaTablesBuilder {
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
|
||||
for schema_name in self.catalog_provider.schema_names()? {
|
||||
for schema_name in self.catalog_provider.schema_names().await? {
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(schema) = self.catalog_provider.schema(&schema_name)? else { continue };
|
||||
for table_name in schema.table_names()? {
|
||||
let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
|
||||
for table_name in schema.table_names().await? {
|
||||
let Some(table) = schema.table(&table_name).await? else { continue };
|
||||
self.add_table(&catalog_name, &schema_name, &table_name, table.table_type());
|
||||
let table_info = table.table_info();
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
table.table_type(),
|
||||
Some(table_info.ident.table_id),
|
||||
Some(&table_info.meta.engine),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Add a final list for the information schema tables themselves
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
INFORMATION_SCHEMA_NAME,
|
||||
TABLES,
|
||||
TableType::View,
|
||||
);
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
@@ -121,6 +126,8 @@ impl InformationSchemaTablesBuilder {
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
table_type: TableType,
|
||||
table_id: Option<u32>,
|
||||
engine: Option<&str>,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
@@ -130,6 +137,8 @@ impl InformationSchemaTablesBuilder {
|
||||
TableType::View => "VIEW",
|
||||
TableType::Temporary => "LOCAL TEMPORARY",
|
||||
}));
|
||||
self.table_ids.push(table_id);
|
||||
self.engines.push(engine);
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
@@ -138,6 +147,8 @@ impl InformationSchemaTablesBuilder {
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.table_types.finish()),
|
||||
Arc::new(self.table_ids.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
@@ -29,66 +29,51 @@ use table::TableRef;
|
||||
use crate::error::{CreateTableSnafu, Result};
|
||||
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
|
||||
|
||||
pub mod datafusion;
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
pub(crate) mod information_schema;
|
||||
pub mod local;
|
||||
mod metrics;
|
||||
pub mod remote;
|
||||
pub mod schema;
|
||||
pub mod system;
|
||||
pub mod table_source;
|
||||
pub mod tables;
|
||||
|
||||
/// Represent a list of named catalogs
|
||||
pub trait CatalogList: Sync + Send {
|
||||
/// Returns the catalog list as [`Any`](std::any::Any)
|
||||
/// so that it can be downcast to a specific implementation.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Adds a new catalog to this catalog list
|
||||
/// If a catalog of the same name existed before, it is replaced in the list and returned.
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>>;
|
||||
|
||||
/// Retrieves the list of available catalog names
|
||||
fn catalog_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Retrieves a specific catalog by name, provided it exists.
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>>;
|
||||
}
|
||||
|
||||
/// Represents a catalog, comprising a number of named schemas.
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogProvider: Sync + Send {
|
||||
/// Returns the catalog provider as [`Any`](std::any::Any)
|
||||
/// so that it can be downcast to a specific implementation.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Retrieves the list of available schema names in this catalog.
|
||||
fn schema_names(&self) -> Result<Vec<String>>;
|
||||
async fn schema_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Registers schema to this catalog.
|
||||
fn register_schema(
|
||||
async fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>>;
|
||||
|
||||
/// Retrieves a specific schema from the catalog by name, provided it exists.
|
||||
fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
async fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
}
|
||||
|
||||
pub type CatalogListRef = Arc<dyn CatalogList>;
|
||||
pub type CatalogProviderRef = Arc<dyn CatalogProvider>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManager: CatalogList {
|
||||
pub trait CatalogManager: Send + Sync {
|
||||
/// Starts a catalog manager.
|
||||
async fn start(&self) -> Result<()>;
|
||||
|
||||
async fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>>;
|
||||
|
||||
/// Registers a table within given catalog/schema to catalog manager,
|
||||
/// returns whether the table registered.
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
|
||||
@@ -108,7 +93,11 @@ pub trait CatalogManager: CatalogList {
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest)
|
||||
-> error::Result<()>;
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
async fn catalog_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>>;
|
||||
|
||||
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
|
||||
/// Returns the table by catalog, schema and table name.
|
||||
async fn table(
|
||||
@@ -117,6 +106,8 @@ pub trait CatalogManager: CatalogList {
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
}
|
||||
|
||||
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
|
||||
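With `CatalogManager` no longer extending `CatalogList`, catalog and schema lookups go through the manager itself and are now async. A minimal sketch of a caller under the new contract; the catalog, schema, and table names are assumptions used only for illustration:

async fn resolve_numbers(manager: &CatalogManagerRef) -> Result<Option<TableRef>> {
    // Either resolve the table in one call ...
    let direct = manager.table("greptime", "public", "numbers").await?;
    if direct.is_some() {
        return Ok(direct);
    }
    // ... or walk the hierarchy explicitly through the async accessors.
    let Some(catalog) = manager.catalog("greptime").await? else { return Ok(None) };
    let Some(schema) = catalog.schema("public").await? else { return Ok(None) };
    schema.table("numbers").await
}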
@@ -238,15 +229,15 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
|
||||
let mut region_number: u64 = 0;
|
||||
let mut region_stats = Vec::new();
|
||||
|
||||
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
|
||||
let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
|
||||
for catalog_name in catalog_names {
|
||||
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };
|
||||
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name).await else { continue };
|
||||
|
||||
let Ok(schema_names) = catalog.schema_names() else { continue };
|
||||
let Ok(schema_names) = catalog.schema_names().await else { continue };
|
||||
for schema_name in schema_names {
|
||||
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };
|
||||
let Ok(Some(schema)) = catalog.schema(&schema_name).await else { continue };
|
||||
|
||||
let Ok(table_names) = schema.table_names() else { continue };
|
||||
let Ok(table_names) = schema.table_names().await else { continue };
|
||||
for table_name in table_names {
|
||||
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ use common_telemetry::{error, info};
|
||||
use datatypes::prelude::ScalarVector;
|
||||
use datatypes::vectors::{BinaryVector, UInt8Vector};
|
||||
use futures_util::lock::Mutex;
|
||||
use metrics::increment_gauge;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::manager::TableEngineManagerRef;
|
||||
use table::engine::EngineContext;
|
||||
@@ -48,9 +49,9 @@ use crate::system::{
|
||||
};
|
||||
use crate::tables::SystemCatalog;
|
||||
use crate::{
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
handle_system_table_request, CatalogManager, CatalogProviderRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
|
||||
@@ -88,7 +89,7 @@ impl LocalCatalogManager {
|
||||
|
||||
/// Scan all entries from system catalog table
|
||||
pub async fn init(&self) -> Result<()> {
|
||||
self.init_system_catalog()?;
|
||||
self.init_system_catalog().await?;
|
||||
let system_records = self.system.information_schema.system.records().await?;
|
||||
let entries = self.collect_system_catalog_entries(system_records).await?;
|
||||
let max_table_id = self.handle_system_catalog_entries(entries).await?;
|
||||
@@ -114,27 +115,27 @@ impl LocalCatalogManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn init_system_catalog(&self) -> Result<()> {
|
||||
async fn init_system_catalog(&self) -> Result<()> {
|
||||
let system_schema = Arc::new(MemorySchemaProvider::new());
|
||||
system_schema.register_table(
|
||||
system_schema.register_table_sync(
|
||||
SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
self.system.information_schema.system.clone(),
|
||||
)?;
|
||||
let system_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
system_catalog.register_schema(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
|
||||
system_catalog.register_schema_sync(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
|
||||
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
|
||||
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
let default_schema = Arc::new(MemorySchemaProvider::new());
|
||||
|
||||
// Add numbers table for test
|
||||
let table = Arc::new(NumbersTable::default());
|
||||
default_schema.register_table("numbers".to_string(), table)?;
|
||||
default_schema.register_table_sync("numbers".to_string(), table)?;
|
||||
|
||||
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
|
||||
default_catalog.register_schema_sync(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
|
||||
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -213,16 +214,17 @@ impl LocalCatalogManager {
|
||||
info!("Register catalog: {}", c.catalog_name);
|
||||
}
|
||||
Entry::Schema(s) => {
|
||||
let catalog =
|
||||
self.catalogs
|
||||
.catalog(&s.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &s.catalog_name,
|
||||
})?;
|
||||
catalog.register_schema(
|
||||
s.schema_name.clone(),
|
||||
Arc::new(MemorySchemaProvider::new()),
|
||||
)?;
|
||||
self.catalogs
|
||||
.catalog(&s.catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &s.catalog_name,
|
||||
})?
|
||||
.register_schema(
|
||||
s.schema_name.clone(),
|
||||
Arc::new(MemorySchemaProvider::new()),
|
||||
)
|
||||
.await?;
|
||||
info!("Registered schema: {:?}", s);
|
||||
}
|
||||
Entry::Table(t) => {
|
||||
@@ -243,14 +245,16 @@ impl LocalCatalogManager {
|
||||
}
|
||||
|
||||
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(&t.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &t.catalog_name,
|
||||
})?;
|
||||
let catalog =
|
||||
self.catalogs
|
||||
.catalog(&t.catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &t.catalog_name,
|
||||
})?;
|
||||
let schema = catalog
|
||||
.schema(&t.schema_name)?
|
||||
.schema(&t.schema_name)
|
||||
.await?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &t.catalog_name,
|
||||
schema: &t.schema_name,
|
||||
@@ -286,37 +290,11 @@ impl LocalCatalogManager {
|
||||
),
|
||||
})?;
|
||||
|
||||
schema.register_table(t.table_name.clone(), option)?;
|
||||
schema.register_table(t.table_name.clone(), option).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogList for LocalCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
self.catalogs.register_catalog(name, catalog)
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
self.catalogs.catalog_names()
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
|
||||
Ok(Some(self.system.clone()))
|
||||
} else {
|
||||
self.catalogs.catalog(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableIdProvider for LocalCatalogManager {
|
||||
async fn next_table_id(&self) -> table::Result<TableId> {
|
||||
@@ -347,10 +325,12 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.schema(schema_name)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
@@ -388,7 +368,14 @@ impl CatalogManager for LocalCatalogManager {
|
||||
engine,
|
||||
)
|
||||
.await?;
|
||||
schema.register_table(request.table_name, request.table)?;
|
||||
schema
|
||||
.register_table(request.table_name, request.table)
|
||||
.await?;
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(catalog_name, schema_name)],
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
@@ -409,11 +396,13 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.schema(schema_name)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
@@ -421,7 +410,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let _lock = self.register_lock.lock().await;
|
||||
ensure!(
|
||||
!schema.table_exist(&request.new_table_name)?,
|
||||
!schema.table_exist(&request.new_table_name).await?,
|
||||
TableExistsSnafu {
|
||||
table: &request.new_table_name
|
||||
}
|
||||
@@ -447,6 +436,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let renamed = schema
|
||||
.rename_table(&request.table_name, request.new_table_name.clone())
|
||||
.await
|
||||
.is_ok();
|
||||
Ok(renamed)
|
||||
}
|
||||
@@ -497,13 +487,14 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
ensure!(
|
||||
catalog.schema(schema_name)?.is_none(),
|
||||
catalog.schema(schema_name).await?.is_none(),
|
||||
SchemaExistsSnafu {
|
||||
schema: schema_name,
|
||||
}
|
||||
@@ -511,12 +502,18 @@ impl CatalogManager for LocalCatalogManager {
|
||||
self.system
|
||||
.register_schema(request.catalog, schema_name.clone())
|
||||
.await?;
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
catalog
|
||||
.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
|
||||
.await?;
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
|
||||
let catalog_name = request.create_table_request.catalog_name.clone();
|
||||
let schema_name = request.create_table_request.schema_name.clone();
|
||||
|
||||
ensure!(
|
||||
!*self.init_lock.lock().await,
|
||||
IllegalManagerStateSnafu {
|
||||
@@ -526,17 +523,23 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let mut sys_table_requests = self.system_table_requests.lock().await;
|
||||
sys_table_requests.push(request);
|
||||
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&catalog_name, &schema_name)],
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
self.catalogs
|
||||
.catalog(catalog)?
|
||||
.catalog(catalog)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.schema(schema)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn table(
|
||||
@@ -547,16 +550,42 @@ impl CatalogManager for LocalCatalogManager {
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.schema(schema_name)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name).await
|
||||
}
|
||||
|
||||
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
|
||||
Ok(Some(self.system.clone()))
|
||||
} else {
|
||||
self.catalogs.catalog(catalog).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
self.catalogs.catalog_names().await
|
||||
}
|
||||
|
||||
async fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
self.catalogs.register_catalog(name, catalog).await
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -21,6 +21,7 @@ use std::sync::{Arc, RwLock};
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_telemetry::error;
|
||||
use metrics::{decrement_gauge, increment_gauge};
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
@@ -31,7 +32,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::schema::SchemaProvider;
|
||||
use crate::{
|
||||
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
|
||||
CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProviderRef,
|
||||
};
|
||||
@@ -51,10 +52,10 @@ impl Default for MemoryCatalogManager {
|
||||
};
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
manager
|
||||
.register_catalog("greptime".to_string(), default_catalog.clone())
|
||||
.register_catalog_sync("greptime".to_string(), default_catalog.clone())
|
||||
.unwrap();
|
||||
default_catalog
|
||||
.register_schema("public".to_string(), Arc::new(MemorySchemaProvider::new()))
|
||||
.register_schema_sync("public".to_string(), Arc::new(MemorySchemaProvider::new()))
|
||||
.unwrap();
|
||||
manager
|
||||
}
|
||||
@@ -75,70 +76,81 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
let schema = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
schema
|
||||
.register_table(request.table_name, request.table)
|
||||
.await
|
||||
.map(|v| v.is_none())
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
let catalog = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
let schema =
|
||||
catalog
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
Ok(schema
|
||||
.rename_table(&request.table_name, request.new_table_name)
|
||||
.await
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
let schema = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.schema(&request.schema)
|
||||
.await?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
schema
|
||||
.deregister_table(&request.table_name)
|
||||
.await
|
||||
.map(|v| v.is_some())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
let catalog = self
|
||||
.catalog(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
catalog
|
||||
.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))
|
||||
.await?;
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
@@ -147,10 +159,9 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
if let Some(c) = catalogs.get(catalog) {
|
||||
c.schema(schema)
|
||||
async fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
if let Some(c) = self.catalog(catalog) {
|
||||
c.schema(schema).await
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
@@ -162,15 +173,31 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = {
|
||||
let c = self.catalogs.read().unwrap();
|
||||
let Some(c) = c.get(catalog) else { return Ok(None) };
|
||||
c.clone()
|
||||
};
|
||||
match catalog.schema(schema)? {
|
||||
None => Ok(None),
|
||||
Some(s) => s.table(table_name).await,
|
||||
}
|
||||
let Some(catalog) = self
|
||||
.catalog(catalog) else { return Ok(None)};
|
||||
let Some(s) = catalog.schema(schema).await? else { return Ok(None) };
|
||||
s.table(table_name).await
|
||||
}
|
||||
|
||||
async fn catalog(&self, catalog: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).cloned())
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
self.register_catalog_sync(name, catalog)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,14 +219,8 @@ impl MemoryCatalogManager {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogList for MemoryCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
pub fn register_catalog_sync(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
@@ -208,14 +229,8 @@ impl CatalogList for MemoryCatalogManager {
|
||||
Ok(catalogs.insert(name, catalog))
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.keys().map(|s| s.to_string()).collect())
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.get(name).cloned())
|
||||
fn catalog(&self, catalog_name: &str) -> Option<CatalogProviderRef> {
|
||||
self.catalogs.read().unwrap().get(catalog_name).cloned()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -237,19 +252,13 @@ impl MemoryCatalogProvider {
|
||||
schemas: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProvider for MemoryCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
pub fn schema_names_sync(&self) -> Result<Vec<String>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
pub fn register_schema_sync(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
@@ -259,15 +268,39 @@ impl CatalogProvider for MemoryCatalogProvider {
|
||||
!schemas.contains_key(&name),
|
||||
error::SchemaExistsSnafu { schema: &name }
|
||||
);
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
Ok(schemas.insert(name, schema))
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
pub fn schema_sync(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogProvider for MemoryCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn schema_names(&self) -> Result<Vec<String>> {
|
||||
self.schema_names_sync()
|
||||
}
|
||||
|
||||
async fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
self.register_schema_sync(name, schema)
|
||||
}
|
||||
|
||||
async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
self.schema_sync(name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a schema.
|
||||
pub struct MemorySchemaProvider {
|
||||
tables: RwLock<HashMap<String, TableRef>>,
|
||||
@@ -280,31 +313,8 @@ impl MemorySchemaProvider {
|
||||
tables: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemorySchemaProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for MemorySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.get(name).cloned())
|
||||
}
|
||||
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
pub fn register_table_sync(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
if let Some(existing) = tables.get(name.as_str()) {
|
||||
// if table with the same name but different table id exists, then it's a fatal bug
|
||||
@@ -322,7 +332,7 @@ impl SchemaProvider for MemorySchemaProvider {
|
||||
}
|
||||
}
|
||||
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
pub fn rename_table_sync(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
let Some(table) = tables.remove(name) else {
|
||||
return TableNotFoundSnafu {
|
||||
@@ -340,14 +350,53 @@ impl SchemaProvider for MemorySchemaProvider {
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
pub fn table_exist_sync(&self, name: &str) -> Result<bool> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.contains_key(name))
|
||||
}
|
||||
|
||||
pub fn deregister_table_sync(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
Ok(tables.remove(name))
|
||||
}
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
impl Default for MemorySchemaProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for MemorySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn table_names(&self) -> Result<Vec<String>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.contains_key(name))
|
||||
Ok(tables.keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.get(name).cloned())
|
||||
}
|
||||
|
||||
async fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
self.register_table_sync(name, table)
|
||||
}
|
||||
|
||||
async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
self.rename_table_sync(name, new_name)
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
self.deregister_table_sync(name)
|
||||
}
|
||||
|
||||
async fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
self.table_exist_sync(name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -368,15 +417,20 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_new_memory_catalog_list() {
|
||||
let catalog_list = new_memory_catalog_list().unwrap();
|
||||
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
|
||||
let default_catalog = CatalogManager::catalog(&*catalog_list, DEFAULT_CATALOG_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let default_schema = default_catalog
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
default_schema
|
||||
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table = default_schema.table("numbers").await.unwrap();
|
||||
@@ -388,17 +442,17 @@ mod tests {
|
||||
async fn test_mem_provider() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "numbers";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
assert!(!provider.table_exist_sync(table_name).unwrap());
|
||||
provider.deregister_table_sync(table_name).unwrap();
|
||||
let test_table = NumbersTable::default();
|
||||
// register table successfully
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), Arc::new(test_table))
|
||||
.register_table_sync(table_name.to_string(), Arc::new(test_table))
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.table_exist_sync(table_name).unwrap());
|
||||
let other_table = NumbersTable::new(12);
|
||||
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
|
||||
let result = provider.register_table_sync(table_name.to_string(), Arc::new(other_table));
|
||||
let err = result.err().unwrap();
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
@@ -407,27 +461,27 @@ mod tests {
|
||||
async fn test_mem_provider_rename_table() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "num";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(!provider.table_exist_sync(table_name).unwrap());
|
||||
let test_table: TableRef = Arc::new(NumbersTable::default());
|
||||
// register test table
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), test_table.clone())
|
||||
.register_table_sync(table_name.to_string(), test_table.clone())
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.table_exist_sync(table_name).unwrap());
|
||||
|
||||
// rename test table
|
||||
let new_table_name = "numbers";
|
||||
provider
|
||||
.rename_table(table_name, new_table_name.to_string())
|
||||
.rename_table_sync(table_name, new_table_name.to_string())
|
||||
.unwrap();
|
||||
|
||||
// test old table name not exist
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
assert!(!provider.table_exist_sync(table_name).unwrap());
|
||||
provider.deregister_table_sync(table_name).unwrap();
|
||||
|
||||
// test new table name exists
|
||||
assert!(provider.table_exist(new_table_name).unwrap());
|
||||
assert!(provider.table_exist_sync(new_table_name).unwrap());
|
||||
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
|
||||
assert_eq!(
|
||||
registered_table.table_info().ident.table_id,
|
||||
@@ -435,7 +489,9 @@ mod tests {
|
||||
);
|
||||
|
||||
let other_table = Arc::new(NumbersTable::new(2));
|
||||
let result = provider.register_table(new_table_name.to_string(), other_table);
|
||||
let result = provider
|
||||
.register_table(new_table_name.to_string(), other_table)
|
||||
.await;
|
||||
let err = result.err().unwrap();
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
@@ -445,6 +501,7 @@ mod tests {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
@@ -460,7 +517,7 @@ mod tests {
|
||||
table,
|
||||
};
|
||||
assert!(catalog.register_table(register_table_req).await.unwrap());
|
||||
assert!(schema.table_exist(table_name).unwrap());
|
||||
assert!(schema.table_exist(table_name).await.unwrap());
|
||||
|
||||
// rename table
|
||||
let new_table_name = "numbers_new";
|
||||
@@ -472,8 +529,8 @@ mod tests {
|
||||
table_id,
|
||||
};
|
||||
assert!(catalog.rename_table(rename_table_req).await.unwrap());
|
||||
assert!(!schema.table_exist(table_name).unwrap());
|
||||
assert!(schema.table_exist(new_table_name).unwrap());
|
||||
assert!(!schema.table_exist(table_name).await.unwrap());
|
||||
assert!(schema.table_exist(new_table_name).await.unwrap());
|
||||
|
||||
let registered_table = catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
@@ -507,6 +564,7 @@ mod tests {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
@@ -518,7 +576,7 @@ mod tests {
|
||||
table: Arc::new(NumbersTable::default()),
|
||||
};
|
||||
catalog.register_table(register_table_req).await.unwrap();
|
||||
assert!(schema.table_exist("numbers").unwrap());
|
||||
assert!(schema.table_exist("numbers").await.unwrap());
|
||||
|
||||
let deregister_table_req = DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -529,6 +587,6 @@ mod tests {
|
||||
.deregister_table(deregister_table_req)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!schema.table_exist("numbers").unwrap());
|
||||
assert!(!schema.table_exist("numbers").await.unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
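A note on the pattern in the memory catalog diff above: the async trait methods on `MemoryCatalogProvider` and `MemorySchemaProvider` are thin wrappers over new `*_sync` helpers, because the in-memory state is a `std::sync::RwLock` with no await points. That keeps bootstrap paths such as `init_system_catalog` free of `.await`. A small hedged sketch of the synchronous path; the helper function is illustrative only:

fn bootstrap_schema() -> Result<SchemaProviderRef> {
    let schema = Arc::new(MemorySchemaProvider::new());
    // Register the test table without needing an async runtime.
    let _ = schema.register_table_sync("numbers".to_string(), Arc::new(NumbersTable::default()))?;
    Ok(schema)
}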
@@ -12,29 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use sqlparser::ast::Query as SpQuery;
|
||||
use common_catalog::build_db_string;
|
||||
|
||||
use crate::errors::ParserError;
|
||||
pub(crate) const METRIC_DB_LABEL: &str = "db";
|
||||
|
||||
/// Query statement instance.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct Query {
|
||||
pub inner: SpQuery,
|
||||
}
|
||||
|
||||
/// Automatically converts from sqlparser Query instance to SqlQuery.
|
||||
impl TryFrom<SpQuery> for Query {
|
||||
type Error = ParserError;
|
||||
|
||||
fn try_from(q: SpQuery) -> Result<Self, Self::Error> {
|
||||
Ok(Query { inner: q })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Query> for SpQuery {
|
||||
type Error = ParserError;
|
||||
|
||||
fn try_from(value: Query) -> Result<Self, Self::Error> {
|
||||
Ok(value.inner)
|
||||
}
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
|
||||
(METRIC_DB_LABEL, build_db_string(catalog, schema))
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -28,14 +28,14 @@ pub trait SchemaProvider: Sync + Send {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Retrieves the list of available table names in this schema.
|
||||
fn table_names(&self) -> Result<Vec<String>>;
|
||||
async fn table_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Retrieves a specific table from the schema by name, provided it exists.
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
|
||||
|
||||
/// If supported by the implementation, adds a new table to this schema.
|
||||
/// If a table of the same name existed before, it returns "Table already exists" error.
|
||||
fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
|
||||
async fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
|
||||
NotSupportedSnafu {
|
||||
op: format!("register_table({name}, <table>)"),
|
||||
}
|
||||
@@ -44,7 +44,7 @@ pub trait SchemaProvider: Sync + Send {
|
||||
|
||||
/// If supported by the implementation, renames an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns "Table not found" error.
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
async fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
NotSupportedSnafu {
|
||||
op: format!("rename_table({name}, {new_name})"),
|
||||
}
|
||||
@@ -53,7 +53,7 @@ pub trait SchemaProvider: Sync + Send {
|
||||
|
||||
/// If supported by the implementation, removes an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns Ok(None).
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
async fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
NotSupportedSnafu {
|
||||
op: format!("deregister_table({name})"),
|
||||
}
|
||||
@@ -63,7 +63,7 @@ pub trait SchemaProvider: Sync + Send {
|
||||
/// If supported by the implementation, checks the table exist in the schema provider or not.
|
||||
/// If no matched table in the schema provider, return false.
|
||||
/// Otherwise, return true.
|
||||
fn table_exist(&self, name: &str) -> Result<bool>;
|
||||
async fn table_exist(&self, name: &str) -> Result<bool>;
|
||||
}
|
||||
|
||||
pub type SchemaProviderRef = Arc<dyn SchemaProvider>;
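With every provider method now async, each catalog lookup becomes an .await point for callers. A minimal consumer sketch, assuming the crate-local Result alias (list_tables is a hypothetical helper, not part of the diff):

async fn list_tables(schema: SchemaProviderRef) -> Result<Vec<String>> {
    let mut found = Vec::new();
    // Both the name listing and the existence check may now suspend on I/O.
    for name in schema.table_names().await? {
        if schema.table_exist(&name).await? {
            found.push(name);
        }
    }
    Ok(found)
}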
|
||||
|
||||
@@ -80,6 +80,10 @@ impl Table for SystemCatalogTable {
|
||||
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
|
||||
self.0.delete(request).await
|
||||
}
|
||||
|
||||
fn statistics(&self) -> Option<table::stats::TableStatistics> {
|
||||
self.0.statistics()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemCatalogTable {
|
||||
|
||||
@@ -28,10 +28,10 @@ use crate::error::{
|
||||
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::CatalogListRef;
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
catalog_list: CatalogListRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
|
||||
disallow_cross_schema_query: bool,
|
||||
default_catalog: String,
|
||||
@@ -40,12 +40,12 @@ pub struct DfTableSourceProvider {
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
pub fn new(
|
||||
catalog_list: CatalogListRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
disallow_cross_schema_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_list,
|
||||
catalog_manager,
|
||||
disallow_cross_schema_query,
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog(),
|
||||
@@ -104,17 +104,22 @@ impl DfTableSourceProvider {
|
||||
|
||||
let schema = if schema_name != INFORMATION_SCHEMA_NAME {
|
||||
let catalog = self
|
||||
.catalog_list
|
||||
.catalog(catalog_name)?
|
||||
.catalog_manager
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?
|
||||
catalog
|
||||
.schema(schema_name)
|
||||
.await?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?
|
||||
} else {
|
||||
let catalog_provider = self
|
||||
.catalog_list
|
||||
.catalog(catalog_name)?
|
||||
.catalog_manager
|
||||
.catalog(catalog_name)
|
||||
.await?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
catalog_name.to_string(),
|
||||
|
||||
@@ -40,7 +40,7 @@ impl SchemaProvider for InformationSchema {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>, Error> {
|
||||
async fn table_names(&self) -> Result<Vec<String>, Error> {
|
||||
Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ impl SchemaProvider for InformationSchema {
|
||||
}
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool, Error> {
|
||||
async fn table_exist(&self, name: &str) -> Result<bool, Error> {
|
||||
Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
|
||||
}
|
||||
}
|
||||
@@ -116,16 +116,17 @@ impl SystemCatalog {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogProvider for SystemCatalog {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>, Error> {
|
||||
async fn schema_names(&self) -> Result<Vec<String>, Error> {
|
||||
Ok(vec![INFORMATION_SCHEMA_NAME.to_string()])
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
async fn register_schema(
|
||||
&self,
|
||||
_name: String,
|
||||
_schema: SchemaProviderRef,
|
||||
@@ -133,7 +134,7 @@ impl CatalogProvider for SystemCatalog {
|
||||
panic!("System catalog does not support registering schema!")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
|
||||
async fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
|
||||
if name.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
|
||||
Ok(Some(self.information_schema.clone()))
|
||||
} else {
|
||||
|
||||
@@ -20,14 +20,16 @@ mod tests {
|
||||
use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::{error, info};
|
||||
use common_test_util::temp_dir::TempDir;
|
||||
use mito::config::EngineConfig;
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
async fn create_local_catalog_manager() -> Result<LocalCatalogManager, catalog::error::Error> {
|
||||
let (_dir, object_store) =
|
||||
async fn create_local_catalog_manager(
|
||||
) -> Result<(TempDir, LocalCatalogManager), catalog::error::Error> {
|
||||
let (dir, object_store) =
|
||||
mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
|
||||
let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
|
||||
EngineConfig::default(),
|
||||
@@ -37,13 +39,13 @@ mod tests {
|
||||
let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
|
||||
let catalog_manager = LocalCatalogManager::try_new(engine_manager).await.unwrap();
|
||||
catalog_manager.start().await?;
|
||||
Ok(catalog_manager)
|
||||
Ok((dir, catalog_manager))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rename_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let catalog_manager = create_local_catalog_manager().await.unwrap();
|
||||
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
|
||||
// register table
|
||||
let table_name = "test_table";
|
||||
let table_id = 42;
|
||||
@@ -81,7 +83,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_duplicate_register() {
|
||||
let catalog_manager = create_local_catalog_manager().await.unwrap();
|
||||
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
|
||||
let request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
@@ -118,8 +120,9 @@ mod tests {
|
||||
fn test_concurrent_register() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
|
||||
let catalog_manager =
|
||||
Arc::new(rt.block_on(async { create_local_catalog_manager().await.unwrap() }));
|
||||
let (_dir, catalog_manager) =
|
||||
rt.block_on(async { create_local_catalog_manager().await.unwrap() });
|
||||
let catalog_manager = Arc::new(catalog_manager);
|
||||
|
||||
let succeed: Arc<Mutex<Option<TableRef>>> = Arc::new(Mutex::new(None));
|
||||
|
||||
|
||||
@@ -20,7 +20,9 @@ use std::sync::Arc;
|
||||
|
||||
use async_stream::stream;
|
||||
use catalog::error::Error;
|
||||
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
|
||||
use catalog::remote::{Kv, KvBackend, ValueIter};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_recordbatch::RecordBatch;
|
||||
use common_telemetry::logging::info;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
@@ -34,11 +36,36 @@ use table::test_util::MemTable;
|
||||
use table::TableRef;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MockKvBackend {
|
||||
map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl Default for MockKvBackend {
|
||||
fn default() -> Self {
|
||||
let mut map = BTreeMap::default();
|
||||
let catalog_value = CatalogValue {}.as_bytes().unwrap();
|
||||
let schema_value = SchemaValue {}.as_bytes().unwrap();
|
||||
|
||||
let default_catalog_key = CatalogKey {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
}
|
||||
.to_string();
|
||||
|
||||
let default_schema_key = SchemaKey {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
}
|
||||
.to_string();
|
||||
|
||||
// create default catalog and schema
|
||||
map.insert(default_catalog_key.into(), catalog_value);
|
||||
map.insert(default_schema_key.into(), schema_value);
|
||||
|
||||
let map = RwLock::new(map);
|
||||
Self { map }
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for MockKvBackend {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
futures::executor::block_on(async {
|
||||
|
||||
@@ -26,11 +26,11 @@ mod tests {
|
||||
use catalog::remote::{
|
||||
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
|
||||
};
|
||||
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
|
||||
use catalog::{CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
|
||||
use datatypes::schema::RawSchema;
|
||||
use futures_util::StreamExt;
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::requests::CreateTableRequest;
|
||||
|
||||
@@ -78,35 +78,47 @@ mod tests {
|
||||
|
||||
async fn prepare_components(
|
||||
node_id: u64,
|
||||
) -> (KvBackendRef, TableEngineRef, Arc<RemoteCatalogManager>) {
|
||||
) -> (
|
||||
KvBackendRef,
|
||||
TableEngineRef,
|
||||
Arc<RemoteCatalogManager>,
|
||||
TableEngineManagerRef,
|
||||
) {
|
||||
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
|
||||
MITO_ENGINE.to_string(),
|
||||
table_engine.clone(),
|
||||
));
|
||||
let catalog_manager = RemoteCatalogManager::new(engine_manager, node_id, backend.clone());
|
||||
let catalog_manager =
|
||||
RemoteCatalogManager::new(engine_manager.clone(), node_id, backend.clone());
|
||||
catalog_manager.start().await.unwrap();
|
||||
(backend, table_engine, Arc::new(catalog_manager))
|
||||
(
|
||||
backend,
|
||||
table_engine,
|
||||
Arc::new(catalog_manager),
|
||||
engine_manager as Arc<_>,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remote_catalog_default() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_id = 42;
|
||||
let (_, _, catalog_manager) = prepare_components(node_id).await;
|
||||
let (_, _, catalog_manager, _) = prepare_components(node_id).await;
|
||||
assert_eq!(
|
||||
vec![DEFAULT_CATALOG_NAME.to_string()],
|
||||
catalog_manager.catalog_names().unwrap()
|
||||
catalog_manager.catalog_names().await.unwrap()
|
||||
);
|
||||
|
||||
let default_catalog = catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
vec![DEFAULT_SCHEMA_NAME.to_string()],
|
||||
default_catalog.schema_names().unwrap()
|
||||
default_catalog.schema_names().await.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
@@ -114,7 +126,7 @@ mod tests {
|
||||
async fn test_remote_catalog_register_nonexistent() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_id = 42;
|
||||
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
|
||||
// register a new table with a nonexistent catalog
|
||||
let catalog_name = "nonexistent_catalog".to_string();
|
||||
let schema_name = "nonexistent_schema".to_string();
|
||||
@@ -159,18 +171,20 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_register_table() {
|
||||
let node_id = 42;
|
||||
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
|
||||
let default_catalog = catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
vec![DEFAULT_SCHEMA_NAME.to_string()],
|
||||
default_catalog.schema_names().unwrap()
|
||||
default_catalog.schema_names().await.unwrap()
|
||||
);
|
||||
|
||||
let default_schema = default_catalog
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
@@ -208,31 +222,36 @@ mod tests {
|
||||
table,
|
||||
};
|
||||
assert!(catalog_manager.register_table(reg_req).await.unwrap());
|
||||
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
|
||||
assert_eq!(
|
||||
vec![table_name],
|
||||
default_schema.table_names().await.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_catalog_schema_table() {
|
||||
let node_id = 42;
|
||||
let (backend, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
let (backend, table_engine, catalog_manager, engine_manager) =
|
||||
prepare_components(node_id).await;
|
||||
|
||||
let catalog_name = "test_catalog".to_string();
|
||||
let schema_name = "nonexistent_schema".to_string();
|
||||
let catalog = Arc::new(RemoteCatalogProvider::new(
|
||||
catalog_name.clone(),
|
||||
backend.clone(),
|
||||
engine_manager.clone(),
|
||||
node_id,
|
||||
));
|
||||
|
||||
// register catalog to catalog manager
|
||||
catalog_manager
|
||||
.register_catalog(catalog_name.clone(), catalog)
|
||||
CatalogManager::register_catalog(&*catalog_manager, catalog_name.clone(), catalog)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
HashSet::<String>::from_iter(
|
||||
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
|
||||
),
|
||||
HashSet::from_iter(catalog_manager.catalog_names().unwrap().into_iter())
|
||||
HashSet::from_iter(catalog_manager.catalog_names().await.unwrap().into_iter())
|
||||
);
|
||||
|
||||
let table_to_register = table_engine
|
||||
@@ -273,24 +292,32 @@ mod tests {
|
||||
|
||||
let new_catalog = catalog_manager
|
||||
.catalog(&catalog_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.expect("catalog should exist since it's already registered");
|
||||
let schema = Arc::new(RemoteSchemaProvider::new(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
node_id,
|
||||
engine_manager,
|
||||
backend.clone(),
|
||||
));
|
||||
|
||||
let prev = new_catalog
|
||||
.register_schema(schema_name.clone(), schema.clone())
|
||||
.await
|
||||
.expect("Register schema should not fail");
|
||||
assert!(prev.is_none());
|
||||
assert!(catalog_manager.register_table(reg_req).await.unwrap());
|
||||
|
||||
assert_eq!(
|
||||
HashSet::from([schema_name.clone()]),
|
||||
new_catalog.schema_names().unwrap().into_iter().collect()
|
||||
new_catalog
|
||||
.schema_names()
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.collect()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,4 +37,4 @@ prost.workspace = true
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.4"
|
||||
version = "0.7"
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_database_client::GreptimeDatabaseClient;
|
||||
use api::v1::health_check_client::HealthCheckClient;
|
||||
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||
use api::v1::HealthCheckRequest;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::ChannelManager;
|
||||
@@ -156,6 +157,11 @@ impl Client {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
Ok(PrometheusGatewayClient::new(channel))
|
||||
}
|
||||
|
||||
pub async fn health_check(&self) -> Result<()> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
let mut client = HealthCheckClient::new(channel);
|
||||
|
||||
@@ -18,14 +18,14 @@ use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use api::v1::{
|
||||
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
|
||||
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
|
||||
RequestHeader,
|
||||
DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery,
|
||||
QueryRequest, RequestHeader,
|
||||
};
|
||||
use arrow_flight::{FlightData, Ticket};
|
||||
use common_error::prelude::*;
|
||||
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
use common_telemetry::logging;
|
||||
use common_telemetry::{logging, timer};
|
||||
use futures_util::{TryFutureExt, TryStreamExt};
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -33,7 +33,7 @@ use snafu::{ensure, ResultExt};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
|
||||
};
|
||||
use crate::{error, Client, Result};
|
||||
use crate::{error, metrics, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Database {
|
||||
@@ -107,6 +107,16 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
|
||||
self.handle(Request::Insert(request)).await
|
||||
}
|
||||
|
||||
pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DELETE);
|
||||
self.handle(Request::Delete(request)).await
|
||||
}
|
||||
|
||||
async fn handle(&self, request: Request) -> Result<u32> {
|
||||
let mut client = self.client.make_database_client()?.inner;
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
@@ -115,7 +125,7 @@ impl Database {
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
dbname: self.dbname.clone(),
|
||||
}),
|
||||
request: Some(Request::Insert(request)),
|
||||
request: Some(request),
|
||||
};
|
||||
let response = client
|
||||
.handle(request)
|
||||
@@ -130,6 +140,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_SQL);
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.to_string())),
|
||||
}))
|
||||
@@ -137,6 +148,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}))
|
||||
@@ -150,6 +162,7 @@ impl Database {
|
||||
end: &str,
|
||||
step: &str,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(PromRangeQuery {
|
||||
query: promql.to_string(),
|
||||
@@ -162,6 +175,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
}))
|
||||
@@ -169,6 +183,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_ALTER);
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
}))
|
||||
@@ -176,6 +191,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
}))
|
||||
@@ -183,6 +199,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_FLUSH_TABLE);
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::FlushTable(expr)),
|
||||
}))
|
||||
@@ -190,6 +207,8 @@ impl Database {
|
||||
}
|
||||
|
||||
async fn do_get(&self, request: Request) -> Result<Output> {
|
||||
// FIXME(paomian): should be added some labels for metrics
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
|
||||
@@ -16,6 +16,7 @@ mod client;
|
||||
mod database;
|
||||
mod error;
|
||||
pub mod load_balance;
|
||||
mod metrics;
|
||||
|
||||
pub use api;
|
||||
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
|
||||
src/client/src/metrics.rs | 25 (new file)
@@ -0,0 +1,25 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! client metrics
|
||||
pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
|
||||
pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
|
||||
pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
|
||||
pub const METRIC_GRPC_DELETE: &str = "grpc.delete";
|
||||
pub const METRIC_GRPC_SQL: &str = "grpc.sql";
|
||||
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
|
||||
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
|
||||
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
|
||||
pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
|
||||
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
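These constants feed the timer! macro from common_telemetry, as the Database methods above show. A hedged sketch of the pattern (timed_rpc is a hypothetical wrapper, illustrative only since the real methods already create the guard internally; the guard records the elapsed time when it is dropped):

use common_telemetry::timer;

async fn timed_rpc(db: &Database, request: InsertRequest) -> Result<u32> {
    // Reported under "grpc.insert" once `_timer` goes out of scope.
    let _timer = timer!(crate::metrics::METRIC_GRPC_INSERT);
    db.insert(request).await
}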
|
||||
@@ -36,6 +36,7 @@ query = { path = "../query" }
|
||||
rustyline = "10.1"
|
||||
serde.workspace = true
|
||||
servers = { path = "../servers" }
|
||||
|
||||
session = { path = "../session" }
|
||||
snafu.workspace = true
|
||||
substrait = { path = "../common/substrait" }
|
||||
|
||||
@@ -18,16 +18,17 @@ use std::fmt;
|
||||
|
||||
use clap::Parser;
|
||||
use cmd::error::Result;
|
||||
use cmd::options::{Options, TopLevelOptions};
|
||||
use cmd::{cli, datanode, frontend, metasrv, standalone};
|
||||
use common_telemetry::logging::{error, info};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[clap(name = "greptimedb", version = print_version())]
|
||||
struct Command {
|
||||
#[clap(long, default_value = "/tmp/greptimedb/logs")]
|
||||
log_dir: String,
|
||||
#[clap(long, default_value = "info")]
|
||||
log_level: String,
|
||||
#[clap(long)]
|
||||
log_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
log_level: Option<String>,
|
||||
#[clap(subcommand)]
|
||||
subcmd: SubCommand,
|
||||
}
|
||||
@@ -63,8 +64,20 @@ impl Application {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
async fn build(self) -> Result<Application> {
|
||||
self.subcmd.build().await
|
||||
async fn build(self, opts: Options) -> Result<Application> {
|
||||
self.subcmd.build(opts).await
|
||||
}
|
||||
|
||||
fn load_options(&self) -> Result<Options> {
|
||||
let top_level_opts = self.top_level_options();
|
||||
self.subcmd.load_options(top_level_opts)
|
||||
}
|
||||
|
||||
fn top_level_options(&self) -> TopLevelOptions {
|
||||
TopLevelOptions {
|
||||
log_dir: self.log_dir.clone(),
|
||||
log_level: self.log_level.clone(),
|
||||
}
|
||||
}
|
||||
}
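The log flags are now optional, so leaving --log-dir/--log-level unset no longer overrides config-file values; the old hard-coded defaults ("/tmp/greptimedb/logs", "info") instead come from LoggingOptions::default(). A small sketch of the resulting precedence, mirroring the merge each subcommand's load_options performs (apply_top_level is a hypothetical helper):

use common_telemetry::logging::LoggingOptions;

fn apply_top_level(mut logging: LoggingOptions, top: TopLevelOptions) -> LoggingOptions {
    // Only flags that were actually passed override the config file.
    if let Some(dir) = top.log_dir {
        logging.dir = dir;
    }
    if let Some(level) = top.log_level {
        logging.level = level;
    }
    logging
}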
|
||||
|
||||
@@ -83,28 +96,40 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Application> {
|
||||
match self {
|
||||
SubCommand::Datanode(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
async fn build(self, opts: Options) -> Result<Application> {
|
||||
match (self, opts) {
|
||||
(SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
|
||||
let app = cmd.build(*dn_opts).await?;
|
||||
Ok(Application::Datanode(app))
|
||||
}
|
||||
SubCommand::Frontend(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
(SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
|
||||
let app = cmd.build(*fe_opts).await?;
|
||||
Ok(Application::Frontend(app))
|
||||
}
|
||||
SubCommand::Metasrv(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
(SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
|
||||
let app = cmd.build(*meta_opts).await?;
|
||||
Ok(Application::Metasrv(app))
|
||||
}
|
||||
SubCommand::Standalone(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
(SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
|
||||
let app = cmd.build(opts.fe_opts, opts.dn_opts).await?;
|
||||
Ok(Application::Standalone(app))
|
||||
}
|
||||
SubCommand::Cli(cmd) => {
|
||||
(SubCommand::Cli(cmd), Options::Cli(_)) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Cli(app))
|
||||
}
|
||||
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Datanode(cmd) => cmd.load_options(top_level_opts),
|
||||
SubCommand::Frontend(cmd) => cmd.load_options(top_level_opts),
|
||||
SubCommand::Metasrv(cmd) => cmd.load_options(top_level_opts),
|
||||
SubCommand::Standalone(cmd) => cmd.load_options(top_level_opts),
|
||||
SubCommand::Cli(cmd) => cmd.load_options(top_level_opts),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -144,14 +169,15 @@ async fn main() -> Result<()> {
|
||||
// TODO(dennis):
|
||||
// 1. adds ip/port to app
|
||||
let app_name = &cmd.subcmd.to_string();
|
||||
let log_dir = &cmd.log_dir;
|
||||
let log_level = &cmd.log_level;
|
||||
|
||||
let opts = cmd.load_options()?;
|
||||
let logging_opts = opts.logging_options();
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
common_telemetry::init_default_metrics_recorder();
|
||||
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);
|
||||
let _guard = common_telemetry::init_global_logging(app_name, logging_opts);
|
||||
|
||||
let mut app = cmd.build().await?;
|
||||
let mut app = cmd.build(opts).await?;
|
||||
|
||||
tokio::select! {
|
||||
result = app.run() => {
|
||||
|
||||
@@ -17,9 +17,11 @@ mod helper;
|
||||
mod repl;
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
pub use repl::Repl;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
|
||||
pub struct Instance {
|
||||
repl: Repl,
|
||||
@@ -45,6 +47,17 @@ impl Command {
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.cmd.build().await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut logging_opts = LoggingOptions::default();
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
logging_opts.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_opts.log_level {
|
||||
logging_opts.level = level;
|
||||
}
|
||||
Ok(Options::Cli(Box::new(logging_opts)))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
@@ -76,3 +89,46 @@ impl AttachCommand {
|
||||
Ok(Instance { repl })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_load_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
|
||||
assert_eq!("info", logging_opts.level);
|
||||
assert!(!logging_opts.enable_jaeger_tracing);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
|
||||
assert_eq!("debug", logging_opts.level);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,14 +16,13 @@ use std::time::Duration;
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging;
|
||||
use datanode::datanode::{
|
||||
Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
|
||||
};
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
||||
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
@@ -50,8 +49,12 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
pub async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
|
||||
self.subcmd.build(opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,9 +64,15 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -87,53 +96,43 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
wal_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
procedure_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("Datanode start command: {:#?}", self);
|
||||
|
||||
let opts: DatanodeOptions = self.try_into()?;
|
||||
|
||||
logging::info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
|
||||
|
||||
Ok(Instance { datanode })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
type Error = Error;
|
||||
fn try_from(cmd: StartCommand) -> Result<Self> {
|
||||
let mut opts: DatanodeOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: DatanodeOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
DatanodeOptions::default()
|
||||
};
|
||||
|
||||
if let Some(addr) = cmd.rpc_addr {
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_opts.log_level {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.rpc_addr.clone() {
|
||||
opts.rpc_addr = addr;
|
||||
}
|
||||
|
||||
if cmd.rpc_hostname.is_some() {
|
||||
opts.rpc_hostname = cmd.rpc_hostname;
|
||||
if self.rpc_hostname.is_some() {
|
||||
opts.rpc_hostname = self.rpc_hostname.clone();
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.mysql_addr {
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
opts.mysql_addr = addr;
|
||||
}
|
||||
|
||||
if let Some(node_id) = cmd.node_id {
|
||||
if let Some(node_id) = self.node_id {
|
||||
opts.node_id = Some(node_id);
|
||||
}
|
||||
|
||||
if let Some(meta_addr) = cmd.metasrv_addr {
|
||||
if let Some(meta_addr) = self.metasrv_addr.clone() {
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = meta_addr
|
||||
@@ -151,33 +150,38 @@ impl TryFrom<StartCommand> for DatanodeOptions {
|
||||
.fail();
|
||||
}
|
||||
|
||||
if let Some(data_dir) = cmd.data_dir {
|
||||
if let Some(data_dir) = self.data_dir.clone() {
|
||||
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
|
||||
}
|
||||
|
||||
if let Some(wal_dir) = cmd.wal_dir {
|
||||
if let Some(wal_dir) = self.wal_dir.clone() {
|
||||
opts.wal.dir = wal_dir;
|
||||
}
|
||||
if let Some(procedure_dir) = cmd.procedure_dir {
|
||||
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
|
||||
}
|
||||
if let Some(http_addr) = cmd.http_addr {
|
||||
if let Some(http_addr) = self.http_addr.clone() {
|
||||
opts.http_opts.addr = http_addr
|
||||
}
|
||||
if let Some(http_timeout) = cmd.http_timeout {
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout)
|
||||
}
|
||||
|
||||
// Disable dashboard in datanode.
|
||||
opts.http_opts.disable_dashboard = true;
|
||||
|
||||
Ok(opts)
|
||||
Ok(Options::Datanode(Box::new(opts)))
|
||||
}
|
||||
|
||||
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
|
||||
logging::info!("Datanode start command: {:#?}", self);
|
||||
logging::info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
|
||||
|
||||
Ok(Instance { datanode })
|
||||
}
|
||||
}
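Option loading and instance construction are now separate steps. A hedged wiring sketch (start_datanode is hypothetical; the let-else unpack mirrors the tests below):

async fn start_datanode(cmd: StartCommand) -> Result<Instance> {
    // load_options only borrows the command; build consumes it together with
    // the concrete DatanodeOptions unpacked from the Options enum.
    let Options::Datanode(opts) = cmd.load_options(TopLevelOptions::default())? else {
        unreachable!("a datanode StartCommand always yields Options::Datanode")
    };
    cmd.build(*opts).await
}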
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -228,6 +232,10 @@ mod tests {
|
||||
checkpoint_margin = 9
|
||||
gc_duration = '7s'
|
||||
checkpoint_on_startup = true
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -235,7 +243,10 @@ mod tests {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let options: DatanodeOptions = cmd.try_into().unwrap();
|
||||
|
||||
let Options::Datanode(options) =
|
||||
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
|
||||
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
|
||||
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
|
||||
assert_eq!(2, options.mysql_runtime_size);
|
||||
@@ -283,37 +294,60 @@ mod tests {
|
||||
},
|
||||
options.storage.manifest,
|
||||
);
|
||||
|
||||
assert_eq!("debug".to_string(), options.logging.level);
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from_cmd() {
|
||||
assert_eq!(
|
||||
Mode::Standalone,
|
||||
DatanodeOptions::try_from(StartCommand::default())
|
||||
.unwrap()
|
||||
.mode
|
||||
);
|
||||
if let Options::Datanode(opt) = StartCommand::default()
|
||||
.load_options(TopLevelOptions::default())
|
||||
.unwrap()
|
||||
{
|
||||
assert_eq!(Mode::Standalone, opt.mode)
|
||||
}
|
||||
|
||||
let mode = DatanodeOptions::try_from(StartCommand {
|
||||
if let Options::Datanode(opt) = (StartCommand {
|
||||
node_id: Some(42),
|
||||
metasrv_addr: Some("127.0.0.1:3002".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(TopLevelOptions::default())
|
||||
.unwrap()
|
||||
.mode;
|
||||
assert_matches!(mode, Mode::Distributed);
|
||||
{
|
||||
assert_eq!(Mode::Distributed, opt.mode)
|
||||
}
|
||||
|
||||
assert!(DatanodeOptions::try_from(StartCommand {
|
||||
assert!((StartCommand {
|
||||
metasrv_addr: Some("127.0.0.1:3002".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(TopLevelOptions::default())
|
||||
.is_err());
|
||||
|
||||
// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
|
||||
DatanodeOptions::try_from(StartCommand {
|
||||
(StartCommand {
|
||||
node_id: Some(42),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(TopLevelOptions::default())
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand::default();
|
||||
|
||||
let options = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let logging_opt = options.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,12 +26,12 @@ use frontend::postgres::PostgresOptions;
|
||||
use frontend::prom::PromOptions;
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::auth::UserProviderRef;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::{auth, Mode};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, IllegalAuthConfigSnafu, Result};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
@@ -61,8 +61,12 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
pub async fn build(self, opts: FrontendOptions) -> Result<Instance> {
|
||||
self.subcmd.build(opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,9 +76,15 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -108,94 +118,74 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
user_provider: Option<String>,
|
||||
#[clap(long)]
|
||||
disable_dashboard: bool,
|
||||
disable_dashboard: Option<bool>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let opts: FrontendOptions = self.try_into()?;
|
||||
|
||||
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
instance
|
||||
.build_servers(&opts, plugins)
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
Ok(Instance { frontend: instance })
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
|
||||
let mut plugins = Plugins::new();
|
||||
|
||||
if let Some(provider) = user_provider {
|
||||
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
|
||||
plugins.insert::<UserProviderRef>(provider);
|
||||
}
|
||||
Ok(plugins)
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for FrontendOptions {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(cmd: StartCommand) -> Result<Self> {
|
||||
let mut opts: FrontendOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: FrontendOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
FrontendOptions::default()
|
||||
};
|
||||
|
||||
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
|
||||
|
||||
let mut http_options = HttpOptions {
|
||||
disable_dashboard: cmd.disable_dashboard,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
if let Some(addr) = cmd.http_addr {
|
||||
http_options.addr = addr;
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_opts.log_level {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
opts.http_options = Some(http_options);
|
||||
let tls_option = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(addr) = cmd.grpc_addr {
|
||||
if let Some(addr) = self.http_addr.clone() {
|
||||
opts.http_options.get_or_insert_with(Default::default).addr = addr;
|
||||
}
|
||||
|
||||
if let Some(disable_dashboard) = self.disable_dashboard {
|
||||
opts.http_options
|
||||
.get_or_insert_with(Default::default)
|
||||
.disable_dashboard = disable_dashboard;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.grpc_addr.clone() {
|
||||
opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.mysql_addr {
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
tls: tls_option.clone(),
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = cmd.prom_addr {
|
||||
if let Some(addr) = self.prom_addr.clone() {
|
||||
opts.prom_options = Some(PromOptions { addr });
|
||||
}
|
||||
if let Some(addr) = cmd.postgres_addr {
|
||||
if let Some(addr) = self.postgres_addr.clone() {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
tls: tls_option,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = cmd.opentsdb_addr {
|
||||
if let Some(addr) = self.opentsdb_addr.clone() {
|
||||
opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(enable) = cmd.influxdb_enable {
|
||||
if let Some(enable) = self.influxdb_enable {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable });
|
||||
}
|
||||
if let Some(metasrv_addr) = cmd.metasrv_addr {
|
||||
if let Some(metasrv_addr) = self.metasrv_addr.clone() {
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = metasrv_addr
|
||||
@@ -205,8 +195,34 @@ impl TryFrom<StartCommand> for FrontendOptions {
|
||||
.collect::<Vec<_>>();
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
Ok(opts)
|
||||
|
||||
Ok(Options::Frontend(Box::new(opts)))
|
||||
}
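Because http_options is now an Option, flags materialize it lazily instead of assuming a pre-built struct. A one-function sketch of the get_or_insert_with pattern used above (set_http_addr is a hypothetical helper):

fn set_http_addr(opts: &mut FrontendOptions, addr: String) {
    // First use creates HttpOptions::default(); later uses reuse the same value.
    opts.http_options.get_or_insert_with(Default::default).addr = addr;
}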
|
||||
|
||||
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
|
||||
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
instance
|
||||
.build_servers(&opts)
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
Ok(Instance { frontend: instance })
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
|
||||
let plugins = Plugins::new();
|
||||
|
||||
if let Some(provider) = user_provider {
|
||||
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
|
||||
plugins.insert::<UserProviderRef>(provider);
|
||||
}
|
||||
Ok(plugins)
|
||||
}
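A hedged sketch of driving the plugin loader directly (the provider string matches the one exercised in the tests below; with None the returned registry is simply left empty):

fn auth_plugins_for_tests() -> Result<Plugins> {
    // "static_user_provider:cmd:<user>=<password>" registers a UserProviderRef
    // behind the scenes via auth::user_provider_from_option.
    load_frontend_plugins(&Some("static_user_provider:cmd:test=test".to_string()))
}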
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -235,10 +251,12 @@ mod tests {
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: false,
|
||||
disable_dashboard: Some(false),
|
||||
};
|
||||
|
||||
let opts: FrontendOptions = command.try_into().unwrap();
|
||||
let Options::Frontend(opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
|
||||
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
|
||||
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
|
||||
assert_eq!(
|
||||
@@ -281,6 +299,10 @@ mod tests {
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -298,10 +320,11 @@ mod tests {
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: false,
|
||||
disable_dashboard: Some(false),
|
||||
};
|
||||
|
||||
let fe_opts = FrontendOptions::try_from(command).unwrap();
|
||||
let Options::Frontend(fe_opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
assert_eq!(Mode::Distributed, fe_opts.mode);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4000".to_string(),
|
||||
@@ -311,6 +334,9 @@ mod tests {
|
||||
Duration::from_secs(30),
|
||||
fe_opts.http_options.as_ref().unwrap().timeout
|
||||
);
|
||||
|
||||
assert_eq!("debug".to_string(), fe_opts.logging.level);
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -329,7 +355,7 @@ mod tests {
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
disable_dashboard: false,
|
||||
disable_dashboard: Some(false),
|
||||
};
|
||||
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
@@ -340,8 +366,42 @@ mod tests {
|
||||
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.authenticate(
|
||||
Identity::UserId("test", None),
|
||||
Password::PlainText("test".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: Some(false),
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let logging_opt = options.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,5 +19,6 @@ pub mod datanode;
|
||||
pub mod error;
|
||||
pub mod frontend;
|
||||
pub mod metasrv;
|
||||
pub mod options;
|
||||
pub mod standalone;
|
||||
mod toml_loader;
|
||||
|
||||
@@ -15,12 +15,13 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use common_telemetry::logging;
|
||||
use meta_srv::bootstrap::MetaSrvInstance;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::error::Result;
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::{error, toml_loader};
|
||||
|
||||
pub struct Instance {
|
||||
@@ -50,8 +51,12 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
pub async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
self.subcmd.build(opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,9 +66,15 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -89,62 +100,62 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
let opts: MetaSrvOptions = self.try_into()?;
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for MetaSrvOptions {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(cmd: StartCommand) -> Result<Self> {
|
||||
let mut opts: MetaSrvOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: MetaSrvOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
MetaSrvOptions::default()
|
||||
};
|
||||
|
||||
if let Some(addr) = cmd.bind_addr {
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_opts.log_level {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.bind_addr.clone() {
|
||||
opts.bind_addr = addr;
|
||||
}
|
||||
if let Some(addr) = cmd.server_addr {
|
||||
if let Some(addr) = self.server_addr.clone() {
|
||||
opts.server_addr = addr;
|
||||
}
|
||||
if let Some(addr) = cmd.store_addr {
|
||||
if let Some(addr) = self.store_addr.clone() {
|
||||
opts.store_addr = addr;
|
||||
}
|
||||
if let Some(selector_type) = &cmd.selector {
|
||||
if let Some(selector_type) = &self.selector {
|
||||
opts.selector = selector_type[..]
|
||||
.try_into()
|
||||
.context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
|
||||
info!("Using {} selector", selector_type);
|
||||
}
|
||||
|
||||
if cmd.use_memory_store {
|
||||
warn!("Using memory store for Meta. Make sure you are in running tests.");
|
||||
if self.use_memory_store {
|
||||
opts.use_memory_store = true;
|
||||
}
|
||||
|
||||
if let Some(http_addr) = cmd.http_addr {
|
||||
if let Some(http_addr) = self.http_addr.clone() {
|
||||
opts.http_opts.addr = http_addr;
|
||||
}
|
||||
if let Some(http_timeout) = cmd.http_timeout {
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
opts.http_opts.disable_dashboard = true;
|
||||
|
||||
Ok(opts)
|
||||
Ok(Options::Metasrv(Box::new(opts)))
|
||||
}
|
||||
|
||||
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -169,9 +180,10 @@ mod tests {
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
|
||||
assert_eq!(SelectorType::LoadBased, options.selector);
|
||||
}
|
||||
@@ -186,6 +198,10 @@ mod tests {
|
||||
datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
use_memory_store = false
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -199,11 +215,40 @@ mod tests {
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
|
||||
assert_eq!(15, options.datanode_lease_secs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug".to_string(), options.logging.level);
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let logging_opt = options.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
}
|
||||
|
||||
src/cmd/src/options.rs | 49 (new file)
@@ -0,0 +1,49 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_telemetry::logging::LoggingOptions;
use datanode::datanode::DatanodeOptions;
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetaSrvOptions;

pub struct MixOptions {
    pub fe_opts: FrontendOptions,
    pub dn_opts: DatanodeOptions,
    pub logging: LoggingOptions,
}

pub enum Options {
    Datanode(Box<DatanodeOptions>),
    Frontend(Box<FrontendOptions>),
    Metasrv(Box<MetaSrvOptions>),
    Standalone(Box<MixOptions>),
    Cli(Box<LoggingOptions>),
}

impl Options {
    pub fn logging_options(&self) -> &LoggingOptions {
        match self {
            Options::Datanode(opts) => &opts.logging,
            Options::Frontend(opts) => &opts.logging,
            Options::Metasrv(opts) => &opts.logging,
            Options::Standalone(opts) => &opts.logging,
            Options::Cli(opts) => opts,
        }
    }
}

#[derive(Clone, Debug, Default)]
pub struct TopLevelOptions {
    pub log_dir: Option<String>,
    pub log_level: Option<String>,
}
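
A short sketch of how these pieces fit together: `TopLevelOptions` carries the global `--log-dir`/`--log-level` flags, and each subcommand folds them into the `LoggingOptions` reachable through `Options::logging_options`. The helper below is illustrative only, not part of this changeset:

    // Mirrors what the individual `load_options` implementations do with the
    // top-level flags before returning their Options variant.
    fn apply_top_level(top: TopLevelOptions, mut logging: LoggingOptions) -> LoggingOptions {
        if let Some(dir) = top.log_dir {
            logging.dir = dir;
        }
        if let Some(level) = top.log_level {
            logging.level = level;
        }
        logging
    }
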
@@ -17,6 +17,7 @@ use std::sync::Arc;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
@@ -35,10 +36,11 @@ use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFrontendSnafu,
|
||||
IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
|
||||
StartFrontendSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::options::{MixOptions, Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
#[derive(Parser)]
|
||||
@@ -48,8 +50,16 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
pub async fn build(
|
||||
self,
|
||||
fe_opts: FrontendOptions,
|
||||
dn_opts: DatanodeOptions,
|
||||
) -> Result<Instance> {
|
||||
self.subcmd.build(fe_opts, dn_opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_options)
|
||||
}
|
||||
}
|
||||
|
||||
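
Start-up is now a two-step affair: resolve options first (so logging can be initialized early), then build. A hedged sketch of the flow, with illustrative local names:

    async fn start(cmd: Command) -> Result<Instance> {
        // 1. Resolve configuration; the Standalone variant wraps a MixOptions.
        let Options::Standalone(mix) = cmd.load_options(TopLevelOptions::default())? else {
            unreachable!()
        };
        let MixOptions { fe_opts, dn_opts, logging: _logging } = *mix;
        // 2. Build the instance from the already-resolved options.
        cmd.build(fe_opts, dn_opts).await
    }
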
@@ -59,9 +69,15 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
SubCommand::Start(cmd) => cmd.build(fe_opts, dn_opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_options),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -81,7 +97,8 @@ pub struct StandaloneOptions {
|
||||
pub prom_options: Option<PromOptions>,
|
||||
pub wal: WalConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub procedure: Option<ProcedureConfig>,
|
||||
pub procedure: ProcedureConfig,
|
||||
pub logging: LoggingOptions,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -99,7 +116,8 @@ impl Default for StandaloneOptions {
|
||||
prom_options: Some(PromOptions::default()),
|
||||
wal: WalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
procedure: None,
|
||||
procedure: ProcedureConfig::default(),
|
||||
logging: LoggingOptions::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -117,6 +135,7 @@ impl StandaloneOptions {
|
||||
prometheus_options: self.prometheus_options,
|
||||
prom_options: self.prom_options,
|
||||
meta_client_options: None,
|
||||
logging: self.logging,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -196,21 +215,108 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
let enable_memory_catalog = self.enable_memory_catalog;
|
||||
let config_file = self.config_file.clone();
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let fe_opts = FrontendOptions::try_from(self)?;
|
||||
let dn_opts: DatanodeOptions = {
|
||||
let mut opts: StandaloneOptions = if let Some(path) = config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
opts.enable_memory_catalog = enable_memory_catalog;
|
||||
opts.datanode_options()
|
||||
let config_file = &self.config_file;
|
||||
let mut opts: StandaloneOptions = if let Some(path) = config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
|
||||
opts.enable_memory_catalog = enable_memory_catalog;
|
||||
|
||||
let mut fe_opts = opts.clone().frontend_options();
|
||||
let mut logging = opts.logging.clone();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
if let Some(dir) = top_level_options.log_dir {
|
||||
logging.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_options.log_level {
|
||||
logging.level = level;
|
||||
}
|
||||
|
||||
fe_opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(addr) = self.http_addr.clone() {
|
||||
fe_opts.http_options = Some(HttpOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = self.rpc_addr.clone() {
|
||||
// frontend grpc addr conflicts with the datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
if addr == datanode_grpc_addr {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
fe_opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
fe_opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = self.prom_addr.clone() {
|
||||
fe_opts.prom_options = Some(PromOptions { addr })
|
||||
}
|
||||
|
||||
if let Some(addr) = self.postgres_addr.clone() {
|
||||
fe_opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = self.opentsdb_addr.clone() {
|
||||
fe_opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if self.influxdb_enable {
|
||||
fe_opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
}
|
||||
|
||||
let tls_option = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(mut mysql_options) = fe_opts.mysql_options {
|
||||
mysql_options.tls = tls_option.clone();
|
||||
fe_opts.mysql_options = Some(mysql_options);
|
||||
}
|
||||
|
||||
if let Some(mut postgres_options) = fe_opts.postgres_options {
|
||||
postgres_options.tls = tls_option;
|
||||
fe_opts.postgres_options = Some(postgres_options);
|
||||
}
|
||||
|
||||
Ok(Options::Standalone(Box::new(MixOptions {
|
||||
fe_opts,
|
||||
dn_opts,
|
||||
logging,
|
||||
})))
|
||||
}
|
||||
|
||||
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
|
||||
info!(
|
||||
"Standalone frontend options: {:#?}, datanode options: {:#?}",
|
||||
fe_opts, dn_opts
|
||||
@@ -223,7 +329,7 @@ impl StartCommand {
|
||||
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
|
||||
|
||||
frontend
|
||||
.build_servers(&fe_opts, plugins)
|
||||
.build_servers(&fe_opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -243,143 +349,17 @@ async fn build_frontend(
|
||||
Ok(frontend_instance)
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for FrontendOptions {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(cmd: StartCommand) -> std::result::Result<Self, Self::Error> {
|
||||
let opts: StandaloneOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
|
||||
let mut opts = opts.frontend_options();
|
||||
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(addr) = cmd.http_addr {
|
||||
opts.http_options = Some(HttpOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = cmd.rpc_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
if addr == datanode_grpc_addr {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.mysql_addr {
|
||||
opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr })
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.postgres_addr {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.opentsdb_addr {
|
||||
opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if cmd.influxdb_enable {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
}
|
||||
|
||||
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
|
||||
|
||||
if let Some(mut mysql_options) = opts.mysql_options {
|
||||
mysql_options.tls = tls_option.clone();
|
||||
opts.mysql_options = Some(mysql_options);
|
||||
}
|
||||
|
||||
if let Some(mut postgres_options) = opts.postgres_options {
|
||||
postgres_options.tls = tls_option;
|
||||
opts.postgres_options = Some(postgres_options);
|
||||
}
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use servers::auth::{Identity, Password, UserProviderRef};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_read_config_file() {
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/standalone.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
};
|
||||
|
||||
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
|
||||
assert_eq!(Mode::Standalone, fe_opts.mode);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4000".to_string(),
|
||||
fe_opts.http_options.as_ref().unwrap().addr
|
||||
);
|
||||
assert_eq!(
|
||||
Duration::from_secs(30),
|
||||
fe_opts.http_options.as_ref().unwrap().timeout
|
||||
);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4001".to_string(),
|
||||
fe_opts.grpc_options.unwrap().addr
|
||||
);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4002",
|
||||
fe_opts.mysql_options.as_ref().unwrap().addr
|
||||
);
|
||||
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
|
||||
assert_eq!(
|
||||
None,
|
||||
fe_opts.mysql_options.as_ref().unwrap().reject_no_database
|
||||
);
|
||||
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
@@ -405,7 +385,10 @@ mod tests {
|
||||
assert!(provider.is_some());
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.authenticate(
|
||||
Identity::UserId("test", None),
|
||||
Password::PlainText("test".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
@@ -416,4 +399,136 @@ mod tests {
|
||||
let toml_string = toml::to_string(&opts).unwrap();
|
||||
let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
|
||||
enable_memory_catalog = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
type = "S3"
|
||||
access_key_id = "access_key_id"
|
||||
secret_access_key = "secret_access_key"
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[storage.manifest]
|
||||
checkpoint_margin = 9
|
||||
gc_duration = '7s'
|
||||
checkpoint_on_startup = true
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
};
|
||||
|
||||
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
let fe_opts = options.fe_opts;
|
||||
let dn_opts = options.dn_opts;
|
||||
let logging_opts = options.logging;
|
||||
assert_eq!(Mode::Standalone, fe_opts.mode);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4000".to_string(),
|
||||
fe_opts.http_options.as_ref().unwrap().addr
|
||||
);
|
||||
assert_eq!(
|
||||
Duration::from_secs(30),
|
||||
fe_opts.http_options.as_ref().unwrap().timeout
|
||||
);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4001".to_string(),
|
||||
fe_opts.grpc_options.unwrap().addr
|
||||
);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4002",
|
||||
fe_opts.mysql_options.as_ref().unwrap().addr
|
||||
);
|
||||
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
|
||||
assert_eq!(
|
||||
None,
|
||||
fe_opts.mysql_options.as_ref().unwrap().reject_no_database
|
||||
);
|
||||
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir);
|
||||
match &dn_opts.storage.store {
|
||||
datanode::datanode::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
"Secret([REDACTED alloc::string::String])".to_string(),
|
||||
format!("{:?}", s3_config.access_key_id)
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!("debug".to_string(), logging_opts.level);
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: None,
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
};
|
||||
|
||||
let Options::Standalone(opts) = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap() else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,6 +92,14 @@ impl StringBytes {
|
||||
pub fn as_utf8(&self) -> &str {
|
||||
unsafe { std::str::from_utf8_unchecked(&self.0) }
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for StringBytes {
|
||||
@@ -178,6 +186,17 @@ mod tests {
|
||||
assert_eq!(world, &bytes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bytes_len() {
|
||||
let hello = b"hello".to_vec();
|
||||
let bytes = Bytes::from(hello.clone());
|
||||
assert_eq!(bytes.len(), hello.len());
|
||||
|
||||
let zero = b"".to_vec();
|
||||
let bytes = Bytes::from(zero);
|
||||
assert!(bytes.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_string_bytes_from() {
|
||||
let hello = "hello".to_string();
|
||||
@@ -191,6 +210,17 @@ mod tests {
|
||||
assert_eq!(&bytes, world);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_string_bytes_len() {
|
||||
let hello = "hello".to_string();
|
||||
let bytes = StringBytes::from(hello.clone());
|
||||
assert_eq!(bytes.len(), hello.len());
|
||||
|
||||
let zero = "".to_string();
|
||||
let bytes = StringBytes::from(zero);
|
||||
assert!(bytes.is_empty());
|
||||
}
|
||||
|
||||
fn check_str(expect: &str, given: &str) {
|
||||
assert_eq!(expect, given);
|
||||
}
|
||||
|
||||
@@ -18,6 +18,60 @@ pub mod bytes;
#[allow(clippy::all)]
pub mod readable_size;

use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};

pub use bit_vec::BitVec;

pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;
#[derive(Default, Clone)]
pub struct Plugins {
    inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
}

impl Plugins {
    pub fn new() -> Self {
        Self {
            inner: Arc::new(Mutex::new(anymap::Map::new())),
        }
    }

    fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
        self.inner.lock().unwrap()
    }

    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
        self.lock().insert(value);
    }

    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
        let binding = self.lock();
        binding.get::<T>().cloned()
    }

    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
    where
        F: FnOnce(Option<&mut T>) -> R,
    {
        let mut binding = self.lock();
        let opt = binding.get_mut::<T>();
        mapper(opt)
    }

    pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
    where
        F: FnOnce(&T) -> R,
    {
        let binding = self.lock();
        binding.get::<T>().map(mapper)
    }

    pub fn len(&self) -> usize {
        let binding = self.lock();
        binding.len()
    }

    pub fn is_empty(&self) -> bool {
        let binding = self.lock();
        binding.is_empty()
    }
}
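
A small usage sketch of the new thread-safe `Plugins` container; the `Config` type here is made up for the example:

    #[derive(Clone)]
    struct Config {
        name: String,
    }

    fn plugins_demo() {
        let plugins = Plugins::new();
        plugins.insert(Config { name: "demo".to_string() });

        // `get` clones the stored value out of the map.
        let config = plugins.get::<Config>().unwrap();
        assert_eq!("demo", config.name);

        // `map_mut` gives temporary mutable access under the internal lock.
        plugins.map_mut::<Config, _, _>(|c| {
            if let Some(c) = c {
                c.name.push_str("-updated");
            }
        });
        assert_eq!(1, plugins.len());
    }
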
@@ -21,6 +21,8 @@ pub const DEFAULT_SCHEMA_NAME: &str = "public";
|
||||
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
|
||||
/// User defined table id starts from this value.
|
||||
pub const MIN_USER_TABLE_ID: u32 = 1024;
|
||||
/// the max system table id
|
||||
pub const MAX_SYS_TABLE_ID: u32 = MIN_USER_TABLE_ID - 1;
|
||||
/// system_catalog table id
|
||||
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
|
||||
/// scripts table id
|
||||
@@ -28,3 +30,7 @@ pub const SCRIPTS_TABLE_ID: u32 = 1;
|
||||
|
||||
pub const MITO_ENGINE: &str = "mito";
|
||||
pub const IMMUTABLE_FILE_ENGINE: &str = "file";
|
||||
|
||||
pub const SEMANTIC_TYPE_PRIMARY_KEY: &str = "PRIMARY KEY";
|
||||
pub const SEMANTIC_TYPE_FIELD: &str = "FIELD";
|
||||
pub const SEMANTIC_TYPE_TIME_INDEX: &str = "TIME INDEX";
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use consts::DEFAULT_CATALOG_NAME;
|
||||
|
||||
pub mod consts;
|
||||
pub mod error;
|
||||
|
||||
@@ -20,3 +22,23 @@ pub mod error;
|
||||
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
|
||||
format!("{catalog}.{schema}.{table}")
|
||||
}
|
||||
|
||||
/// Build db name from catalog and schema string
|
||||
pub fn build_db_string(catalog: &str, schema: &str) -> String {
|
||||
if catalog == DEFAULT_CATALOG_NAME {
|
||||
schema.to_string()
|
||||
} else {
|
||||
format!("{catalog}-{schema}")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_db_string() {
|
||||
assert_eq!("test", build_db_string(DEFAULT_CATALOG_NAME, "test"));
|
||||
assert_eq!("a0b1c2d3-test", build_db_string("a0b1c2d3", "test"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,9 +16,12 @@ async-compression = { version = "0.3", features = [
|
||||
"tokio",
|
||||
] }
|
||||
async-trait.workspace = true
|
||||
bytes = "1.1"
|
||||
common-base = { path = "../base" }
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
datafusion.workspace = true
|
||||
derive_builder = "0.12"
|
||||
futures.workspace = true
|
||||
object-store = { path = "../../object-store" }
|
||||
regex = "1.7"
|
||||
@@ -26,3 +29,6 @@ snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-util.workspace = true
|
||||
url = "2.3"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../test-util" }
|
||||
|
||||
src/common/datasource/src/buffered_writer.rs (new file, 138 lines)
@@ -0,0 +1,138 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use async_trait::async_trait;
|
||||
use datafusion::parquet::format::FileMetaData;
|
||||
use object_store::Writer;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::io::{AsyncWrite, AsyncWriteExt};
|
||||
use tokio_util::compat::Compat;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
pub struct BufferedWriter<T, U> {
|
||||
writer: T,
|
||||
/// None stands for [`BufferedWriter`] closed.
|
||||
encoder: Option<U>,
|
||||
buffer: SharedBuffer,
|
||||
bytes_written: u64,
|
||||
flushed: bool,
|
||||
threshold: usize,
|
||||
}
|
||||
|
||||
pub trait DfRecordBatchEncoder {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ArrowWriterCloser {
|
||||
async fn close(mut self) -> Result<FileMetaData>;
|
||||
}
|
||||
|
||||
pub type DefaultBufferedWriter<E> = BufferedWriter<Compat<Writer>, E>;
|
||||
|
||||
impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder + ArrowWriterCloser>
|
||||
BufferedWriter<T, U>
|
||||
{
|
||||
pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
|
||||
let encoder = self
|
||||
.encoder
|
||||
.take()
|
||||
.context(error::BufferedWriterClosedSnafu)?;
|
||||
let metadata = encoder.close().await?;
|
||||
let written = self.try_flush(true).await?;
|
||||
// It's important to shut down: this flushes all pending writes.
|
||||
self.close().await?;
|
||||
Ok((metadata, written))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncWrite + Send + Unpin, U: DfRecordBatchEncoder> BufferedWriter<T, U> {
|
||||
pub async fn close(&mut self) -> Result<()> {
|
||||
self.writer.shutdown().await.context(error::AsyncWriteSnafu)
|
||||
}
|
||||
|
||||
pub fn new(threshold: usize, buffer: SharedBuffer, encoder: U, writer: T) -> Self {
|
||||
Self {
|
||||
threshold,
|
||||
writer,
|
||||
encoder: Some(encoder),
|
||||
buffer,
|
||||
bytes_written: 0,
|
||||
flushed: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bytes_written(&self) -> u64 {
|
||||
self.bytes_written
|
||||
}
|
||||
|
||||
pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
let encoder = self
|
||||
.encoder
|
||||
.as_mut()
|
||||
.context(error::BufferedWriterClosedSnafu)?;
|
||||
encoder.write(batch)?;
|
||||
self.try_flush(false).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn flushed(&self) -> bool {
|
||||
self.flushed
|
||||
}
|
||||
|
||||
pub async fn try_flush(&mut self, all: bool) -> Result<u64> {
|
||||
let mut bytes_written: u64 = 0;
|
||||
|
||||
// Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
|
||||
// and write to underlying storage.
|
||||
while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
|
||||
let chunk = {
|
||||
let mut buffer = self.buffer.buffer.lock().unwrap();
|
||||
buffer.split_to(self.threshold)
|
||||
};
|
||||
let size = chunk.len();
|
||||
|
||||
self.writer
|
||||
.write_all(&chunk)
|
||||
.await
|
||||
.context(error::AsyncWriteSnafu)?;
|
||||
|
||||
bytes_written += size as u64;
|
||||
}
|
||||
|
||||
if all {
|
||||
bytes_written += self.try_flush_all().await?;
|
||||
}
|
||||
|
||||
self.flushed = bytes_written > 0;
|
||||
self.bytes_written += bytes_written;
|
||||
|
||||
Ok(bytes_written)
|
||||
}
|
||||
|
||||
async fn try_flush_all(&mut self) -> Result<u64> {
|
||||
let remain = self.buffer.buffer.lock().unwrap().split();
|
||||
let size = remain.len();
|
||||
|
||||
self.writer
|
||||
.write_all(&remain)
|
||||
.await
|
||||
.context(error::AsyncWriteSnafu)?;
|
||||
|
||||
Ok(size as u64)
|
||||
}
|
||||
}
|
||||
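
A rough sketch of driving `BufferedWriter` by hand from inside this crate. The encoder stub and `tokio::io::sink()` stand in for a real serializer and a real object-store writer:

    use arrow::record_batch::RecordBatch;

    // Stub encoder: a real one (csv/json/parquet writer) serializes each batch
    // into the SharedBuffer that the BufferedWriter drains.
    struct NoopEncoder;

    impl DfRecordBatchEncoder for NoopEncoder {
        fn write(&mut self, _batch: &RecordBatch) -> Result<()> {
            Ok(())
        }
    }

    async fn write_batches(batches: &[RecordBatch]) -> Result<u64> {
        let threshold = 8 * 1024 * 1024;
        let buffer = SharedBuffer::with_capacity(threshold);
        let mut writer = BufferedWriter::new(threshold, buffer, NoopEncoder, tokio::io::sink());
        for batch in batches {
            // Encodes the batch, then flushes any full threshold-sized chunks.
            writer.write(batch).await?;
        }
        // Flush the remainder and shut the underlying writer down.
        writer.try_flush(true).await?;
        writer.close().await?;
        Ok(writer.bytes_written())
    }
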
@@ -13,24 +13,28 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::io;
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
|
||||
use bytes::Bytes;
|
||||
use futures::Stream;
|
||||
use tokio::io::{AsyncRead, BufReader};
|
||||
use tokio_util::io::{ReaderStream, StreamReader};
|
||||
|
||||
use crate::error::{self, Error, Result};
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum CompressionType {
|
||||
/// Gzip-ed file
|
||||
GZIP,
|
||||
Gzip,
|
||||
/// Bzip2-ed file
|
||||
BZIP2,
|
||||
Bzip2,
|
||||
/// Xz-ed file (liblzma)
|
||||
XZ,
|
||||
Xz,
|
||||
/// Zstd-ed file,
|
||||
ZSTD,
|
||||
Zstd,
|
||||
/// Uncompressed file
|
||||
UNCOMPRESSED,
|
||||
Uncompressed,
|
||||
}
|
||||
|
||||
impl FromStr for CompressionType {
|
||||
@@ -39,11 +43,11 @@ impl FromStr for CompressionType {
|
||||
fn from_str(s: &str) -> Result<Self> {
|
||||
let s = s.to_uppercase();
|
||||
match s.as_str() {
|
||||
"GZIP" | "GZ" => Ok(Self::GZIP),
|
||||
"BZIP2" | "BZ2" => Ok(Self::BZIP2),
|
||||
"XZ" => Ok(Self::XZ),
|
||||
"ZST" | "ZSTD" => Ok(Self::ZSTD),
|
||||
"" => Ok(Self::UNCOMPRESSED),
|
||||
"GZIP" | "GZ" => Ok(Self::Gzip),
|
||||
"BZIP2" | "BZ2" => Ok(Self::Bzip2),
|
||||
"XZ" => Ok(Self::Xz),
|
||||
"ZST" | "ZSTD" => Ok(Self::Zstd),
|
||||
"" => Ok(Self::Uncompressed),
|
||||
_ => error::UnsupportedCompressionTypeSnafu {
|
||||
compression_type: s,
|
||||
}
|
||||
@@ -55,18 +59,18 @@ impl FromStr for CompressionType {
|
||||
impl Display for CompressionType {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(match self {
|
||||
Self::GZIP => "GZIP",
|
||||
Self::BZIP2 => "BZIP2",
|
||||
Self::XZ => "XZ",
|
||||
Self::ZSTD => "ZSTD",
|
||||
Self::UNCOMPRESSED => "",
|
||||
Self::Gzip => "GZIP",
|
||||
Self::Bzip2 => "BZIP2",
|
||||
Self::Xz => "XZ",
|
||||
Self::Zstd => "ZSTD",
|
||||
Self::Uncompressed => "",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl CompressionType {
|
||||
pub const fn is_compressed(&self) -> bool {
|
||||
!matches!(self, &Self::UNCOMPRESSED)
|
||||
!matches!(self, &Self::Uncompressed)
|
||||
}
|
||||
|
||||
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
|
||||
@@ -74,11 +78,32 @@ impl CompressionType {
|
||||
s: T,
|
||||
) -> Box<dyn AsyncRead + Unpin + Send> {
|
||||
match self {
|
||||
CompressionType::GZIP => Box::new(GzipDecoder::new(BufReader::new(s))),
|
||||
CompressionType::BZIP2 => Box::new(BzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::XZ => Box::new(XzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::ZSTD => Box::new(ZstdDecoder::new(BufReader::new(s))),
|
||||
CompressionType::UNCOMPRESSED => Box::new(s),
|
||||
CompressionType::Gzip => Box::new(GzipDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Bzip2 => Box::new(BzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Xz => Box::new(XzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Zstd => Box::new(ZstdDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
|
||||
match self {
|
||||
CompressionType::Gzip => {
|
||||
Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Bzip2 => {
|
||||
Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Xz => {
|
||||
Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Zstd => {
|
||||
Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
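
The renamed variants still parse case-insensitively; a few illustrative assertions:

    use std::str::FromStr;

    fn compression_type_examples() {
        assert_eq!(CompressionType::Gzip, CompressionType::from_str("gz").unwrap());
        assert_eq!(CompressionType::Zstd, CompressionType::from_str("ZSTD").unwrap());
        assert_eq!(CompressionType::Uncompressed, CompressionType::from_str("").unwrap());
        assert!(!CompressionType::Uncompressed.is_compressed());
        assert_eq!("GZIP", CompressionType::Gzip.to_string());
    }
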
@@ -14,7 +14,9 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use arrow_schema::ArrowError;
|
||||
use common_error::prelude::*;
|
||||
use datafusion::parquet::errors::ParquetError;
|
||||
use snafu::Location;
|
||||
use url::ParseError;
|
||||
|
||||
@@ -22,19 +24,32 @@ use url::ParseError;
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported compression type: {}", compression_type))]
|
||||
UnsupportedCompressionType { compression_type: String },
|
||||
UnsupportedCompressionType {
|
||||
compression_type: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
UnsupportedBackendProtocol { protocol: String },
|
||||
UnsupportedBackendProtocol {
|
||||
protocol: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported format protocol: {}", format))]
|
||||
UnsupportedFormat { format: String, location: Location },
|
||||
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String },
|
||||
EmptyHostPath { url: String, location: Location },
|
||||
|
||||
#[snafu(display("Invalid path: {}", path))]
|
||||
InvalidPath { path: String },
|
||||
InvalidPath { path: String, location: Location },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
InvalidUrl { url: String, source: ParseError },
|
||||
InvalidUrl {
|
||||
url: String,
|
||||
source: ParseError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decompression, source: {}", source))]
|
||||
Decompression {
|
||||
@@ -55,6 +70,37 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write object to path: {}, source: {}", path, source))]
|
||||
WriteObject {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write: {}", source))]
|
||||
AsyncWrite {
|
||||
source: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write record batch: {}", source))]
|
||||
WriteRecordBatch {
|
||||
location: Location,
|
||||
source: ArrowError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode record batch: {}", source))]
|
||||
EncodeRecordBatch {
|
||||
location: Location,
|
||||
source: ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read record batch: {}", source))]
|
||||
ReadRecordBatch {
|
||||
location: Location,
|
||||
source: datafusion::error::DataFusionError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read parquet source: {}", source))]
|
||||
ReadParquetSnafu {
|
||||
location: Location,
|
||||
@@ -67,9 +113,8 @@ pub enum Error {
|
||||
source: datafusion::parquet::errors::ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to infer schema from file: {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to infer schema from file, source: {}", source))]
|
||||
InferSchema {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: arrow_schema::ArrowError,
|
||||
},
|
||||
@@ -82,13 +127,32 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String },
|
||||
InvalidConnection { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to join handle: {}", source))]
|
||||
JoinHandle {
|
||||
location: Location,
|
||||
source: tokio::task::JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
|
||||
ParseFormat {
|
||||
key: &'static str,
|
||||
value: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to merge schema: {}", source))]
|
||||
MergeSchema {
|
||||
source: arrow_schema::ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing required field: {}", name))]
|
||||
MissingRequiredField { name: String, location: Location },
|
||||
|
||||
#[snafu(display("Buffered writer closed"))]
|
||||
BufferedWriterClosed { location: Location },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -97,21 +161,32 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
BuildBackend { .. } | ListObjects { .. } | ReadObject { .. } => {
|
||||
StatusCode::StorageUnavailable
|
||||
}
|
||||
BuildBackend { .. }
|
||||
| ListObjects { .. }
|
||||
| ReadObject { .. }
|
||||
| WriteObject { .. }
|
||||
| AsyncWrite { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| UnsupportedCompressionType { .. }
|
||||
| UnsupportedFormat { .. }
|
||||
| InvalidConnection { .. }
|
||||
| InvalidUrl { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. }
|
||||
| InferSchema { .. }
|
||||
| ReadParquetSnafu { .. }
|
||||
| ParquetToSchema { .. } => StatusCode::InvalidArguments,
|
||||
| ParquetToSchema { .. }
|
||||
| ParseFormat { .. }
|
||||
| MergeSchema { .. }
|
||||
| MissingRequiredField { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Decompression { .. } | JoinHandle { .. } => StatusCode::Unexpected,
|
||||
Decompression { .. }
|
||||
| JoinHandle { .. }
|
||||
| ReadRecordBatch { .. }
|
||||
| WriteRecordBatch { .. }
|
||||
| EncodeRecordBatch { .. }
|
||||
| BufferedWriterClosed { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -130,13 +205,23 @@ impl ErrorExt for Error {
|
||||
ParquetToSchema { location, .. } => Some(*location),
|
||||
Decompression { location, .. } => Some(*location),
|
||||
JoinHandle { location, .. } => Some(*location),
|
||||
ParseFormat { location, .. } => Some(*location),
|
||||
MergeSchema { location, .. } => Some(*location),
|
||||
MissingRequiredField { location, .. } => Some(*location),
|
||||
WriteObject { location, .. } => Some(*location),
|
||||
ReadRecordBatch { location, .. } => Some(*location),
|
||||
WriteRecordBatch { location, .. } => Some(*location),
|
||||
AsyncWrite { location, .. } => Some(*location),
|
||||
EncodeRecordBatch { location, .. } => Some(*location),
|
||||
BufferedWriterClosed { location, .. } => Some(*location),
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. }
|
||||
| InvalidUrl { .. }
|
||||
| InvalidConnection { .. }
|
||||
| UnsupportedCompressionType { .. } => None,
|
||||
UnsupportedBackendProtocol { location, .. } => Some(*location),
|
||||
EmptyHostPath { location, .. } => Some(*location),
|
||||
InvalidPath { location, .. } => Some(*location),
|
||||
InvalidUrl { location, .. } => Some(*location),
|
||||
InvalidConnection { location, .. } => Some(*location),
|
||||
UnsupportedCompressionType { location, .. } => Some(*location),
|
||||
UnsupportedFormat { location, .. } => Some(*location),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,16 +15,193 @@
|
||||
pub mod csv;
|
||||
pub mod json;
|
||||
pub mod parquet;
|
||||
#[cfg(test)]
|
||||
pub mod tests;
|
||||
|
||||
pub const DEFAULT_SCHEMA_INFER_MAX_RECORD: usize = 1000;
|
||||
|
||||
use arrow::datatypes::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use object_store::ObjectStore;
|
||||
use std::collections::HashMap;
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
use std::task::Poll;
|
||||
|
||||
use crate::error::Result;
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::{ArrowError, Schema as ArrowSchema};
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes};
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::physical_plan::file_format::FileOpenFuture;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use futures::StreamExt;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use tokio_util::compat::FuturesAsyncWriteCompatExt;
|
||||
|
||||
use self::csv::CsvFormat;
|
||||
use self::json::JsonFormat;
|
||||
use self::parquet::ParquetFormat;
|
||||
use crate::buffered_writer::{BufferedWriter, DfRecordBatchEncoder};
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
||||
pub const FORMAT_DELIMITER: &str = "delimiter";
|
||||
pub const FORMAT_SCHEMA_INFER_MAX_RECORD: &str = "schema_infer_max_record";
|
||||
pub const FORMAT_HAS_HEADER: &str = "has_header";
|
||||
pub const FORMAT_TYPE: &str = "format";
|
||||
pub const FILE_PATTERN: &str = "pattern";
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Format {
|
||||
Csv(CsvFormat),
|
||||
Json(JsonFormat),
|
||||
Parquet(ParquetFormat),
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for Format {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(options: &HashMap<String, String>) -> Result<Self> {
|
||||
let format = options
|
||||
.get(FORMAT_TYPE)
|
||||
.map(|format| format.to_ascii_uppercase())
|
||||
.unwrap_or_else(|| "PARQUET".to_string());
|
||||
|
||||
match format.as_str() {
|
||||
"CSV" => Ok(Self::Csv(CsvFormat::try_from(options)?)),
|
||||
"JSON" => Ok(Self::Json(JsonFormat::try_from(options)?)),
|
||||
"PARQUET" => Ok(Self::Parquet(ParquetFormat::default())),
|
||||
_ => error::UnsupportedFormatSnafu { format: &format }.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
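
A sketch of resolving a `Format` from user-supplied options; the keys are the constants defined above, the values are illustrative:

    use std::collections::HashMap;

    fn format_from_options() {
        let mut options = HashMap::new();
        options.insert(FORMAT_TYPE.to_string(), "csv".to_string());
        options.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
        options.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());

        // If the "format" key were absent, Parquet would be the default.
        let format = Format::try_from(&options).unwrap();
        assert!(matches!(format, Format::Csv(_)));
    }
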
#[async_trait]
|
||||
pub trait FileFormat: Send + Sync + std::fmt::Debug {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef>;
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<ArrowSchema>;
|
||||
}
|
||||
|
||||
pub trait ArrowDecoder: Send + 'static {
|
||||
/// Decode records from `buf` returning the number of bytes read.
|
||||
///
|
||||
/// This method returns `Ok(0)` once `batch_size` objects have been parsed since the
|
||||
/// last call to [`Self::flush`], or `buf` is exhausted.
|
||||
///
|
||||
/// Any remaining bytes should be included in the next call to [`Self::decode`].
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError>;
|
||||
|
||||
/// Flushes the currently buffered data to a [`RecordBatch`].
|
||||
///
|
||||
/// This should only be called after [`Self::decode`] has returned `Ok(0)`,
|
||||
/// otherwise it may return an error if called part way through decoding a record.
|
||||
///
|
||||
/// Returns `Ok(None)` if no buffered data.
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError>;
|
||||
}
|
||||
|
||||
impl ArrowDecoder for arrow::csv::reader::Decoder {
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
|
||||
self.decode(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl ArrowDecoder for arrow::json::RawDecoder {
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
|
||||
self.decode(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
|
||||
object_store: Arc<ObjectStore>,
|
||||
path: String,
|
||||
compression_type: CompressionType,
|
||||
decoder_factory: F,
|
||||
) -> DataFusionResult<FileOpenFuture> {
|
||||
let mut decoder = decoder_factory()?;
|
||||
Ok(Box::pin(async move {
|
||||
let reader = object_store
|
||||
.reader(&path)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
let mut upstream = compression_type.convert_stream(reader).fuse();
|
||||
|
||||
let mut buffered = Bytes::new();
|
||||
|
||||
let stream = futures::stream::poll_fn(move |cx| {
|
||||
loop {
|
||||
if buffered.is_empty() {
|
||||
if let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) {
|
||||
buffered = result?;
|
||||
};
|
||||
}
|
||||
|
||||
let decoded = decoder.decode(buffered.as_ref())?;
|
||||
|
||||
if decoded == 0 {
|
||||
break;
|
||||
} else {
|
||||
buffered.advance(decoded);
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(decoder.flush().transpose())
|
||||
});
|
||||
|
||||
Ok(stream.boxed())
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn infer_schemas(
|
||||
store: &ObjectStore,
|
||||
files: &[String],
|
||||
file_format: &dyn FileFormat,
|
||||
) -> Result<ArrowSchema> {
|
||||
let mut schemas = Vec::with_capacity(files.len());
|
||||
for file in files {
|
||||
schemas.push(file_format.infer_schema(store, file).await?)
|
||||
}
|
||||
ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
|
||||
}
|
||||
|
||||
pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
|
||||
mut stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
encoder_factory: U,
|
||||
) -> Result<usize> {
|
||||
let writer = store
|
||||
.writer(path)
|
||||
.await
|
||||
.context(error::WriteObjectSnafu { path })?
|
||||
.compat_write();
|
||||
|
||||
let buffer = SharedBuffer::with_capacity(threshold);
|
||||
let encoder = encoder_factory(buffer.clone());
|
||||
let mut writer = BufferedWriter::new(threshold, buffer, encoder, writer);
|
||||
|
||||
let mut rows = 0;
|
||||
|
||||
while let Some(batch) = stream.next().await {
|
||||
let batch = batch.context(error::ReadRecordBatchSnafu)?;
|
||||
writer.write(&batch).await?;
|
||||
rows += batch.num_rows();
|
||||
}
|
||||
|
||||
// Flushes all pending writes
|
||||
writer.try_flush(true).await?;
|
||||
|
||||
writer.close().await?;
|
||||
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
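
For example, the merged schema of several same-format files can be computed like this (file names and store are placeholders):

    async fn merged_schema(store: &ObjectStore) -> Result<ArrowSchema> {
        let files = vec!["a.csv".to_string(), "b.csv".to_string()];
        // Each file's schema is inferred independently, then the results are merged.
        infer_schemas(store, &files, &CsvFormat::default()).await
    }
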
@@ -12,21 +12,32 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::csv;
|
||||
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
|
||||
use arrow_schema::SchemaRef;
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::{Schema, SchemaRef};
|
||||
use async_trait::async_trait;
|
||||
use common_runtime;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use derive_builder::Builder;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use tokio_util::io::SyncIoBridge;
|
||||
|
||||
use super::stream_to_file;
|
||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::{self, FileFormat};
|
||||
use crate::file_format::{self, open_with_decoder, FileFormat};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct CsvFormat {
|
||||
pub has_header: bool,
|
||||
pub delimiter: u8,
|
||||
@@ -34,24 +45,128 @@ pub struct CsvFormat {
|
||||
pub compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for CsvFormat {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
|
||||
let mut format = CsvFormat::default();
|
||||
if let Some(delimiter) = value.get(file_format::FORMAT_DELIMITER) {
|
||||
// TODO(weny): considers to support parse like "\t" (not only b'\t')
|
||||
format.delimiter = u8::from_str(delimiter).map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_DELIMITER,
|
||||
value: delimiter,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
};
|
||||
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
|
||||
format.compression_type = CompressionType::from_str(compression_type)?;
|
||||
};
|
||||
if let Some(schema_infer_max_record) =
|
||||
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
|
||||
{
|
||||
format.schema_infer_max_record =
|
||||
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
value: schema_infer_max_record,
|
||||
}
|
||||
.build()
|
||||
})?);
|
||||
};
|
||||
if let Some(has_header) = value.get(file_format::FORMAT_HAS_HEADER) {
|
||||
format.has_header = has_header.parse().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_HAS_HEADER,
|
||||
value: has_header,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
}
|
||||
Ok(format)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CsvFormat {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
has_header: true,
|
||||
delimiter: b',',
|
||||
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
|
||||
compression_type: CompressionType::UNCOMPRESSED,
|
||||
compression_type: CompressionType::Uncompressed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Builder)]
|
||||
pub struct CsvConfig {
|
||||
batch_size: usize,
|
||||
file_schema: SchemaRef,
|
||||
#[builder(default = "None")]
|
||||
file_projection: Option<Vec<usize>>,
|
||||
#[builder(default = "true")]
|
||||
has_header: bool,
|
||||
#[builder(default = "b','")]
|
||||
delimiter: u8,
|
||||
}
|
||||
|
||||
impl CsvConfig {
|
||||
fn builder(&self) -> csv::ReaderBuilder {
|
||||
let mut builder = csv::ReaderBuilder::new()
|
||||
.with_schema(self.file_schema.clone())
|
||||
.with_delimiter(self.delimiter)
|
||||
.with_batch_size(self.batch_size)
|
||||
.has_header(self.has_header);
|
||||
|
||||
if let Some(proj) = &self.file_projection {
|
||||
builder = builder.with_projection(proj.clone());
|
||||
}
|
||||
|
||||
builder
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CsvOpener {
|
||||
config: Arc<CsvConfig>,
|
||||
object_store: Arc<ObjectStore>,
|
||||
compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl CsvOpener {
|
||||
/// Return a new [`CsvOpener`]. The caller must ensure that [`CsvConfig`]'s `file_schema` corresponds to the file being opened.
|
||||
pub fn new(
|
||||
config: CsvConfig,
|
||||
object_store: ObjectStore,
|
||||
compression_type: CompressionType,
|
||||
) -> Self {
|
||||
CsvOpener {
|
||||
config: Arc::new(config),
|
||||
object_store: Arc::new(object_store),
|
||||
compression_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FileOpener for CsvOpener {
|
||||
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
|
||||
open_with_decoder(
|
||||
self.object_store.clone(),
|
||||
meta.location().to_string(),
|
||||
self.compression_type,
|
||||
|| Ok(self.config.builder().build_decoder()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
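
A sketch of constructing a `CsvOpener`; `CsvConfigBuilder` is the builder that `derive_builder` generates for `CsvConfig`, and the schema/store arguments are placeholders:

    fn csv_opener(file_schema: SchemaRef, object_store: ObjectStore) -> CsvOpener {
        let config = CsvConfigBuilder::default()
            .batch_size(8192)
            .file_schema(file_schema)
            .build()
            .expect("batch_size and file_schema are set");
        // Decompresses gzip-ed CSV on the fly while decoding record batches.
        CsvOpener::new(config, object_store, CompressionType::Gzip)
    }
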
#[async_trait]
|
||||
impl FileFormat for CsvFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
let reader = store
|
||||
.reader(&path)
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let decoded = self.compression_type.convert_async_read(reader);
|
||||
|
||||
@@ -64,20 +179,40 @@ impl FileFormat for CsvFormat {
|
||||
|
||||
let (schema, _records_read) =
|
||||
infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
|
||||
.context(error::InferSchemaSnafu { path: &path })?;
|
||||
|
||||
Ok(Arc::new(schema))
|
||||
.context(error::InferSchemaSnafu)?;
|
||||
Ok(schema)
|
||||
})
|
||||
.await
|
||||
.context(error::JoinHandleSnafu)?
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn stream_to_csv(
|
||||
stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
) -> Result<usize> {
|
||||
stream_to_file(stream, store, path, threshold, |buffer| {
|
||||
csv::Writer::new(buffer)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
self.write(batch).context(error::WriteRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
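
Typical use is spilling a record-batch stream straight to object storage; a minimal sketch (path and threshold are arbitrary):

    async fn export_csv(stream: SendableRecordBatchStream, store: ObjectStore) -> Result<usize> {
        // Returns the number of rows written; data is flushed in ~8 MiB chunks.
        stream_to_csv(stream, store, "exports/result.csv", 8 * 1024 * 1024).await
    }
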
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::file_format::{
|
||||
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
};
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
@@ -88,10 +223,7 @@ mod tests {
|
||||
async fn infer_schema_basic() {
|
||||
let csv = CsvFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = csv
|
||||
.infer_schema(&store, "simple.csv".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let schema = csv.infer_schema(&store, "simple.csv").await.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(
|
||||
@@ -122,7 +254,7 @@ mod tests {
|
||||
};
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.csv".to_string())
|
||||
.infer_schema(&store, "schema_infer_limit.csv")
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -140,7 +272,7 @@ mod tests {
|
||||
let json = CsvFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.csv".to_string())
|
||||
.infer_schema(&store, "schema_infer_limit.csv")
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -155,4 +287,33 @@ mod tests {
|
||||
formatted
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from() {
|
||||
let mut map = HashMap::new();
|
||||
let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(format, CsvFormat::default());
|
||||
|
||||
map.insert(
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
|
||||
"2000".to_string(),
|
||||
);
|
||||
|
||||
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
|
||||
map.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
|
||||
map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());
|
||||
|
||||
let format = CsvFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
format,
|
||||
CsvFormat {
|
||||
compression_type: CompressionType::Zstd,
|
||||
schema_infer_max_record: Some(2000),
|
||||
delimiter: b'\t',
|
||||
has_header: false,
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,43 +12,79 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io::BufReader;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::datatypes::SchemaRef;
|
||||
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
|
||||
use arrow::json::writer::LineDelimited;
|
||||
use arrow::json::{self, RawReaderBuilder};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::Schema;
|
||||
use async_trait::async_trait;
|
||||
use common_runtime;
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use tokio_util::io::SyncIoBridge;
|
||||
|
||||
use super::stream_to_file;
|
||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::{self, FileFormat};
|
||||
use crate::file_format::{self, open_with_decoder, FileFormat};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct JsonFormat {
|
||||
pub schema_infer_max_record: Option<usize>,
|
||||
pub compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for JsonFormat {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
|
||||
let mut format = JsonFormat::default();
|
||||
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
|
||||
format.compression_type = CompressionType::from_str(compression_type)?
|
||||
};
|
||||
if let Some(schema_infer_max_record) =
|
||||
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
|
||||
{
|
||||
format.schema_infer_max_record =
|
||||
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
value: schema_infer_max_record,
|
||||
}
|
||||
.build()
|
||||
})?);
|
||||
};
|
||||
Ok(format)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for JsonFormat {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
|
||||
compression_type: CompressionType::UNCOMPRESSED,
|
||||
compression_type: CompressionType::Uncompressed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileFormat for JsonFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
let reader = store
|
||||
.reader(&path)
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let decoded = self.compression_type.convert_async_read(reader);
|
||||
|
||||
@@ -59,20 +95,79 @@ impl FileFormat for JsonFormat {
|
||||
|
||||
let iter = ValueIter::new(&mut reader, schema_infer_max_record);
|
||||
|
||||
let schema = infer_json_schema_from_iterator(iter)
|
||||
.context(error::InferSchemaSnafu { path: &path })?;
|
||||
let schema = infer_json_schema_from_iterator(iter).context(error::InferSchemaSnafu)?;
|
||||
|
||||
Ok(Arc::new(schema))
|
||||
Ok(schema)
|
||||
})
|
||||
.await
|
||||
.context(error::JoinHandleSnafu)?
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct JsonOpener {
|
||||
batch_size: usize,
|
||||
projected_schema: SchemaRef,
|
||||
object_store: Arc<ObjectStore>,
|
||||
compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl JsonOpener {
|
||||
/// Return a new [`JsonOpener`]. Any fields not present in `projected_schema` will be ignored.
|
||||
pub fn new(
|
||||
batch_size: usize,
|
||||
projected_schema: SchemaRef,
|
||||
object_store: ObjectStore,
|
||||
compression_type: CompressionType,
|
||||
) -> Self {
|
||||
Self {
|
||||
batch_size,
|
||||
projected_schema,
|
||||
object_store: Arc::new(object_store),
|
||||
compression_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FileOpener for JsonOpener {
|
||||
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
|
||||
open_with_decoder(
|
||||
self.object_store.clone(),
|
||||
meta.location().to_string(),
|
||||
self.compression_type,
|
||||
|| {
|
||||
RawReaderBuilder::new(self.projected_schema.clone())
|
||||
.with_batch_size(self.batch_size)
|
||||
.build_decoder()
|
||||
.map_err(DataFusionError::from)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn stream_to_json(
|
||||
stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
) -> Result<usize> {
|
||||
stream_to_file(stream, store, path, threshold, |buffer| {
|
||||
json::LineDelimitedWriter::new(buffer)
|
||||
})
|
||||
.await
|
||||
}
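// A minimal usage sketch (not part of the diff): how `stream_to_json` could be
// called, assuming a `SendableRecordBatchStream` obtained from a query and an
// existing `ObjectStore`; the path and flush threshold below are illustrative only.
async fn export_json(stream: SendableRecordBatchStream, store: ObjectStore) -> Result<usize> {
    // Flush buffered rows to the object store roughly every 8 MiB.
    stream_to_json(stream, store, "exports/output.json", 8 * 1024 * 1024).await
}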
|
||||
|
||||
impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
self.write(batch.clone())
|
||||
.context(error::WriteRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
@@ -83,10 +178,7 @@ mod tests {
|
||||
async fn infer_schema_basic() {
|
||||
let json = JsonFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "simple.json".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let schema = json.infer_schema(&store, "simple.json").await.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(
|
||||
@@ -108,7 +200,7 @@ mod tests {
|
||||
};
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.json".to_string())
|
||||
.infer_schema(&store, "schema_infer_limit.json")
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -118,4 +210,29 @@ mod tests {
|
||||
formatted
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from() {
|
||||
let mut map = HashMap::new();
|
||||
let format = JsonFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(format, JsonFormat::default());
|
||||
|
||||
map.insert(
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
|
||||
"2000".to_string(),
|
||||
);
|
||||
|
||||
map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
|
||||
|
||||
let format = JsonFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
format,
|
||||
JsonFormat {
|
||||
compression_type: CompressionType::Zstd,
|
||||
schema_infer_max_record: Some(2000),
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,28 +12,39 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef;
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::Schema;
|
||||
use async_trait::async_trait;
|
||||
use datafusion::error::Result as DatafusionResult;
|
||||
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
|
||||
use datafusion::parquet::arrow::parquet_to_arrow_schema;
|
||||
use object_store::ObjectStore;
|
||||
use datafusion::parquet::arrow::{parquet_to_arrow_schema, ArrowWriter};
|
||||
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
|
||||
use datafusion::parquet::file::metadata::ParquetMetaData;
|
||||
use datafusion::parquet::format::FileMetaData;
|
||||
use datafusion::physical_plan::file_format::{FileMeta, ParquetFileReaderFactory};
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use futures::future::BoxFuture;
|
||||
use object_store::{ObjectStore, Reader};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub struct ParquetFormat {}
|
||||
|
||||
#[async_trait]
|
||||
impl FileFormat for ParquetFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
let mut reader = store
|
||||
.reader(&path)
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let metadata = reader
|
||||
.get_metadata()
|
||||
@@ -47,14 +58,107 @@ impl FileFormat for ParquetFormat {
|
||||
)
|
||||
.context(error::ParquetToSchemaSnafu)?;
|
||||
|
||||
Ok(Arc::new(schema))
|
||||
Ok(schema)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DefaultParquetFileReaderFactory {
|
||||
object_store: ObjectStore,
|
||||
}
|
||||
|
||||
/// Returns an AsyncFileReader factory
|
||||
impl DefaultParquetFileReaderFactory {
|
||||
pub fn new(object_store: ObjectStore) -> Self {
|
||||
Self { object_store }
|
||||
}
|
||||
}
|
||||
|
||||
impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
|
||||
// TODO(weny): Support [`metadata_size_hint`].
|
||||
// The upstream has an implementation that supports [`metadata_size_hint`],
|
||||
// however it is coupled with Box<dyn ObjectStore>.
|
||||
fn create_reader(
|
||||
&self,
|
||||
_partition_index: usize,
|
||||
file_meta: FileMeta,
|
||||
_metadata_size_hint: Option<usize>,
|
||||
_metrics: &ExecutionPlanMetricsSet,
|
||||
) -> DatafusionResult<Box<dyn AsyncFileReader + Send>> {
|
||||
let path = file_meta.location().to_string();
|
||||
let object_store = self.object_store.clone();
|
||||
|
||||
Ok(Box::new(LazyParquetFileReader::new(object_store, path)))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LazyParquetFileReader {
|
||||
object_store: ObjectStore,
|
||||
reader: Option<Reader>,
|
||||
path: String,
|
||||
}
|
||||
|
||||
impl LazyParquetFileReader {
|
||||
pub fn new(object_store: ObjectStore, path: String) -> Self {
|
||||
LazyParquetFileReader {
|
||||
object_store,
|
||||
path,
|
||||
reader: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes the reader if it has not been initialized yet; any failure is returned as an error from the future.
|
||||
async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
|
||||
if self.reader.is_none() {
|
||||
let reader = self.object_store.reader(&self.path).await?;
|
||||
self.reader = Some(reader);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncFileReader for LazyParquetFileReader {
|
||||
fn get_bytes(
|
||||
&mut self,
|
||||
range: std::ops::Range<usize>,
|
||||
) -> BoxFuture<'_, ParquetResult<bytes::Bytes>> {
|
||||
Box::pin(async move {
|
||||
self.maybe_initialize()
|
||||
.await
|
||||
.map_err(|e| ParquetError::External(Box::new(e)))?;
|
||||
// Safety: must be initialized
|
||||
self.reader.as_mut().unwrap().get_bytes(range).await
|
||||
})
|
||||
}
|
||||
|
||||
fn get_metadata(&mut self) -> BoxFuture<'_, ParquetResult<Arc<ParquetMetaData>>> {
|
||||
Box::pin(async move {
|
||||
self.maybe_initialize()
|
||||
.await
|
||||
.map_err(|e| ParquetError::External(Box::new(e)))?;
|
||||
// Safety: must be initialized
|
||||
self.reader.as_mut().unwrap().get_metadata().await
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DfRecordBatchEncoder for ArrowWriter<SharedBuffer> {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
self.write(batch).context(error::EncodeRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
|
||||
async fn close(self) -> Result<FileMetaData> {
|
||||
self.close().context(error::EncodeRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
@@ -67,10 +171,7 @@ mod tests {
|
||||
async fn infer_schema_basic() {
|
||||
let json = ParquetFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "basic.parquet".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let schema = json.infer_schema(&store, "basic.parquet").await.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(vec!["num: Int64: NULL", "str: Utf8: NULL"], formatted);
|
||||
|
||||
228
src/common/datasource/src/file_format/tests.rs
Normal file
@@ -0,0 +1,228 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::vec;
|
||||
|
||||
use datafusion::assert_batches_eq;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream, ParquetExec};
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use datafusion::physical_plan::ExecutionPlan;
|
||||
use datafusion::prelude::SessionContext;
|
||||
use futures::StreamExt;
|
||||
|
||||
use super::FORMAT_TYPE;
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error;
|
||||
use crate::file_format::csv::{CsvConfigBuilder, CsvOpener};
|
||||
use crate::file_format::json::JsonOpener;
|
||||
use crate::file_format::parquet::DefaultParquetFileReaderFactory;
|
||||
use crate::file_format::Format;
|
||||
use crate::test_util::{self, scan_config, test_basic_schema, test_store};
|
||||
|
||||
struct Test<'a, T: FileOpener> {
|
||||
config: FileScanConfig,
|
||||
opener: T,
|
||||
expected: Vec<&'a str>,
|
||||
}
|
||||
|
||||
impl<'a, T: FileOpener> Test<'a, T> {
|
||||
pub async fn run(self) {
|
||||
let result = FileStream::new(
|
||||
&self.config,
|
||||
0,
|
||||
self.opener,
|
||||
&ExecutionPlanMetricsSet::new(),
|
||||
)
|
||||
.unwrap()
|
||||
.map(|b| b.unwrap())
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
assert_batches_eq!(self.expected, &result);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_json_opener() {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
|
||||
let json_opener = JsonOpener::new(
|
||||
100,
|
||||
schema.clone(),
|
||||
store.clone(),
|
||||
CompressionType::Uncompressed,
|
||||
);
|
||||
|
||||
let path = &test_util::get_data_dir("tests/json/basic.json")
|
||||
.display()
|
||||
.to_string();
|
||||
let tests = [
|
||||
Test {
|
||||
config: scan_config(schema.clone(), None, path),
|
||||
opener: json_opener.clone(),
|
||||
expected: vec![
|
||||
"+-----+-------+",
|
||||
"| num | str |",
|
||||
"+-----+-------+",
|
||||
"| 5 | test |",
|
||||
"| 2 | hello |",
|
||||
"| 4 | foo |",
|
||||
"+-----+-------+",
|
||||
],
|
||||
},
|
||||
Test {
|
||||
config: scan_config(schema.clone(), Some(1), path),
|
||||
opener: json_opener.clone(),
|
||||
expected: vec![
|
||||
"+-----+------+",
|
||||
"| num | str |",
|
||||
"+-----+------+",
|
||||
"| 5 | test |",
|
||||
"+-----+------+",
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
test.run().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_csv_opener() {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
let path = &test_util::get_data_dir("tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string();
|
||||
let csv_conf = CsvConfigBuilder::default()
|
||||
.batch_size(test_util::TEST_BATCH_SIZE)
|
||||
.file_schema(schema.clone())
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let csv_opener = CsvOpener::new(csv_conf, store, CompressionType::Uncompressed);
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
config: scan_config(schema.clone(), None, path),
|
||||
opener: csv_opener.clone(),
|
||||
expected: vec![
|
||||
"+-----+-------+",
|
||||
"| num | str |",
|
||||
"+-----+-------+",
|
||||
"| 5 | test |",
|
||||
"| 2 | hello |",
|
||||
"| 4 | foo |",
|
||||
"+-----+-------+",
|
||||
],
|
||||
},
|
||||
Test {
|
||||
config: scan_config(schema.clone(), Some(1), path),
|
||||
opener: csv_opener.clone(),
|
||||
expected: vec![
|
||||
"+-----+------+",
|
||||
"| num | str |",
|
||||
"+-----+------+",
|
||||
"| 5 | test |",
|
||||
"+-----+------+",
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
test.run().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_parquet_exec() {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
|
||||
let path = &test_util::get_data_dir("tests/parquet/basic.parquet")
|
||||
.display()
|
||||
.to_string();
|
||||
let base_config = scan_config(schema.clone(), None, path);
|
||||
|
||||
let exec = ParquetExec::new(base_config, None, None)
|
||||
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));
|
||||
|
||||
let ctx = SessionContext::new();
|
||||
|
||||
let context = Arc::new(TaskContext::from(&ctx));
|
||||
|
||||
// The stream batch size can be set by ctx.session_config.batch_size
|
||||
let result = exec
|
||||
.execute(0, context)
|
||||
.unwrap()
|
||||
.map(|b| b.unwrap())
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
assert_batches_eq!(
|
||||
vec![
|
||||
"+-----+-------+",
|
||||
"| num | str |",
|
||||
"+-----+-------+",
|
||||
"| 5 | test |",
|
||||
"| 2 | hello |",
|
||||
"| 4 | foo |",
|
||||
"+-----+-------+",
|
||||
],
|
||||
&result
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format() {
|
||||
let value = [(FORMAT_TYPE.to_string(), "csv".to_string())]
|
||||
.into_iter()
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
assert_matches!(Format::try_from(&value).unwrap(), Format::Csv(_));
|
||||
|
||||
let value = [(FORMAT_TYPE.to_string(), "Parquet".to_string())]
|
||||
.into_iter()
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
assert_matches!(Format::try_from(&value).unwrap(), Format::Parquet(_));
|
||||
|
||||
let value = [(FORMAT_TYPE.to_string(), "JSON".to_string())]
|
||||
.into_iter()
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
assert_matches!(Format::try_from(&value).unwrap(), Format::Json(_));
|
||||
|
||||
let value = [(FORMAT_TYPE.to_string(), "Foobar".to_string())]
|
||||
.into_iter()
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
assert_matches!(
|
||||
Format::try_from(&value).unwrap_err(),
|
||||
error::Error::UnsupportedFormat { .. }
|
||||
);
|
||||
|
||||
let value = HashMap::new();
|
||||
|
||||
assert_matches!(Format::try_from(&value).unwrap(), Format::Parquet(_));
|
||||
}
|
||||
@@ -12,10 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
pub mod buffered_writer;
|
||||
pub mod compression;
|
||||
pub mod error;
|
||||
pub mod file_format;
|
||||
pub mod lister;
|
||||
pub mod object_store;
|
||||
pub mod share_buffer;
|
||||
#[cfg(test)]
|
||||
pub mod test_util;
|
||||
#[cfg(test)]
|
||||
pub mod tests;
|
||||
pub mod util;
|
||||
|
||||
@@ -27,7 +27,7 @@ use crate::error::{self, Result};
|
||||
pub const FS_SCHEMA: &str = "FS";
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
|
||||
/// parse url returns (schema,Option<host>,path)
|
||||
/// Returns (schema, Option<host>, path)
|
||||
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
let parsed_url = Url::parse(url);
|
||||
match parsed_url {
|
||||
@@ -43,7 +43,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
|
||||
pub fn build_backend(url: &str, connection: &HashMap<String, String>) -> Result<ObjectStore> {
|
||||
let (schema, host, _path) = parse_url(url)?;
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
|
||||
@@ -20,17 +20,17 @@ use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
const ENDPOINT_URL: &str = "endpoint_url";
|
||||
const ACCESS_KEY_ID: &str = "access_key_id";
|
||||
const SECRET_ACCESS_KEY: &str = "secret_access_key";
|
||||
const SESSION_TOKEN: &str = "session_token";
|
||||
const REGION: &str = "region";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
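// Illustrative only (not part of the diff): with the lowercased keys above, a
// connection map passed to `build_s3_backend` would look roughly like this;
// all values are placeholders.
fn example_s3_connection() -> HashMap<String, String> {
    [
        (ACCESS_KEY_ID.to_string(), "<access key id>".to_string()),
        (SECRET_ACCESS_KEY.to_string(), "<secret>".to_string()),
        (REGION.to_string(), "<region>".to_string()),
    ]
    .into_iter()
    .collect()
}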
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: &str,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
connection: &HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
|
||||
46
src/common/datasource/src/share_buffer.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::io::Write;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct SharedBuffer {
|
||||
pub buffer: Arc<Mutex<BytesMut>>,
|
||||
}
|
||||
|
||||
impl SharedBuffer {
|
||||
pub fn with_capacity(size: usize) -> Self {
|
||||
Self {
|
||||
buffer: Arc::new(Mutex::new(BytesMut::with_capacity(size))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for SharedBuffer {
|
||||
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||
let len = buf.len();
|
||||
let mut buffer = self.buffer.lock().unwrap();
|
||||
buffer.put_slice(buf);
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
// This flush implementation is intentionally left blank.
|
||||
// The actual flush is in `BufferedWriter::try_flush`
|
||||
Ok(())
|
||||
}
|
||||
}
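// A hedged usage sketch (not part of the diff): one clone of `SharedBuffer` is
// handed to a writer that needs `std::io::Write`, while another clone observes
// the same underlying `BytesMut`.
#[cfg(test)]
mod usage_sketch {
    use std::io::Write;

    use super::SharedBuffer;

    #[test]
    fn write_through_clone() {
        let buffer = SharedBuffer::with_capacity(1024);
        let mut writer = buffer.clone();
        writer.write_all(b"hello").unwrap();
        // Both clones see the bytes accumulated so far.
        assert_eq!(&buffer.buffer.lock().unwrap()[..], b"hello");
    }
}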
|
||||
@@ -13,11 +13,24 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef;
|
||||
use arrow_schema::{DataType, Field, Schema, SchemaRef};
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use datafusion::datasource::listing::PartitionedFile;
|
||||
use datafusion::datasource::object_store::ObjectStoreUrl;
|
||||
use datafusion::physical_plan::file_format::{FileScanConfig, FileStream};
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use object_store::services::Fs;
|
||||
use object_store::ObjectStore;
|
||||
|
||||
use crate::compression::CompressionType;
|
||||
use crate::file_format::csv::{stream_to_csv, CsvConfigBuilder, CsvOpener};
|
||||
use crate::file_format::json::{stream_to_json, JsonOpener};
|
||||
use crate::test_util;
|
||||
|
||||
pub const TEST_BATCH_SIZE: usize = 100;
|
||||
|
||||
pub fn get_data_dir(path: &str) -> PathBuf {
|
||||
// https://doc.rust-lang.org/cargo/reference/environment-variables.html
|
||||
let dir = env!("CARGO_MANIFEST_DIR");
|
||||
@@ -25,7 +38,7 @@ pub fn get_data_dir(path: &str) -> PathBuf {
|
||||
PathBuf::from(dir).join(path)
|
||||
}
|
||||
|
||||
pub fn format_schema(schema: SchemaRef) -> Vec<String> {
|
||||
pub fn format_schema(schema: Schema) -> Vec<String> {
|
||||
schema
|
||||
.fields()
|
||||
.iter()
|
||||
@@ -46,3 +59,117 @@ pub fn test_store(root: &str) -> ObjectStore {
|
||||
|
||||
ObjectStore::new(builder).unwrap().finish()
|
||||
}
|
||||
|
||||
pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
|
||||
let dir = create_temp_dir(root);
|
||||
|
||||
let mut builder = Fs::default();
|
||||
builder.root("/");
|
||||
|
||||
(ObjectStore::new(builder).unwrap().finish(), dir)
|
||||
}
|
||||
|
||||
pub fn test_basic_schema() -> SchemaRef {
|
||||
let schema = Schema::new(vec![
|
||||
Field::new("num", DataType::Int64, false),
|
||||
Field::new("str", DataType::Utf8, false),
|
||||
]);
|
||||
Arc::new(schema)
|
||||
}
|
||||
|
||||
pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
|
||||
FileScanConfig {
|
||||
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
|
||||
file_schema,
|
||||
file_groups: vec![vec![PartitionedFile::new(filename.to_string(), 10)]],
|
||||
statistics: Default::default(),
|
||||
projection: None,
|
||||
limit,
|
||||
table_partition_cols: vec![],
|
||||
output_ordering: None,
|
||||
infinite_source: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
|
||||
let json_opener = JsonOpener::new(
|
||||
test_util::TEST_BATCH_SIZE,
|
||||
schema.clone(),
|
||||
store.clone(),
|
||||
CompressionType::Uncompressed,
|
||||
);
|
||||
|
||||
let size = store.read(origin_path).await.unwrap().len();
|
||||
|
||||
let config = scan_config(schema.clone(), None, origin_path);
|
||||
|
||||
let stream = FileStream::new(&config, 0, json_opener, &ExecutionPlanMetricsSet::new()).unwrap();
|
||||
|
||||
let (tmp_store, dir) = test_tmp_store("test_stream_to_json");
|
||||
|
||||
let output_path = format!("{}/{}", dir.path().display(), "output");
|
||||
|
||||
stream_to_json(
|
||||
Box::pin(stream),
|
||||
tmp_store.clone(),
|
||||
&output_path,
|
||||
threshold(size),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let written = tmp_store.read(&output_path).await.unwrap();
|
||||
let origin = store.read(origin_path).await.unwrap();
|
||||
|
||||
// ignores `\n`
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(&written).trim_end_matches('\n'),
|
||||
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
|
||||
let store = test_store("/");
|
||||
|
||||
let schema = test_basic_schema();
|
||||
|
||||
let csv_conf = CsvConfigBuilder::default()
|
||||
.batch_size(test_util::TEST_BATCH_SIZE)
|
||||
.file_schema(schema.clone())
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let csv_opener = CsvOpener::new(csv_conf, store.clone(), CompressionType::Uncompressed);
|
||||
|
||||
let size = store.read(origin_path).await.unwrap().len();
|
||||
|
||||
let config = scan_config(schema.clone(), None, origin_path);
|
||||
|
||||
let stream = FileStream::new(&config, 0, csv_opener, &ExecutionPlanMetricsSet::new()).unwrap();
|
||||
|
||||
let (tmp_store, dir) = test_tmp_store("test_stream_to_csv");
|
||||
|
||||
let output_path = format!("{}/{}", dir.path().display(), "output");
|
||||
|
||||
stream_to_csv(
|
||||
Box::pin(stream),
|
||||
tmp_store.clone(),
|
||||
&output_path,
|
||||
threshold(size),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let written = tmp_store.read(&output_path).await.unwrap();
|
||||
let origin = store.read(origin_path).await.unwrap();
|
||||
|
||||
// ignores `\n`
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(&written).trim_end_matches('\n'),
|
||||
String::from_utf8_lossy(&origin).trim_end_matches('\n'),
|
||||
)
|
||||
}
|
||||
|
||||
61
src/common/datasource/src/tests.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::test_util;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stream_to_json() {
|
||||
// A small threshold
|
||||
// Triggers a flush on each write
|
||||
test_util::setup_stream_to_json_test(
|
||||
&test_util::get_data_dir("tests/json/basic.json")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size / 2,
|
||||
)
|
||||
.await;
|
||||
|
||||
// A large threshold
|
||||
// Only triggers a flush at the end
|
||||
test_util::setup_stream_to_json_test(
|
||||
&test_util::get_data_dir("tests/json/basic.json")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size * 2,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stream_to_csv() {
|
||||
// A small threshold
|
||||
// Triggers a flush on each write
|
||||
test_util::setup_stream_to_csv_test(
|
||||
&test_util::get_data_dir("tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size / 2,
|
||||
)
|
||||
.await;
|
||||
|
||||
// A large threshold
|
||||
// Only triggers a flush at the end
|
||||
test_util::setup_stream_to_csv_test(
|
||||
&test_util::get_data_dir("tests/csv/basic.csv")
|
||||
.display()
|
||||
.to_string(),
|
||||
|size| size * 2,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
3
src/common/datasource/tests/json/basic.json
Normal file
@@ -0,0 +1,3 @@
|
||||
{"num":5,"str":"test"}
|
||||
{"num":2,"str":"hello"}
|
||||
{"num":4,"str":"foo"}
|
||||
@@ -83,10 +83,12 @@ pub enum StatusCode {
|
||||
}
|
||||
|
||||
impl StatusCode {
|
||||
/// Returns `true` if `code` is success.
|
||||
pub fn is_success(code: u32) -> bool {
|
||||
Self::Success as u32 == code
|
||||
}
|
||||
|
||||
/// Returns `true` if the error with this code is retryable.
|
||||
pub fn is_retryable(&self) -> bool {
|
||||
match self {
|
||||
StatusCode::StorageUnavailable
|
||||
@@ -114,6 +116,35 @@ impl StatusCode {
|
||||
| StatusCode::AccessDenied => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if we should print an error log for an error with
|
||||
/// this status code.
|
||||
pub fn should_log_error(&self) -> bool {
|
||||
match self {
|
||||
StatusCode::Unknown
|
||||
| StatusCode::Unsupported
|
||||
| StatusCode::Unexpected
|
||||
| StatusCode::Internal
|
||||
| StatusCode::PlanQuery
|
||||
| StatusCode::EngineExecuteQuery
|
||||
| StatusCode::StorageUnavailable
|
||||
| StatusCode::RuntimeResourcesExhausted => true,
|
||||
StatusCode::Success
|
||||
| StatusCode::InvalidArguments
|
||||
| StatusCode::InvalidSyntax
|
||||
| StatusCode::TableAlreadyExists
|
||||
| StatusCode::TableNotFound
|
||||
| StatusCode::TableColumnNotFound
|
||||
| StatusCode::TableColumnExists
|
||||
| StatusCode::DatabaseNotFound
|
||||
| StatusCode::UserNotFound
|
||||
| StatusCode::UnsupportedPasswordType
|
||||
| StatusCode::UserPasswordMismatch
|
||||
| StatusCode::AuthHeaderNotFound
|
||||
| StatusCode::InvalidAuthHeader
|
||||
| StatusCode::AccessDenied => false,
|
||||
}
|
||||
}
|
||||
}
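// A hedged sketch (not part of the diff) of how a caller might use the new
// `should_log_error` helper to pick a log level; the function name here is a
// placeholder, not an API of this crate.
fn level_for(code: StatusCode) -> &'static str {
    if code.should_log_error() {
        "ERROR" // unexpected/internal problems deserve an error log
    } else {
        "DEBUG" // user-facing errors such as invalid arguments stay quiet
    }
}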
|
||||
|
||||
impl fmt::Display for StatusCode {
|
||||
|
||||
@@ -92,7 +92,7 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
|
||||
pub fn create_table_schema(expr: &CreateTableExpr, require_time_index: bool) -> Result<RawSchema> {
|
||||
let column_schemas = expr
|
||||
.column_defs
|
||||
.iter()
|
||||
@@ -101,14 +101,17 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
|
||||
})
|
||||
.collect::<Result<Vec<ColumnSchema>>>()?;
|
||||
|
||||
ensure!(
|
||||
column_schemas
|
||||
.iter()
|
||||
.any(|column| column.name == expr.time_index),
|
||||
MissingTimestampColumnSnafu {
|
||||
msg: format!("CreateExpr: {expr:?}")
|
||||
}
|
||||
);
|
||||
// allow external table schema without the time index
|
||||
if require_time_index {
|
||||
ensure!(
|
||||
column_schemas
|
||||
.iter()
|
||||
.any(|column| column.name == expr.time_index),
|
||||
MissingTimestampColumnSnafu {
|
||||
msg: format!("CreateExpr: {expr:?}")
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
let column_schemas = column_schemas
|
||||
.into_iter()
|
||||
@@ -127,8 +130,9 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
|
||||
pub fn create_expr_to_request(
|
||||
table_id: TableId,
|
||||
expr: CreateTableExpr,
|
||||
require_time_index: bool,
|
||||
) -> Result<CreateTableRequest> {
|
||||
let schema = create_table_schema(&expr)?;
|
||||
let schema = create_table_schema(&expr, require_time_index)?;
|
||||
let primary_key_indices = expr
|
||||
.primary_keys
|
||||
.iter()
|
||||
|
||||
113
src/common/grpc-expr/src/delete.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::{Column, DeleteRequest as GrpcDeleteRequest};
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::requests::DeleteRequest;
|
||||
|
||||
use crate::error::{ColumnDataTypeSnafu, IllegalDeleteRequestSnafu, Result};
|
||||
use crate::insert::add_values_to_builder;
|
||||
|
||||
pub fn to_table_delete_request(request: GrpcDeleteRequest) -> Result<DeleteRequest> {
|
||||
let row_count = request.row_count as usize;
|
||||
|
||||
let mut key_column_values = HashMap::with_capacity(request.key_columns.len());
|
||||
for Column {
|
||||
column_name,
|
||||
values,
|
||||
null_mask,
|
||||
datatype,
|
||||
..
|
||||
} in request.key_columns
|
||||
{
|
||||
let Some(values) = values else { continue };
|
||||
|
||||
let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
|
||||
.context(ColumnDataTypeSnafu)?
|
||||
.into();
|
||||
|
||||
let vector_builder = &mut datatype.create_mutable_vector(row_count);
|
||||
|
||||
add_values_to_builder(vector_builder, values, row_count, null_mask)?;
|
||||
|
||||
ensure!(
|
||||
key_column_values
|
||||
.insert(column_name.clone(), vector_builder.to_vector())
|
||||
.is_none(),
|
||||
IllegalDeleteRequestSnafu {
|
||||
reason: format!("Duplicated column '{column_name}' in delete request.")
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Ok(DeleteRequest { key_column_values })
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::column::Values;
|
||||
use api::v1::ColumnDataType;
|
||||
use datatypes::prelude::{ScalarVector, VectorRef};
|
||||
use datatypes::vectors::{Int32Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_to_table_delete_request() {
|
||||
let grpc_request = GrpcDeleteRequest {
|
||||
table_name: "foo".to_string(),
|
||||
region_number: 0,
|
||||
key_columns: vec![
|
||||
Column {
|
||||
column_name: "id".to_string(),
|
||||
values: Some(Values {
|
||||
i32_values: vec![1, 2, 3],
|
||||
..Default::default()
|
||||
}),
|
||||
datatype: ColumnDataType::Int32 as i32,
|
||||
..Default::default()
|
||||
},
|
||||
Column {
|
||||
column_name: "name".to_string(),
|
||||
values: Some(Values {
|
||||
string_values: vec!["a".to_string(), "b".to_string(), "c".to_string()],
|
||||
..Default::default()
|
||||
}),
|
||||
datatype: ColumnDataType::String as i32,
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
row_count: 3,
|
||||
};
|
||||
|
||||
let mut request = to_table_delete_request(grpc_request).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
Arc::new(Int32Vector::from_slice(vec![1, 2, 3])) as VectorRef,
|
||||
request.key_column_values.remove("id").unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
Arc::new(StringVector::from_slice(&["a", "b", "c"])) as VectorRef,
|
||||
request.key_column_values.remove("name").unwrap()
|
||||
);
|
||||
assert!(request.key_column_values.is_empty());
|
||||
}
|
||||
}
|
||||
@@ -31,8 +31,8 @@ pub enum Error {
|
||||
#[snafu(display("Failed to convert bytes to insert batch, source: {}", source))]
|
||||
DecodeInsert { source: DecodeError },
|
||||
|
||||
#[snafu(display("Illegal insert data"))]
|
||||
IllegalInsertData { location: Location },
|
||||
#[snafu(display("Illegal delete request, reason: {reason}"))]
|
||||
IllegalDeleteRequest { reason: String, location: Location },
|
||||
|
||||
#[snafu(display("Column datatype error, source: {}", source))]
|
||||
ColumnDataType {
|
||||
@@ -87,6 +87,12 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unexpected values length, reason: {}", reason))]
|
||||
UnexpectedValuesLength { reason: String, location: Location },
|
||||
|
||||
#[snafu(display("The column name already exists, column: {}", column))]
|
||||
ColumnAlreadyExists { column: String, location: Location },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -95,9 +101,11 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
|
||||
Error::DecodeInsert { .. } | Error::IllegalInsertData { .. } => {
|
||||
|
||||
Error::DecodeInsert { .. } | Error::IllegalDeleteRequest { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ColumnDataType { .. } => StatusCode::Internal,
|
||||
Error::DuplicatedTimestampColumn { .. } | Error::MissingTimestampColumn { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
@@ -108,6 +116,9 @@ impl ErrorExt for Error {
|
||||
Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
|
||||
Error::InvalidColumnDef { source, .. } => source.status_code(),
|
||||
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
|
||||
Error::UnexpectedValuesLength { .. } | Error::ColumnAlreadyExists { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -34,8 +34,9 @@ use table::metadata::TableId;
|
||||
use table::requests::InsertRequest;
|
||||
|
||||
use crate::error::{
|
||||
ColumnDataTypeSnafu, CreateVectorSnafu, DuplicatedTimestampColumnSnafu, IllegalInsertDataSnafu,
|
||||
InvalidColumnProtoSnafu, MissingTimestampColumnSnafu, Result,
|
||||
ColumnAlreadyExistsSnafu, ColumnDataTypeSnafu, CreateVectorSnafu,
|
||||
DuplicatedTimestampColumnSnafu, InvalidColumnProtoSnafu, MissingTimestampColumnSnafu, Result,
|
||||
UnexpectedValuesLengthSnafu,
|
||||
};
|
||||
const TAG_SEMANTIC_TYPE: i32 = SemanticType::Tag as i32;
|
||||
const TIMESTAMP_SEMANTIC_TYPE: i32 = SemanticType::Timestamp as i32;
|
||||
@@ -292,9 +293,11 @@ pub fn to_table_insert_request(
|
||||
|
||||
ensure!(
|
||||
columns_values
|
||||
.insert(column_name, vector_builder.to_vector())
|
||||
.insert(column_name.clone(), vector_builder.to_vector())
|
||||
.is_none(),
|
||||
IllegalInsertDataSnafu
|
||||
ColumnAlreadyExistsSnafu {
|
||||
column: column_name
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
@@ -307,7 +310,7 @@ pub fn to_table_insert_request(
|
||||
})
|
||||
}
|
||||
|
||||
fn add_values_to_builder(
|
||||
pub(crate) fn add_values_to_builder(
|
||||
builder: &mut Box<dyn MutableVector>,
|
||||
values: Values,
|
||||
row_count: usize,
|
||||
@@ -317,7 +320,12 @@ fn add_values_to_builder(
|
||||
let values = convert_values(&data_type, values);
|
||||
|
||||
if null_mask.is_empty() {
|
||||
ensure!(values.len() == row_count, IllegalInsertDataSnafu);
|
||||
ensure!(
|
||||
values.len() == row_count,
|
||||
UnexpectedValuesLengthSnafu {
|
||||
reason: "If null_mask is empty, the length of values must be equal to row_count."
|
||||
}
|
||||
);
|
||||
|
||||
values.iter().try_for_each(|value| {
|
||||
builder
|
||||
@@ -328,7 +336,9 @@ fn add_values_to_builder(
|
||||
let null_mask = BitVec::from_vec(null_mask);
|
||||
ensure!(
|
||||
null_mask.count_ones() + values.len() == row_count,
|
||||
IllegalInsertDataSnafu
|
||||
UnexpectedValuesLengthSnafu {
|
||||
reason: "If null_mask is not empty, the sum of the number of nulls and the length of values must be equal to row_count."
|
||||
}
|
||||
);
|
||||
|
||||
let mut idx_of_values = 0;
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod alter;
|
||||
pub mod delete;
|
||||
pub mod error;
|
||||
pub mod insert;
|
||||
|
||||
|
||||
@@ -230,9 +230,9 @@ impl Default for ChannelConfig {
|
||||
rate_limit: None,
|
||||
initial_stream_window_size: None,
|
||||
initial_connection_window_size: None,
|
||||
http2_keep_alive_interval: None,
|
||||
http2_keep_alive_interval: Some(Duration::from_secs(30)),
|
||||
http2_keep_alive_timeout: None,
|
||||
http2_keep_alive_while_idle: None,
|
||||
http2_keep_alive_while_idle: Some(true),
|
||||
http2_adaptive_window: None,
|
||||
tcp_keepalive: None,
|
||||
tcp_nodelay: true,
|
||||
@@ -497,9 +497,9 @@ mod tests {
|
||||
rate_limit: None,
|
||||
initial_stream_window_size: None,
|
||||
initial_connection_window_size: None,
|
||||
http2_keep_alive_interval: None,
|
||||
http2_keep_alive_interval: Some(Duration::from_secs(30)),
|
||||
http2_keep_alive_timeout: None,
|
||||
http2_keep_alive_while_idle: None,
|
||||
http2_keep_alive_while_idle: Some(true),
|
||||
http2_adaptive_window: None,
|
||||
tcp_keepalive: None,
|
||||
tcp_nodelay: true,
|
||||
|
||||
9
src/common/procedure-test/Cargo.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
[package]
|
||||
name = "common-procedure-test"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-procedure = { path = "../procedure" }
|
||||
54
src/common/procedure-test/src/lib.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Test utilities for procedures.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::{
|
||||
BoxedProcedure, Context, ContextProvider, ProcedureId, ProcedureState, Result, Status,
|
||||
};
|
||||
|
||||
/// A mock [ContextProvider] that always returns [ProcedureState::Done].
|
||||
struct MockContextProvider {}
|
||||
|
||||
#[async_trait]
|
||||
impl ContextProvider for MockContextProvider {
|
||||
async fn procedure_state(&self, _procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
|
||||
Ok(Some(ProcedureState::Done))
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a procedure until it returns [Status::Done].
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the `procedure` has subprocedures to execute.
|
||||
pub async fn execute_procedure_until_done(procedure: &mut BoxedProcedure) {
|
||||
let ctx = Context {
|
||||
procedure_id: ProcedureId::random(),
|
||||
provider: Arc::new(MockContextProvider {}),
|
||||
};
|
||||
|
||||
loop {
|
||||
match procedure.execute(&ctx).await.unwrap() {
|
||||
Status::Executing { .. } => (),
|
||||
Status::Suspended { subprocedures, .. } => assert!(
|
||||
subprocedures.is_empty(),
|
||||
"Executing subprocedure is unsupported"
|
||||
),
|
||||
Status::Done => break,
|
||||
}
|
||||
}
|
||||
}
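// A hedged usage sketch (not part of the diff): driving a procedure to
// completion in a test. `MyProcedure` is hypothetical; any `Procedure`
// implementation without subprocedures works. Assumes `tokio` is available
// as a dev-dependency for the async test macro.
#[tokio::test]
async fn run_my_procedure_to_done() {
    let mut procedure: BoxedProcedure = Box::new(MyProcedure::default());
    execute_procedure_until_done(&mut procedure).await;
    // At this point the procedure has returned `Status::Done`.
}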
|
||||
@@ -17,7 +17,7 @@ mod runner;
|
||||
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use backon::ExponentialBuilder;
|
||||
@@ -36,6 +36,9 @@ use crate::{
|
||||
ProcedureWithId, Watcher,
|
||||
};
|
||||
|
||||
/// The time to live (TTL) of a finished procedure's metadata.
|
||||
const META_TTL: Duration = Duration::from_secs(60 * 10);
|
||||
|
||||
/// Shared metadata of a procedure.
|
||||
///
|
||||
/// # Note
|
||||
@@ -126,6 +129,8 @@ pub(crate) struct ManagerContext {
|
||||
procedures: RwLock<HashMap<ProcedureId, ProcedureMetaRef>>,
|
||||
/// Messages loaded from the procedure store.
|
||||
messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
|
||||
/// Ids and finished time of finished procedures.
|
||||
finished_procedures: Mutex<VecDeque<(ProcedureId, Instant)>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -143,6 +148,7 @@ impl ManagerContext {
|
||||
lock_map: LockMap::new(),
|
||||
procedures: RwLock::new(HashMap::new()),
|
||||
messages: Mutex::new(HashMap::new()),
|
||||
finished_procedures: Mutex::new(VecDeque::new()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,6 +291,49 @@ impl ManagerContext {
|
||||
messages.remove(procedure_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clean resources of finished procedures.
|
||||
fn on_procedures_finish(&self, procedure_ids: &[ProcedureId]) {
|
||||
self.remove_messages(procedure_ids);
|
||||
|
||||
// Since users need to query the procedure state, we can't remove the
|
||||
// meta of the procedure directly.
|
||||
let now = Instant::now();
|
||||
let mut finished_procedures = self.finished_procedures.lock().unwrap();
|
||||
finished_procedures.extend(procedure_ids.iter().map(|id| (*id, now)));
|
||||
}
|
||||
|
||||
/// Remove metadata of outdated procedures.
|
||||
fn remove_outdated_meta(&self, ttl: Duration) {
|
||||
let ids = {
|
||||
let mut finished_procedures = self.finished_procedures.lock().unwrap();
|
||||
if finished_procedures.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut ids_to_remove = Vec::new();
|
||||
while let Some((id, finish_time)) = finished_procedures.front() {
|
||||
if finish_time.elapsed() > ttl {
|
||||
ids_to_remove.push(*id);
|
||||
finished_procedures.pop_front();
|
||||
} else {
|
||||
// The remaining procedures finished later, so we can break
|
||||
// the loop.
|
||||
break;
|
||||
}
|
||||
}
|
||||
ids_to_remove
|
||||
};
|
||||
|
||||
if ids.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut procedures = self.procedures.write().unwrap();
|
||||
for id in ids {
|
||||
procedures.remove(&id);
|
||||
}
|
||||
}
|
||||
}
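// Rough sketch of the new cleanup flow (ids below are placeholders, and these
// calls are internal to the manager rather than a public API):
//
// 1. When a root procedure tree finishes, the runner reports it:
//        ctx.on_procedures_finish(&[root_id, child_id]);
// 2. Subsequent submit/procedure_state/procedure_watcher calls lazily trim
//    entries whose finish time is older than META_TTL (10 minutes):
//        ctx.remove_outdated_meta(META_TTL);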
|
||||
|
||||
/// Config for [LocalManager].
|
||||
@@ -377,14 +426,18 @@ impl ProcedureManager for LocalManager {
|
||||
DuplicateProcedureSnafu { procedure_id }
|
||||
);
|
||||
|
||||
// TODO(yingwen): We can use a repeated task to remove outdated meta.
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
self.submit_root(procedure.id, 0, procedure.procedure)
|
||||
}
|
||||
|
||||
async fn recover(&self) -> Result<()> {
|
||||
logging::info!("LocalManager start to recover");
|
||||
let recover_start = Instant::now();
|
||||
|
||||
let procedure_store = ProcedureStore::new(self.state_store.clone());
|
||||
let messages = procedure_store.load_messages().await?;
|
||||
let (messages, finished_ids) = procedure_store.load_messages().await?;
|
||||
|
||||
for (procedure_id, message) in &messages {
|
||||
if message.parent_id.is_none() {
|
||||
@@ -412,14 +465,36 @@ impl ProcedureManager for LocalManager {
|
||||
}
|
||||
}
|
||||
|
||||
if !finished_ids.is_empty() {
|
||||
logging::info!(
|
||||
"LocalManager try to clean finished procedures, num: {}",
|
||||
finished_ids.len()
|
||||
);
|
||||
|
||||
for procedure_id in finished_ids {
|
||||
if let Err(e) = procedure_store.delete_procedure(procedure_id).await {
|
||||
logging::error!(e; "Failed to delete procedure {}", procedure_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logging::info!(
|
||||
"LocalManager finish recovery, cost: {}ms",
|
||||
recover_start.elapsed().as_millis()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
Ok(self.manager_ctx.state(procedure_id))
|
||||
}
|
||||
|
||||
fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
self.manager_ctx.watcher(procedure_id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -150,7 +150,19 @@ impl Runner {
|
||||
// If this is the root procedure, clean up message cache.
|
||||
if self.meta.parent_id.is_none() {
|
||||
let procedure_ids = self.manager_ctx.procedures_in_tree(&self.meta);
|
||||
self.manager_ctx.remove_messages(&procedure_ids);
|
||||
// Clean resources.
|
||||
self.manager_ctx.on_procedures_finish(&procedure_ids);
|
||||
for id in procedure_ids {
|
||||
if let Err(e) = self.store.delete_procedure(id).await {
|
||||
logging::error!(
|
||||
e;
|
||||
"Runner {}-{} failed to delete procedure {}",
|
||||
self.procedure.type_name(),
|
||||
self.meta.id,
|
||||
id,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logging::info!(
|
||||
@@ -451,6 +463,7 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::local::test_util;
|
||||
use crate::store::proc_path;
|
||||
use crate::{ContextProvider, Error, LockKey, Procedure};
|
||||
|
||||
const ROOT_ID: &str = "9f805a1f-05f7-490c-9f91-bd56e3cc54c1";
|
||||
@@ -472,7 +485,7 @@ mod tests {
|
||||
}
|
||||
|
||||
async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
|
||||
let dir = format!("{procedure_id}/");
|
||||
let dir = proc_path!("{procedure_id}/");
|
||||
let lister = object_store.list(&dir).await.unwrap();
|
||||
let mut files_in_dir: Vec<_> = lister
|
||||
.map_ok(|de| de.name().to_string())
|
||||
@@ -719,25 +732,28 @@ mod tests {
|
||||
// Manually add this procedure to the manager ctx.
|
||||
assert!(manager_ctx.try_insert_procedure(meta));
|
||||
// Replace the manager ctx.
|
||||
runner.manager_ctx = manager_ctx;
|
||||
runner.manager_ctx = manager_ctx.clone();
|
||||
|
||||
runner.run().await;
|
||||
|
||||
// Check files on store.
|
||||
// Check child procedures.
|
||||
for child_id in children_ids {
|
||||
check_files(
|
||||
&object_store,
|
||||
child_id,
|
||||
&["0000000000.step", "0000000001.commit"],
|
||||
)
|
||||
.await;
|
||||
let state = manager_ctx.state(child_id).unwrap();
|
||||
assert!(state.is_done(), "{state:?}");
|
||||
}
|
||||
let state = manager_ctx.state(procedure_id).unwrap();
|
||||
assert!(state.is_done(), "{state:?}");
|
||||
// Files are removed.
|
||||
check_files(&object_store, procedure_id, &[]).await;
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(5)).await;
|
||||
// Clean outdated meta.
|
||||
manager_ctx.remove_outdated_meta(Duration::from_millis(1));
|
||||
assert!(manager_ctx.state(procedure_id).is_none());
|
||||
assert!(manager_ctx.finished_procedures.lock().unwrap().is_empty());
|
||||
for child_id in children_ids {
|
||||
assert!(manager_ctx.state(child_id).is_none());
|
||||
}
|
||||
check_files(
|
||||
&object_store,
|
||||
procedure_id,
|
||||
&["0000000000.step", "0000000001.commit"],
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -28,6 +28,17 @@ use crate::{BoxedProcedure, ProcedureId};
|
||||
|
||||
pub mod state_store;
|
||||
|
||||
/// Key prefix of procedure store.
|
||||
pub(crate) const PROC_PATH: &str = "procedure/";
|
||||
|
||||
/// Constructs a path for procedure store.
|
||||
macro_rules! proc_path {
|
||||
($fmt:expr) => { format!("{}{}", $crate::store::PROC_PATH, format_args!($fmt)) };
|
||||
($fmt:expr, $($args:tt)*) => { format!("{}{}", $crate::store::PROC_PATH, format_args!($fmt, $($args)*)) };
|
||||
}
|
||||
|
||||
pub(crate) use proc_path;
|
||||
|
||||
/// Serialized data of a procedure.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ProcedureMessage {
|
||||
@@ -116,14 +127,55 @@ impl ProcedureStore {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load uncommitted procedures from the storage.
|
||||
pub(crate) async fn load_messages(&self) -> Result<HashMap<ProcedureId, ProcedureMessage>> {
|
||||
let mut messages = HashMap::new();
|
||||
/// Deletes the states of a procedure from the storage.
|
||||
pub(crate) async fn delete_procedure(&self, procedure_id: ProcedureId) -> Result<()> {
|
||||
let path = proc_path!("{procedure_id}/");
|
||||
// TODO(yingwen): We can optimize this to avoid reading the value.
|
||||
let mut key_values = self.0.walk_top_down(&path).await?;
|
||||
// 8 should be enough for most procedures.
|
||||
let mut step_keys = Vec::with_capacity(8);
|
||||
let mut finish_keys = Vec::new();
|
||||
while let Some((key, _)) = key_values.try_next().await? {
|
||||
let Some(curr_key) = ParsedKey::parse_str(&key) else {
|
||||
logging::warn!("Unknown key while deleting procedures, key: {}", key);
|
||||
continue;
|
||||
};
|
||||
if curr_key.key_type == KeyType::Step {
|
||||
step_keys.push(key);
|
||||
} else {
|
||||
// .commit or .rollback
|
||||
finish_keys.push(key);
|
||||
}
|
||||
}
|
||||
|
||||
logging::debug!(
|
||||
"Delete keys for procedure {}, step_keys: {:?}, finish_keys: {:?}",
|
||||
procedure_id,
|
||||
step_keys,
|
||||
finish_keys
|
||||
);
|
||||
// We delete all step keys first.
|
||||
self.0.batch_delete(step_keys.as_slice()).await?;
|
||||
// Then we delete the finish keys, so an interrupted deletion still leaves the procedure marked as finished for later cleanup.
|
||||
self.0.batch_delete(finish_keys.as_slice()).await?;
|
||||
// Finally we remove the directory itself.
|
||||
self.0.delete(&path).await?;
|
||||
// Maybe we could use procedure_id.commit/rollback as the file name so we could
|
||||
// use remove_all to remove the directory and then remove the commit/rollback file.
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load procedures from the storage. Returns a map of uncommitted procedures and a list
|
||||
/// of finished procedures' ids.
|
||||
pub(crate) async fn load_messages(
|
||||
&self,
|
||||
) -> Result<(HashMap<ProcedureId, ProcedureMessage>, Vec<ProcedureId>)> {
|
||||
// Track the key-value pair by procedure id.
|
||||
let mut procedure_key_values: HashMap<_, (ParsedKey, Vec<u8>)> = HashMap::new();
|
||||
|
||||
// Scan all procedures.
|
||||
let mut key_values = self.0.walk_top_down("/").await?;
|
||||
let mut key_values = self.0.walk_top_down(PROC_PATH).await?;
|
||||
while let Some((key, value)) = key_values.try_next().await? {
|
||||
let Some(curr_key) = ParsedKey::parse_str(&key) else {
|
||||
logging::warn!("Unknown key while loading procedures, key: {}", key);
|
||||
@@ -140,6 +192,8 @@ impl ProcedureStore {
|
||||
}
|
||||
}
|
||||
|
||||
let mut messages = HashMap::with_capacity(procedure_key_values.len());
|
||||
let mut finished_ids = Vec::new();
|
||||
for (procedure_id, (parsed_key, value)) in procedure_key_values {
|
||||
if parsed_key.key_type == KeyType::Step {
|
||||
let Some(message) = self.load_one_message(&parsed_key, &value) else {
|
||||
@@ -148,10 +202,12 @@ impl ProcedureStore {
|
||||
continue;
|
||||
};
|
||||
messages.insert(procedure_id, message);
|
||||
} else {
|
||||
finished_ids.push(procedure_id);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(messages)
|
||||
Ok((messages, finished_ids))
|
||||
}
|
||||
|
||||
fn load_one_message(&self, key: &ParsedKey, value: &[u8]) -> Option<ProcedureMessage> {
|
||||
@@ -212,7 +268,8 @@ impl fmt::Display for ParsedKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}/{:010}.{}",
|
||||
"{}{}/{:010}.{}",
|
||||
PROC_PATH,
|
||||
self.procedure_id,
|
||||
self.step,
|
||||
self.key_type.as_str(),
|
||||
@@ -223,6 +280,7 @@ impl fmt::Display for ParsedKey {
|
||||
impl ParsedKey {
|
||||
/// Try to parse the key from specific `input`.
|
||||
fn parse_str(input: &str) -> Option<ParsedKey> {
|
||||
let input = input.strip_prefix(PROC_PATH)?;
|
||||
let mut iter = input.rsplit('/');
|
||||
let name = iter.next()?;
|
||||
let id_str = iter.next()?;
|
||||
@@ -269,7 +327,10 @@ mod tests {
|
||||
step: 2,
|
||||
key_type: KeyType::Step,
|
||||
};
|
||||
assert_eq!(format!("{procedure_id}/0000000002.step"), key.to_string());
|
||||
assert_eq!(
|
||||
proc_path!("{procedure_id}/0000000002.step"),
|
||||
key.to_string()
|
||||
);
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
|
||||
let key = ParsedKey {
|
||||
@@ -277,7 +338,10 @@ mod tests {
|
||||
step: 2,
|
||||
key_type: KeyType::Commit,
|
||||
};
|
||||
assert_eq!(format!("{procedure_id}/0000000002.commit"), key.to_string());
|
||||
assert_eq!(
|
||||
proc_path!("{procedure_id}/0000000002.commit"),
|
||||
key.to_string()
|
||||
);
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
|
||||
let key = ParsedKey {
|
||||
@@ -286,7 +350,7 @@ mod tests {
|
||||
key_type: KeyType::Rollback,
|
||||
};
|
||||
assert_eq!(
|
||||
format!("{procedure_id}/0000000002.rollback"),
|
||||
proc_path!("{procedure_id}/0000000002.rollback"),
|
||||
key.to_string()
|
||||
);
|
||||
assert_eq!(key, ParsedKey::parse_str(&key.to_string()).unwrap());
|
||||
@@ -295,26 +359,29 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_invalid_key() {
|
||||
assert!(ParsedKey::parse_str("").is_none());
|
||||
assert!(ParsedKey::parse_str("invalidprefix").is_none());
|
||||
assert!(ParsedKey::parse_str("procedu/0000000003.step").is_none());
|
||||
assert!(ParsedKey::parse_str("procedure-0000000003.step").is_none());
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
let input = format!("{procedure_id}");
|
||||
let input = proc_path!("{procedure_id}");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
|
||||
let input = format!("{procedure_id}/");
|
||||
let input = proc_path!("{procedure_id}/");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
|
||||
let input = format!("{procedure_id}/0000000003");
|
||||
let input = proc_path!("{procedure_id}/0000000003");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
|
||||
let input = format!("{procedure_id}/0000000003.");
|
||||
let input = proc_path!("{procedure_id}/0000000003.");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
|
||||
let input = format!("{procedure_id}/0000000003.other");
|
||||
let input = proc_path!("{procedure_id}/0000000003.other");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
|
||||
assert!(ParsedKey::parse_str("12345/0000000003.step").is_none());
|
||||
|
||||
let input = format!("{procedure_id}-0000000003.commit");
|
||||
let input = proc_path!("{procedure_id}-0000000003.commit");
|
||||
assert!(ParsedKey::parse_str(&input).is_none());
|
||||
}
|
||||
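The assertions above rely on a `proc_path!` helper whose definition is not shown in this hunk. A minimal sketch of such a macro, assuming it does nothing more than prepend `PROC_PATH` to a formatted string:

    macro_rules! proc_path {
        ($($arg:tt)*) => {
            // Prefix the formatted key with the procedure directory.
            format!("{}{}", PROC_PATH, format_args!($($arg)*))
        };
    }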
|
||||
@@ -384,8 +451,9 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let messages = store.load_messages().await.unwrap();
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert_eq!(1, messages.len());
|
||||
assert!(finished.is_empty());
|
||||
let msg = messages.get(&procedure_id).unwrap();
|
||||
let expect = ProcedureMessage {
|
||||
type_name: "MockProcedure".to_string(),
|
||||
@@ -410,8 +478,9 @@ mod tests {
|
||||
.unwrap();
|
||||
store.commit_procedure(procedure_id, 1).await.unwrap();
|
||||
|
||||
let messages = store.load_messages().await.unwrap();
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert!(messages.is_empty());
|
||||
assert_eq!(&[procedure_id], &finished[..]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -428,8 +497,58 @@ mod tests {
|
||||
.unwrap();
|
||||
store.rollback_procedure(procedure_id, 1).await.unwrap();
|
||||
|
||||
let messages = store.load_messages().await.unwrap();
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert!(messages.is_empty());
|
||||
assert_eq!(&[procedure_id], &finished[..]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_procedure() {
|
||||
let dir = create_temp_dir("delete_procedure");
|
||||
let store = procedure_store_for_test(&dir);
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
let procedure: BoxedProcedure = Box::new(MockProcedure::new("test store procedure"));
|
||||
|
||||
store
|
||||
.store_procedure(procedure_id, 0, &procedure, None)
|
||||
.await
|
||||
.unwrap();
|
||||
store
|
||||
.store_procedure(procedure_id, 1, &procedure, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
store.delete_procedure(procedure_id).await.unwrap();
|
||||
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert!(messages.is_empty());
|
||||
assert!(finished.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_committed_procedure() {
|
||||
let dir = create_temp_dir("delete_committed");
|
||||
let store = procedure_store_for_test(&dir);
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
let procedure: BoxedProcedure = Box::new(MockProcedure::new("test store procedure"));
|
||||
|
||||
store
|
||||
.store_procedure(procedure_id, 0, &procedure, None)
|
||||
.await
|
||||
.unwrap();
|
||||
store
|
||||
.store_procedure(procedure_id, 1, &procedure, None)
|
||||
.await
|
||||
.unwrap();
|
||||
store.commit_procedure(procedure_id, 2).await.unwrap();
|
||||
|
||||
store.delete_procedure(procedure_id).await.unwrap();
|
||||
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert!(messages.is_empty());
|
||||
assert!(finished.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -477,8 +596,9 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let messages = store.load_messages().await.unwrap();
|
||||
let (messages, finished) = store.load_messages().await.unwrap();
|
||||
assert_eq!(2, messages.len());
|
||||
assert_eq!(1, finished.len());
|
||||
|
||||
let msg = messages.get(&id0).unwrap();
|
||||
assert_eq!("id0-2", msg.data);
|
||||
|
||||
@@ -45,7 +45,11 @@ pub trait StateStore: Send + Sync {
|
||||
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream>;
|
||||
|
||||
/// Deletes key-value pairs by `keys`.
|
||||
async fn delete(&self, keys: &[String]) -> Result<()>;
|
||||
async fn batch_delete(&self, keys: &[String]) -> Result<()>;
|
||||
|
||||
/// Deletes one key-value pair by `key`. Returns `Ok` if the key
|
||||
/// does not exist.
|
||||
async fn delete(&self, key: &str) -> Result<()>;
|
||||
}
|
||||
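A short usage sketch of the split API, assuming a `state_store: &dyn StateStore` and illustrative key names: `batch_delete` removes a whole set of keys in one call, while `delete` removes a single key and succeeds even if the key is already gone.

    async fn purge(state_store: &dyn StateStore) -> Result<()> {
        // Remove a procedure's step keys in one batch.
        state_store
            .batch_delete(&["procedure/1.step".to_string(), "procedure/2.step".to_string()])
            .await?;
        // Remove a single key; a missing key is not an error.
        state_store.delete("procedure/1.commit").await?;
        Ok(())
    }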
|
||||
/// Reference counted pointer to [StateStore].
|
||||
@@ -80,8 +84,6 @@ impl StateStore for ObjectStateStore {
|
||||
}
|
||||
|
||||
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
|
||||
let path_string = path.to_string();
|
||||
|
||||
let mut lister = self
|
||||
.store
|
||||
.scan(path)
|
||||
@@ -92,12 +94,11 @@ impl StateStore for ObjectStateStore {
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.with_context(|_| ListStateSnafu {
|
||||
path: path_string.clone(),
|
||||
})?;
|
||||
.context(ListStateSnafu { path })?;
|
||||
|
||||
let store = self.store.clone();
|
||||
|
||||
let path_string = path.to_string();
|
||||
let stream = try_stream!({
|
||||
while let Some(res) = lister.next().await {
|
||||
let entry = res
|
||||
@@ -138,13 +139,22 @@ impl StateStore for ObjectStateStore {
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
|
||||
async fn delete(&self, keys: &[String]) -> Result<()> {
|
||||
for key in keys {
|
||||
self.store
|
||||
.delete(key)
|
||||
.await
|
||||
.context(DeleteStateSnafu { key })?;
|
||||
}
|
||||
async fn batch_delete(&self, keys: &[String]) -> Result<()> {
|
||||
self.store
|
||||
.remove(keys.to_vec())
|
||||
.await
|
||||
.with_context(|_| DeleteStateSnafu {
|
||||
key: format!("{:?}", keys),
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete(&self, key: &str) -> Result<()> {
|
||||
self.store
|
||||
.delete(key)
|
||||
.await
|
||||
.with_context(|_| DeleteStateSnafu { key })?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -215,7 +225,7 @@ mod tests {
|
||||
);
|
||||
|
||||
state_store
|
||||
.delete(&["a/2".to_string(), "b/1".to_string()])
|
||||
.batch_delete(&["a/2".to_string(), "b/1".to_string()])
|
||||
.await
|
||||
.unwrap();
|
||||
let mut data: Vec<_> = state_store
|
||||
@@ -228,4 +238,40 @@ mod tests {
|
||||
data.sort_unstable_by(|a, b| a.0.cmp(&b.0));
|
||||
assert_eq!(vec![("a/1".to_string(), b"v1".to_vec()),], data);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_object_state_store_delete() {
|
||||
let dir = create_temp_dir("state_store_list");
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let state_store = ObjectStateStore::new(object_store);
|
||||
|
||||
state_store.put("a/1", b"v1".to_vec()).await.unwrap();
|
||||
state_store.put("a/2", b"v2".to_vec()).await.unwrap();
|
||||
state_store.put("b/1", b"v3".to_vec()).await.unwrap();
|
||||
|
||||
state_store.delete("b/1").await.unwrap();
|
||||
|
||||
let mut data: Vec<_> = state_store
|
||||
.walk_top_down("a/")
|
||||
.await
|
||||
.unwrap()
|
||||
.try_collect()
|
||||
.await
|
||||
.unwrap();
|
||||
data.sort_unstable_by(|a, b| a.0.cmp(&b.0));
|
||||
assert_eq!(
|
||||
vec![
|
||||
("a/1".to_string(), b"v1".to_vec()),
|
||||
("a/2".to_string(), b"v2".to_vec()),
|
||||
],
|
||||
data
|
||||
);
|
||||
|
||||
// Delete returns Ok even if the key doesn't exist.
|
||||
state_store.delete("b/1").await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,13 +49,18 @@ pub trait PhysicalPlan: Debug + Send + Sync {
|
||||
/// Specifies the output partitioning scheme of this plan
|
||||
fn output_partitioning(&self) -> Partitioning;
|
||||
|
||||
/// Returns `Some(keys)` that describes how the output is sorted.
|
||||
fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Get a list of child physical plans that provide the input for this plan. The returned list
|
||||
/// will be empty for leaf nodes, will contain a single value for unary nodes, or two
|
||||
/// values for binary nodes (such as joins).
|
||||
fn children(&self) -> Vec<PhysicalPlanRef>;
|
||||
|
||||
/// Returns a new plan where all children were replaced by new plans.
|
||||
/// The size of `children` must be equal to the size of `PhysicalPlan::children()`.
|
||||
/// The size of `children` must be equal to the size of [`PhysicalPlan::children()`].
|
||||
fn with_new_children(&self, children: Vec<PhysicalPlanRef>) -> Result<PhysicalPlanRef>;
|
||||
|
||||
/// Creates a RecordBatch stream.
|
||||
@@ -149,7 +154,7 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
|
||||
}
|
||||
|
||||
fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
|
||||
None
|
||||
self.0.output_ordering()
|
||||
}
|
||||
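Forwarding the ordering instead of returning `None` lets DataFusion see the wrapped plan's sort keys and elide redundant sorts. A minimal sketch of the same delegation pattern for an arbitrary wrapper type (the wrapper name is illustrative; the change above applies it to `DfPhysicalPlanAdapter`):

    struct PlanWrapper(PhysicalPlanRef);

    impl PlanWrapper {
        // Report the wrapped plan's ordering rather than hiding it from the optimizer.
        fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
            self.0.output_ordering()
        }
    }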
|
||||
fn children(&self) -> Vec<Arc<dyn DfPhysicalPlan>> {
|
||||
|
||||
@@ -19,6 +19,7 @@ use std::task::{Context, Poll};
|
||||
|
||||
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
|
||||
use datafusion::error::Result as DfResult;
|
||||
use datafusion::parquet::arrow::async_reader::{AsyncFileReader, ParquetRecordBatchStream};
|
||||
use datafusion::physical_plan::RecordBatchStream as DfRecordBatchStream;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
@@ -39,6 +40,40 @@ type FutureStream = Pin<
|
||||
>,
|
||||
>;
|
||||
|
||||
/// ParquetRecordBatchStream -> DataFusion RecordBatchStream
|
||||
pub struct ParquetRecordBatchStreamAdapter<T> {
|
||||
stream: ParquetRecordBatchStream<T>,
|
||||
}
|
||||
|
||||
impl<T: Unpin + AsyncFileReader + Send + 'static> ParquetRecordBatchStreamAdapter<T> {
|
||||
pub fn new(stream: ParquetRecordBatchStream<T>) -> Self {
|
||||
Self { stream }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Unpin + AsyncFileReader + Send + 'static> DfRecordBatchStream
|
||||
for ParquetRecordBatchStreamAdapter<T>
|
||||
{
|
||||
fn schema(&self) -> DfSchemaRef {
|
||||
self.stream.schema().clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Unpin + AsyncFileReader + Send + 'static> Stream for ParquetRecordBatchStreamAdapter<T> {
|
||||
type Item = DfResult<DfRecordBatch>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx))
|
||||
.map(|r| r.map_err(|e| DataFusionError::External(Box::new(e))));
|
||||
Poll::Ready(batch)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.stream.size_hint()
|
||||
}
|
||||
}
|
||||
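A hedged sketch of building and wrapping the stream, assuming a tokio file as the async reader and mapping parquet/IO errors through `DataFusionError::External`:

    async fn open_parquet_stream(
        path: &str,
    ) -> DfResult<ParquetRecordBatchStreamAdapter<tokio::fs::File>> {
        use datafusion::parquet::arrow::async_reader::ParquetRecordBatchStreamBuilder;

        let file = tokio::fs::File::open(path)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?;
        let builder = ParquetRecordBatchStreamBuilder::new(file)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?;
        let stream = builder
            .build()
            .map_err(|e| DataFusionError::External(Box::new(e)))?;
        // The adapter exposes this as a DataFusion `RecordBatchStream`.
        Ok(ParquetRecordBatchStreamAdapter::new(stream))
    }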
|
||||
/// Greptime SendableRecordBatchStream -> DataFusion RecordBatchStream
|
||||
pub struct DfRecordBatchStreamAdapter {
|
||||
stream: SendableRecordBatchStream,
|
||||
|
||||
@@ -31,7 +31,6 @@ pub async fn collect_batches(stream: SendableRecordBatchStream) -> Result<Record
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::mem;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -59,7 +58,7 @@ mod tests {
|
||||
type Item = Result<RecordBatch>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let batch = mem::replace(&mut self.batch, None);
|
||||
let batch = self.batch.take();
|
||||
|
||||
if let Some(batch) = batch {
|
||||
Poll::Ready(Some(Ok(batch)))
|
||||
|
||||
@@ -8,7 +8,7 @@ license.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
metrics = "0.20"
|
||||
metrics.workspace = true
|
||||
once_cell = "1.12"
|
||||
paste.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -28,7 +28,8 @@ pub enum Error {
|
||||
source: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
#[snafu(display("Repeated task {} not started yet", name))]
|
||||
|
||||
#[snafu(display("Repeated task {} is already started", name))]
|
||||
IllegalState { name: String, location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
|
||||
@@ -24,5 +24,5 @@ pub use global::{
|
||||
spawn_read, spawn_write, write_runtime,
|
||||
};
|
||||
|
||||
pub use crate::repeated_task::{RepeatedTask, TaskFunction, TaskFunctionRef};
|
||||
pub use crate::repeated_task::{BoxedTaskFunction, RepeatedTask, TaskFunction};
|
||||
pub use crate::runtime::{Builder, JoinError, JoinHandle, Runtime};
|
||||
|
||||
@@ -13,57 +13,78 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_telemetry::logging;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::Mutex;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::error::{IllegalStateSnafu, Result, WaitGcTaskStopSnafu};
|
||||
use crate::Runtime;
|
||||
|
||||
/// Task to execute repeatedly.
|
||||
#[async_trait::async_trait]
|
||||
pub trait TaskFunction<E: ErrorExt> {
|
||||
async fn call(&self) -> std::result::Result<(), E>;
|
||||
pub trait TaskFunction<E> {
|
||||
/// Invoke the task.
|
||||
async fn call(&mut self) -> std::result::Result<(), E>;
|
||||
|
||||
/// Name of the task.
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub type TaskFunctionRef<E> = Arc<dyn TaskFunction<E> + Send + Sync>;
|
||||
pub type BoxedTaskFunction<E> = Box<dyn TaskFunction<E> + Send + Sync + 'static>;
|
||||
|
||||
pub struct RepeatedTask<E> {
|
||||
cancel_token: Mutex<Option<CancellationToken>>,
|
||||
task_handle: Mutex<Option<JoinHandle<()>>>,
|
||||
started: AtomicBool,
|
||||
interval: Duration,
|
||||
task_fn: TaskFunctionRef<E>,
|
||||
struct TaskInner<E> {
|
||||
/// The repeated task handle. This handle is Some if the task is started.
|
||||
task_handle: Option<JoinHandle<()>>,
|
||||
/// The task_fn to run. This is Some if the task is not started.
|
||||
task_fn: Option<BoxedTaskFunction<E>>,
|
||||
}
|
||||
|
||||
impl<E: ErrorExt> std::fmt::Display for RepeatedTask<E> {
|
||||
pub struct RepeatedTask<E> {
|
||||
name: String,
|
||||
cancel_token: CancellationToken,
|
||||
inner: Mutex<TaskInner<E>>,
|
||||
started: AtomicBool,
|
||||
interval: Duration,
|
||||
}
|
||||
|
||||
impl<E> std::fmt::Display for RepeatedTask<E> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "RepeatedTask({})", self.task_fn.name())
|
||||
write!(f, "RepeatedTask({})", self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: ErrorExt> std::fmt::Debug for RepeatedTask<E> {
|
||||
impl<E> std::fmt::Debug for RepeatedTask<E> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_tuple("RepeatedTask")
|
||||
.field(&self.task_fn.name())
|
||||
.finish()
|
||||
f.debug_tuple("RepeatedTask").field(&self.name).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> Drop for RepeatedTask<E> {
|
||||
fn drop(&mut self) {
|
||||
let inner = self.inner.get_mut().unwrap();
|
||||
if inner.task_handle.is_some() {
|
||||
// Cancel the background task.
|
||||
self.cancel_token.cancel();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
pub fn new(interval: Duration, task_fn: TaskFunctionRef<E>) -> Self {
|
||||
pub fn new(interval: Duration, task_fn: BoxedTaskFunction<E>) -> Self {
|
||||
Self {
|
||||
cancel_token: Mutex::new(None),
|
||||
task_handle: Mutex::new(None),
|
||||
name: task_fn.name().to_string(),
|
||||
cancel_token: CancellationToken::new(),
|
||||
inner: Mutex::new(TaskInner {
|
||||
task_handle: None,
|
||||
task_fn: Some(task_fn),
|
||||
}),
|
||||
started: AtomicBool::new(false),
|
||||
interval,
|
||||
task_fn,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,11 +92,17 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
self.started.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub async fn start(&self, runtime: Runtime) -> Result<()> {
|
||||
let token = CancellationToken::new();
|
||||
pub fn start(&self, runtime: Runtime) -> Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
ensure!(
|
||||
inner.task_fn.is_some(),
|
||||
IllegalStateSnafu { name: &self.name }
|
||||
);
|
||||
|
||||
let interval = self.interval;
|
||||
let child = token.child_token();
|
||||
let task_fn = self.task_fn.clone();
|
||||
let child = self.cancel_token.child_token();
|
||||
// Safety: the task is not started, so `task_fn` is still present (checked above).
|
||||
let mut task_fn = inner.task_fn.take().unwrap();
|
||||
// TODO(hl): Maybe spawn to a blocking runtime.
|
||||
let handle = runtime.spawn(async move {
|
||||
loop {
|
||||
@@ -90,13 +117,12 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
}
|
||||
}
|
||||
});
|
||||
*self.cancel_token.lock().await = Some(token);
|
||||
*self.task_handle.lock().await = Some(handle);
|
||||
inner.task_handle = Some(handle);
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
|
||||
logging::debug!(
|
||||
"Repeated task {} started with interval: {:?}",
|
||||
self.task_fn.name(),
|
||||
self.name,
|
||||
self.interval
|
||||
);
|
||||
|
||||
@@ -104,30 +130,25 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
let name = self.task_fn.name();
|
||||
ensure!(
|
||||
self.started
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok(),
|
||||
IllegalStateSnafu { name }
|
||||
);
|
||||
let token = self
|
||||
.cancel_token
|
||||
.lock()
|
||||
.await
|
||||
.take()
|
||||
.context(IllegalStateSnafu { name })?;
|
||||
let handle = self
|
||||
.task_handle
|
||||
.lock()
|
||||
.await
|
||||
.take()
|
||||
.context(IllegalStateSnafu { name })?;
|
||||
let handle = {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
if inner.task_handle.is_none() {
|
||||
// We allow stopping the task multiple times.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
token.cancel();
|
||||
handle.await.context(WaitGcTaskStopSnafu { name })?;
|
||||
self.cancel_token.cancel();
|
||||
self.started.store(false, Ordering::Relaxed);
|
||||
// Safety: `task_handle` was checked to be `Some` above.
|
||||
inner.task_handle.take().unwrap()
|
||||
};
|
||||
|
||||
handle
|
||||
.await
|
||||
.context(WaitGcTaskStopSnafu { name: &self.name })?;
|
||||
|
||||
logging::debug!("Repeated task {} stopped", self.name);
|
||||
|
||||
logging::debug!("Repeated task {} stopped", name);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -135,20 +156,22 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::atomic::AtomicI32;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
|
||||
struct TickTask {
|
||||
n: AtomicI32,
|
||||
n: Arc<AtomicI32>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TaskFunction<crate::error::Error> for TickTask {
|
||||
impl TaskFunction<Error> for TickTask {
|
||||
fn name(&self) -> &str {
|
||||
"test"
|
||||
}
|
||||
|
||||
async fn call(&self) -> Result<()> {
|
||||
async fn call(&mut self) -> Result<()> {
|
||||
self.n.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
Ok(())
|
||||
@@ -159,16 +182,15 @@ mod tests {
|
||||
async fn test_repeated_task() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let task_fn = Arc::new(TickTask {
|
||||
n: AtomicI32::new(0),
|
||||
});
|
||||
let n = Arc::new(AtomicI32::new(0));
|
||||
let task_fn = TickTask { n: n.clone() };
|
||||
|
||||
let task = RepeatedTask::new(Duration::from_millis(100), task_fn.clone());
|
||||
let task = RepeatedTask::new(Duration::from_millis(100), Box::new(task_fn));
|
||||
|
||||
task.start(crate::bg_runtime()).await.unwrap();
|
||||
task.start(crate::bg_runtime()).unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(550)).await;
|
||||
task.stop().await.unwrap();
|
||||
|
||||
assert_eq!(task_fn.n.load(Ordering::Relaxed), 5);
|
||||
assert_eq!(n.load(Ordering::Relaxed), 5);
|
||||
}
|
||||
}
|
||||
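A hedged sketch of a task that benefits from the new `&mut self` signature: mutable state can live directly in the task without `Arc` or atomics. The task type, its field, and the metric of "flushing" are illustrative only.

    struct FlushTask {
        pending: Vec<String>,
    }

    #[async_trait::async_trait]
    impl TaskFunction<Error> for FlushTask {
        fn name(&self) -> &str {
            "flush_task"
        }

        async fn call(&mut self) -> Result<()> {
            // `&mut self` lets the task drain its own buffer in place.
            self.pending.clear();
            Ok(())
        }
    }

    // Usage sketch:
    // let task = RepeatedTask::new(Duration::from_secs(1), Box::new(FlushTask { pending: vec![] }));
    // task.start(runtime)?;
    // ...
    // task.stop().await?;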
|
||||
@@ -14,16 +14,18 @@ common-error = { path = "../error" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
datafusion.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datafusion-substrait.workspace = true
|
||||
datatypes = { path = "../../datatypes" }
|
||||
futures = "0.3"
|
||||
prost.workspace = true
|
||||
session = { path = "../../session" }
|
||||
snafu.workspace = true
|
||||
table = { path = "../../table" }
|
||||
query = { path = "../../query" }
|
||||
|
||||
[dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.4"
|
||||
version = "0.7"
|
||||
|
||||
[dev-dependencies]
|
||||
datatypes = { path = "../../datatypes" }
|
||||
|
||||
@@ -16,18 +16,18 @@ use std::sync::Arc;
|
||||
|
||||
use async_recursion::async_recursion;
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use bytes::{Buf, Bytes};
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_telemetry::debug;
|
||||
use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
|
||||
use datafusion::catalog::catalog::CatalogList;
|
||||
use datafusion::common::{DFField, DFSchema};
|
||||
use datafusion::datasource::DefaultTableSource;
|
||||
use datafusion::physical_plan::project_schema;
|
||||
use datafusion::sql::TableReference;
|
||||
use datafusion_expr::{Filter, LogicalPlan, TableScan};
|
||||
use prost::Message;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use substrait_proto::proto::expression::mask_expression::{StructItem, StructSelect};
|
||||
@@ -42,41 +42,35 @@ use table::table::adapter::DfTableProviderAdapter;
|
||||
use crate::context::ConvertorContext;
|
||||
use crate::df_expr::{expression_from_df_expr, to_df_expr};
|
||||
use crate::error::{
|
||||
self, DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error,
|
||||
InvalidParametersSnafu, MissingFieldSnafu, ResolveTableSnafu, SchemaNotMatchSnafu,
|
||||
UnknownPlanSnafu, UnsupportedExprSnafu, UnsupportedPlanSnafu,
|
||||
self, DFInternalSnafu, EmptyPlanSnafu, Error, InvalidParametersSnafu, MissingFieldSnafu,
|
||||
ResolveTableSnafu, SchemaNotMatchSnafu, UnknownPlanSnafu, UnsupportedExprSnafu,
|
||||
UnsupportedPlanSnafu,
|
||||
};
|
||||
use crate::schema::{from_schema, to_schema};
|
||||
use crate::SubstraitPlan;
|
||||
|
||||
pub struct DFLogicalSubstraitConvertor;
|
||||
pub struct DFLogicalSubstraitConvertorDeprecated;
|
||||
|
||||
#[async_trait]
|
||||
impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
||||
impl SubstraitPlan for DFLogicalSubstraitConvertorDeprecated {
|
||||
type Error = Error;
|
||||
|
||||
type Plan = LogicalPlan;
|
||||
|
||||
async fn decode<B: Buf + Send>(
|
||||
&self,
|
||||
message: B,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
_message: B,
|
||||
_catalog_list: Arc<dyn CatalogList>,
|
||||
) -> Result<Self::Plan, Self::Error> {
|
||||
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
|
||||
self.convert_plan(plan, catalog_manager).await
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
|
||||
let plan = self.convert_df_plan(plan)?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
plan.encode(&mut buf).context(EncodeRelSnafu)?;
|
||||
|
||||
Ok(buf.freeze())
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl DFLogicalSubstraitConvertor {
|
||||
impl DFLogicalSubstraitConvertorDeprecated {
|
||||
async fn convert_plan(
|
||||
&self,
|
||||
mut plan: Plan,
|
||||
@@ -197,6 +191,14 @@ impl DFLogicalSubstraitConvertor {
|
||||
name: "Cross Relation",
|
||||
}
|
||||
.fail()?,
|
||||
RelType::HashJoin(_) => UnsupportedPlanSnafu {
|
||||
name: "Cross Relation",
|
||||
}
|
||||
.fail()?,
|
||||
RelType::MergeJoin(_) => UnsupportedPlanSnafu {
|
||||
name: "Cross Relation",
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
Ok(logical_plan)
|
||||
@@ -311,7 +313,7 @@ impl DFLogicalSubstraitConvertor {
|
||||
}
|
||||
}
|
||||
|
||||
impl DFLogicalSubstraitConvertor {
|
||||
impl DFLogicalSubstraitConvertorDeprecated {
|
||||
fn logical_plan_to_rel(
|
||||
&self,
|
||||
ctx: &mut ConvertorContext,
|
||||
@@ -530,112 +532,3 @@ fn same_schema_without_metadata(lhs: &ArrowSchemaRef, rhs: &ArrowSchemaRef) -> b
|
||||
&& x.is_nullable() == y.is_nullable()
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use catalog::local::{LocalCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use catalog::{CatalogList, CatalogProvider, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
|
||||
use datafusion::common::{DFSchema, ToDFSchema};
|
||||
use datafusion_expr::TableSource;
|
||||
use datatypes::schema::RawSchema;
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::requests::CreateTableRequest;
|
||||
use table::test_util::{EmptyTable, MockTableEngine};
|
||||
|
||||
use super::*;
|
||||
use crate::schema::test::supported_types;
|
||||
|
||||
const DEFAULT_TABLE_NAME: &str = "SubstraitTable";
|
||||
|
||||
async fn build_mock_catalog_manager() -> CatalogManagerRef {
|
||||
let mock_table_engine = Arc::new(MockTableEngine::new());
|
||||
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
|
||||
MITO_ENGINE.to_string(),
|
||||
mock_table_engine.clone(),
|
||||
));
|
||||
let catalog_manager = Arc::new(LocalCatalogManager::try_new(engine_manager).await.unwrap());
|
||||
let schema_provider = Arc::new(MemorySchemaProvider::new());
|
||||
let catalog_provider = Arc::new(MemoryCatalogProvider::new());
|
||||
catalog_provider
|
||||
.register_schema(DEFAULT_SCHEMA_NAME.to_string(), schema_provider)
|
||||
.unwrap();
|
||||
catalog_manager
|
||||
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), catalog_provider)
|
||||
.unwrap();
|
||||
|
||||
catalog_manager.init().await.unwrap();
|
||||
catalog_manager
|
||||
}
|
||||
|
||||
fn build_create_table_request<N: ToString>(table_name: N) -> CreateTableRequest {
|
||||
CreateTableRequest {
|
||||
id: 1,
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: None,
|
||||
schema: RawSchema::new(supported_types()),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn logical_plan_round_trip(plan: LogicalPlan, catalog: CatalogManagerRef) {
|
||||
let convertor = DFLogicalSubstraitConvertor;
|
||||
|
||||
let proto = convertor.encode(plan.clone()).unwrap();
|
||||
let tripped_plan = convertor.decode(proto, catalog).await.unwrap();
|
||||
|
||||
assert_eq!(format!("{plan:?}"), format!("{tripped_plan:?}"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_table_scan() {
|
||||
let catalog_manager = build_mock_catalog_manager().await;
|
||||
let table_ref = Arc::new(EmptyTable::new(build_create_table_request(
|
||||
DEFAULT_TABLE_NAME,
|
||||
)));
|
||||
catalog_manager
|
||||
.register_table(RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: DEFAULT_TABLE_NAME.to_string(),
|
||||
table_id: 1,
|
||||
table: table_ref.clone(),
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let adapter = Arc::new(DefaultTableSource::new(Arc::new(
|
||||
DfTableProviderAdapter::new(table_ref),
|
||||
)));
|
||||
|
||||
let projection = vec![1, 3, 5];
|
||||
let df_schema = adapter.schema().to_dfschema().unwrap();
|
||||
let projected_fields = projection
|
||||
.iter()
|
||||
.map(|index| df_schema.field(*index).clone())
|
||||
.collect();
|
||||
let projected_schema =
|
||||
Arc::new(DFSchema::new_with_metadata(projected_fields, Default::default()).unwrap());
|
||||
|
||||
let table_name = TableReference::full(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
DEFAULT_TABLE_NAME,
|
||||
);
|
||||
let table_scan_plan = LogicalPlan::TableScan(TableScan {
|
||||
table_name,
|
||||
source: adapter,
|
||||
projection: Some(projection),
|
||||
projected_schema,
|
||||
filters: vec![],
|
||||
fetch: None,
|
||||
});
|
||||
|
||||
logical_plan_round_trip(table_scan_plan, catalog_manager).await;
|
||||
}
|
||||
}
|
||||
|
||||
61
src/common/substrait/src/df_substrait.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use datafusion::catalog::catalog::CatalogList;
|
||||
use datafusion::prelude::SessionContext;
|
||||
use datafusion_expr::LogicalPlan;
|
||||
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
|
||||
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
|
||||
use prost::Message;
|
||||
use snafu::ResultExt;
|
||||
use substrait_proto::proto::Plan;
|
||||
|
||||
use crate::error::{DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error};
|
||||
use crate::SubstraitPlan;
|
||||
|
||||
pub struct DFLogicalSubstraitConvertor;
|
||||
|
||||
#[async_trait]
|
||||
impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
||||
type Error = Error;
|
||||
|
||||
type Plan = LogicalPlan;
|
||||
|
||||
async fn decode<B: Buf + Send>(
|
||||
&self,
|
||||
message: B,
|
||||
catalog_list: Arc<dyn CatalogList>,
|
||||
) -> Result<Self::Plan, Self::Error> {
|
||||
let mut context = SessionContext::new();
|
||||
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
|
||||
context.register_catalog_list(catalog_list);
|
||||
let df_plan = from_substrait_plan(&mut context, &plan)
|
||||
.await
|
||||
.context(DecodeDfPlanSnafu)?;
|
||||
Ok(df_plan)
|
||||
}
|
||||
|
||||
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
|
||||
let mut buf = BytesMut::new();
|
||||
|
||||
let substrait_plan = to_substrait_plan(&plan).context(EncodeDfPlanSnafu)?;
|
||||
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
|
||||
|
||||
Ok(buf.freeze())
|
||||
}
|
||||
}
|
||||
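A hedged round-trip sketch, assuming DataFusion's in-memory `MemoryCatalogList` is enough to resolve the tables referenced by the plan being decoded:

    async fn round_trip(plan: LogicalPlan) -> Result<LogicalPlan, Error> {
        use datafusion::catalog::catalog::MemoryCatalogList;

        let convertor = DFLogicalSubstraitConvertor;
        // Encode to Substrait bytes, then decode back through datafusion-substrait.
        let bytes = convertor.encode(plan)?;
        let catalog_list: Arc<dyn CatalogList> = Arc::new(MemoryCatalogList::new());
        convertor.decode(bytes, catalog_list).await
    }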
@@ -109,6 +109,18 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode DataFusion plan, source: {}", source))]
|
||||
EncodeDfPlan {
|
||||
source: datafusion::error::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode DataFusion plan, source: {}", source))]
|
||||
DecodeDfPlan {
|
||||
source: datafusion::error::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -129,7 +141,10 @@ impl ErrorExt for Error {
|
||||
| Error::InvalidParameters { .. }
|
||||
| Error::TableNotFound { .. }
|
||||
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
|
||||
Error::DFInternal { .. }
|
||||
| Error::Internal { .. }
|
||||
| Error::EncodeDfPlan { .. }
|
||||
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
|
||||
Error::ConvertDfSchema { source } => source.status_code(),
|
||||
Error::ResolveTable { source, .. } => source.status_code(),
|
||||
}
|
||||
|
||||
@@ -17,16 +17,20 @@
|
||||
|
||||
mod context;
|
||||
mod df_expr;
|
||||
#[allow(unused)]
|
||||
mod df_logical;
|
||||
mod df_substrait;
|
||||
pub mod error;
|
||||
mod schema;
|
||||
mod types;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes};
|
||||
use catalog::CatalogManagerRef;
|
||||
use datafusion::catalog::catalog::CatalogList;
|
||||
|
||||
pub use crate::df_logical::DFLogicalSubstraitConvertor;
|
||||
pub use crate::df_substrait::DFLogicalSubstraitConvertor;
|
||||
|
||||
#[async_trait]
|
||||
pub trait SubstraitPlan {
|
||||
@@ -37,7 +41,7 @@ pub trait SubstraitPlan {
|
||||
async fn decode<B: Buf + Send>(
|
||||
&self,
|
||||
message: B,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
catalog_list: Arc<dyn CatalogList>,
|
||||
) -> Result<Self::Plan, Self::Error>;
|
||||
|
||||
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error>;
|
||||
|
||||
@@ -12,8 +12,8 @@ deadlock_detection = ["parking_lot"]
|
||||
backtrace = "0.3"
|
||||
common-error = { path = "../error" }
|
||||
console-subscriber = { version = "0.1", optional = true }
|
||||
metrics = "0.20.1"
|
||||
metrics-exporter-prometheus = { version = "0.11", default-features = false }
|
||||
metrics-exporter-prometheus = { git = "https://github.com/GreptimeTeam/metrics.git", rev = "174de287e9f7f9f57c0272be56c95df156489476", default-features = false }
|
||||
metrics.workspace = true
|
||||
once_cell = "1.10"
|
||||
opentelemetry = { version = "0.17", default-features = false, features = [
|
||||
"trace",
|
||||
@@ -23,6 +23,7 @@ opentelemetry-jaeger = { version = "0.16", features = ["rt-tokio"] }
|
||||
parking_lot = { version = "0.12", features = [
|
||||
"deadlock_detection",
|
||||
], optional = true }
|
||||
serde = "1.0"
|
||||
tracing = "0.1"
|
||||
tracing-appender = "0.2"
|
||||
tracing-bunyan-formatter = "0.3"
|
||||
|
||||
@@ -19,6 +19,7 @@ use std::sync::{Arc, Mutex, Once};
|
||||
use once_cell::sync::Lazy;
|
||||
use opentelemetry::global;
|
||||
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||
use serde::{Deserialize, Serialize};
|
||||
pub use tracing::{event, span, Level};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use tracing_appender::rolling::{RollingFileAppender, Rotation};
|
||||
@@ -31,6 +32,24 @@ use tracing_subscriber::{filter, EnvFilter, Registry};
|
||||
|
||||
pub use crate::{debug, error, info, log, trace, warn};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct LoggingOptions {
|
||||
pub dir: String,
|
||||
pub level: String,
|
||||
pub enable_jaeger_tracing: bool,
|
||||
}
|
||||
|
||||
impl Default for LoggingOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
level: "info".to_string(),
|
||||
enable_jaeger_tracing: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
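A brief usage sketch: callers now build a `LoggingOptions` (every field has a default, and `#[serde(default)]` lets it be deserialized from partial config) and pass it to `init_global_logging`. The directory and level values below are illustrative.

    let opts = LoggingOptions {
        dir: "/var/log/greptimedb".to_string(),
        level: "debug".to_string(),
        ..Default::default()
    };
    // Keep the returned guards alive for the lifetime of the process so logs keep flushing.
    let _guards = init_global_logging("greptimedb", &opts);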
|
||||
/// Init tracing for unittest.
|
||||
/// Write logs to file `unittest`.
|
||||
pub fn init_default_ut_logging() {
|
||||
@@ -47,7 +66,12 @@ pub fn init_default_ut_logging() {
|
||||
env::var("UNITTEST_LOG_DIR").unwrap_or_else(|_| "/tmp/__unittest_logs".to_string());
|
||||
|
||||
let level = env::var("UNITTEST_LOG_LEVEL").unwrap_or_else(|_| "DEBUG".to_string());
|
||||
*g = Some(init_global_logging("unittest", &dir, &level, false));
|
||||
let opts = LoggingOptions {
|
||||
dir: dir.clone(),
|
||||
level,
|
||||
..Default::default()
|
||||
};
|
||||
*g = Some(init_global_logging("unittest", &opts));
|
||||
|
||||
info!("logs dir = {}", dir);
|
||||
});
|
||||
@@ -56,13 +80,11 @@ pub fn init_default_ut_logging() {
|
||||
static GLOBAL_UT_LOG_GUARD: Lazy<Arc<Mutex<Option<Vec<WorkerGuard>>>>> =
|
||||
Lazy::new(|| Arc::new(Mutex::new(None)));
|
||||
|
||||
pub fn init_global_logging(
|
||||
app_name: &str,
|
||||
dir: &str,
|
||||
level: &str,
|
||||
enable_jaeger_tracing: bool,
|
||||
) -> Vec<WorkerGuard> {
|
||||
pub fn init_global_logging(app_name: &str, opts: &LoggingOptions) -> Vec<WorkerGuard> {
|
||||
let mut guards = vec![];
|
||||
let dir = &opts.dir;
|
||||
let level = &opts.level;
|
||||
let enable_jaeger_tracing = opts.enable_jaeger_tracing;
|
||||
|
||||
// Enable log compatible layer to convert log record to tracing span.
|
||||
LogTracer::init().expect("log tracer must be valid");
|
||||
|
||||
@@ -32,7 +32,9 @@ pub fn init_default_metrics_recorder() {
|
||||
|
||||
/// Init prometheus recorder.
|
||||
fn init_prometheus_recorder() {
|
||||
let recorder = PrometheusBuilder::new().build_recorder();
|
||||
let recorder = PrometheusBuilder::new()
|
||||
.add_global_prefix("greptime".to_string())
|
||||
.build_recorder();
|
||||
let mut h = PROMETHEUS_HANDLE.as_ref().write().unwrap();
|
||||
*h = Some(recorder.handle());
|
||||
// TODO(LFC): separate metrics for testing and metrics for production
|
||||
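With the global prefix configured, a metric emitted anywhere through the `metrics` facade is expected to be rendered with a `greptime_` prefix in the Prometheus exposition (assumed behaviour of the forked exporter's `add_global_prefix`). Illustrative example:

    // Recorded under `http_requests_total`, expected to render as `greptime_http_requests_total`.
    metrics::increment_counter!("http_requests_total");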
@@ -60,6 +62,7 @@ pub fn try_handle() -> Option<PrometheusHandle> {
|
||||
pub struct Timer {
|
||||
start: Instant,
|
||||
name: &'static str,
|
||||
labels: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
impl Timer {
|
||||
@@ -67,9 +70,25 @@ impl Timer {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
name,
|
||||
labels: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_labels<S: Into<String> + Clone>(name: &'static str, labels: &[(S, S)]) -> Self {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
name,
|
||||
labels: labels
|
||||
.iter()
|
||||
.map(|(k, v)| (k.clone().into(), v.clone().into()))
|
||||
.collect::<Vec<_>>(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn labels_mut(&mut self) -> &mut Vec<(String, String)> {
|
||||
self.labels.as_mut()
|
||||
}
|
||||
|
||||
pub fn elapsed(&self) -> Duration {
|
||||
self.start.elapsed()
|
||||
}
|
||||
@@ -77,7 +96,11 @@ impl Timer {
|
||||
|
||||
impl Drop for Timer {
|
||||
fn drop(&mut self) {
|
||||
histogram!(self.name, self.start.elapsed());
|
||||
if !self.labels.is_empty() {
|
||||
histogram!(self.name, self.start.elapsed(), &self.labels);
|
||||
} else {
|
||||
histogram!(self.name, self.start.elapsed());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,6 +109,9 @@ macro_rules! timer {
|
||||
($name: expr) => {
|
||||
$crate::metric::Timer::new($name)
|
||||
};
|
||||
($name:expr,$labels:expr) => {
|
||||
$crate::metric::Timer::new_with_labels($name, $labels)
|
||||
};
|
||||
}
|
||||
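A short usage sketch of the extended macro; the metric and label names are illustrative. The labeled form forwards to `Timer::new_with_labels`, and more labels can be appended through `labels_mut` before the timer drops and records the histogram.

    fn handle_request() {
        let mut timer = timer!("request_duration_seconds", &[("method", "GET")]);
        timer.labels_mut().push(("status".to_string(), "200".to_string()));
        // ... handle the request; the histogram is recorded when `timer` is dropped.
    }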
|
||||
#[cfg(test)]
|
||||
@@ -109,4 +135,48 @@ mod tests {
|
||||
assert!(text.contains("test_elapsed_timer_a"));
|
||||
assert!(text.contains("test_elapsed_timer_b"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_elapsed_timer_with_label() {
|
||||
init_default_metrics_recorder();
|
||||
{
|
||||
let t = Timer::new("test_elapsed_timer_a");
|
||||
drop(t);
|
||||
}
|
||||
let handle = try_handle().unwrap();
|
||||
let text = handle.render();
|
||||
assert!(text.contains("test_elapsed_timer_a"));
|
||||
assert!(!text.contains("test_elapsed_timer_b"));
|
||||
let label_a = "label_a";
|
||||
let label_b = "label_b";
|
||||
let label_c = "label_c";
|
||||
let label_d = "label_d";
|
||||
let label_e = "label_e";
|
||||
assert!(!text.contains(label_a));
|
||||
assert!(!text.contains(label_b));
|
||||
|
||||
{
|
||||
let mut timer_b = timer!("test_elapsed_timer_b", &[(label_a, "a"), (label_b, "b")]);
|
||||
let labels = timer_b.labels_mut();
|
||||
labels.push((label_c.to_owned(), "d".to_owned()));
|
||||
}
|
||||
let text = handle.render();
|
||||
assert!(text.contains("test_elapsed_timer_a"));
|
||||
assert!(text.contains("test_elapsed_timer_b"));
|
||||
assert!(text.contains(label_a));
|
||||
assert!(text.contains(label_b));
|
||||
assert!(text.contains(label_c));
|
||||
|
||||
{
|
||||
let mut timer_c = timer!("test_elapsed_timer_c");
|
||||
let labels = timer_c.labels_mut();
|
||||
labels.push((label_d.to_owned(), "d".to_owned()));
|
||||
labels.push((label_e.to_owned(), "e".to_owned()));
|
||||
}
|
||||
|
||||
let text = handle.render();
|
||||
assert!(text.contains("test_elapsed_timer_c"));
|
||||
assert!(text.contains(label_d));
|
||||
assert!(text.contains(label_e));
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.