mirror of https://github.com/GreptimeTeam/greptimedb.git
synced 2025-12-25 23:49:58 +00:00

Compare commits

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | f7d2ede4a4 | |
| | df6ebbf934 | |
| | 719fbf2f3a | |
| | cc14dea913 | |
@@ -12,5 +12,4 @@ rustflags = [
    "-Wclippy::print_stdout",
    "-Wclippy::print_stderr",
    "-Wclippy::implicit_clone",
    "-Aclippy::items_after_test_module",
]

@@ -20,3 +20,6 @@ out/

# Rust
target/

# Git
.git

@@ -3,15 +3,8 @@ GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
GT_S3_REGION=S3 region
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
# Settings for azblob test
GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint

.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 1 changed line
@@ -81,5 +81,6 @@ body:
|
||||
Please walk us through and provide steps and details on how
|
||||
to reproduce the issue. If possible, provide scripts that we
|
||||
can run to trigger the bug.
|
||||
render: bash
|
||||
validations:
|
||||
required: true
|
||||
|
||||
.github/workflows/apidoc.yml (vendored): 2 changed lines
@@ -13,7 +13,7 @@ on:
|
||||
name: Build API docs
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
RUST_TOOLCHAIN: nightly-2023-02-26
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
|
||||
.github/workflows/develop.yml (vendored): 6 changed lines
@@ -24,7 +24,7 @@ on:
|
||||
name: CI
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
RUST_TOOLCHAIN: nightly-2023-02-26
|
||||
|
||||
jobs:
|
||||
typos:
|
||||
@@ -141,7 +141,6 @@ jobs:
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness && ls /tmp
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
@@ -217,7 +216,7 @@ jobs:
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Collect coverage data
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -225,7 +224,6 @@ jobs:
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Codecov upload
|
||||
uses: codecov/codecov-action@v2
|
||||
|
||||
.github/workflows/release.yml (vendored): 286 changed lines
@@ -7,38 +7,38 @@ on:
|
||||
- cron: '0 0 * * 1'
|
||||
# Manual trigger only builds binaries.
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dry_run:
|
||||
description: 'Skip docker push and release steps'
|
||||
type: boolean
|
||||
default: true
|
||||
skip_test:
|
||||
description: 'Do not run tests during build'
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
name: Release
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
RUST_TOOLCHAIN: nightly-2023-02-26
|
||||
|
||||
SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
|
||||
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
|
||||
|
||||
SCHEDULED_PERIOD: nightly
|
||||
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit-test, integration-test and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
|
||||
## FIXME(zyy17): Enable it after the tests are stable.
|
||||
DISABLE_RUN_TESTS: true
|
||||
|
||||
jobs:
|
||||
build-macos:
|
||||
name: Build macOS binary
|
||||
build:
|
||||
name: Build binary
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# The file format is greptime-<os>-<arch>
|
||||
include:
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-arm64
|
||||
@@ -49,6 +49,16 @@ jobs:
|
||||
file: greptime-darwin-amd64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- arch: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-arm64-pyo3
|
||||
@@ -62,116 +72,6 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Cache cargo assets
|
||||
id: cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Install Protoc for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install protobuf
|
||||
|
||||
- name: Install etcd for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install etcd
|
||||
brew services start etcd
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
targets: ${{ matrix.arch }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Output package versions
|
||||
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
|
||||
|
||||
- name: Run tests
|
||||
if: env.DISABLE_RUN_TESTS == 'false'
|
||||
run: make test sqlness-test
|
||||
|
||||
- name: Run cargo build
|
||||
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
run: |
|
||||
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
|
||||
chmod +x greptime
|
||||
tar -zcvf ${{ matrix.file }}.tgz greptime
|
||||
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
|
||||
|
||||
- name: Upload checksum of artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}.sha256sum
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Configure tag
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload to S3
|
||||
run: |
|
||||
aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
|
||||
|
||||
build-linux:
|
||||
name: Build linux binary
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# The file format is greptime-<os>-<arch>
|
||||
include:
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
runs-on: ${{ matrix.os }}
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
@@ -196,6 +96,11 @@ jobs:
|
||||
sudo cp protoc/bin/protoc /usr/local/bin/
|
||||
sudo cp -r protoc/include/google /usr/local/include/
|
||||
|
||||
- name: Install Protoc for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install protobuf
|
||||
|
||||
- name: Install etcd for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
@@ -209,6 +114,12 @@ jobs:
|
||||
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
|
||||
nohup etcd >/tmp/etcd.log 2>&1 &
|
||||
|
||||
- name: Install etcd for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install etcd
|
||||
brew services start etcd
|
||||
|
||||
- name: Install dependencies for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
@@ -227,18 +138,13 @@ jobs:
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
targets: ${{ matrix.arch }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Output package versions
|
||||
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
|
||||
|
||||
- name: Run tests
|
||||
if: env.DISABLE_RUN_TESTS == 'false'
|
||||
run: make test sqlness-test
|
||||
|
||||
- name: Run cargo build
|
||||
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
run: make unit-test integration-test sqlness-test
|
||||
|
||||
- name: Run cargo build with pyo3 for aarch64-linux
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
|
||||
@@ -283,6 +189,10 @@ jobs:
|
||||
|
||||
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Run cargo build
|
||||
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -303,26 +213,11 @@ jobs:
|
||||
name: ${{ matrix.file }}.sha256sum
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Configure tag
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload to S3
|
||||
run: |
|
||||
aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: ${{ secrets.AWS_CN_REGION }}
|
||||
|
||||
docker:
|
||||
name: Build docker image
|
||||
needs: [build-linux, build-macos]
|
||||
needs: [build]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
@@ -335,7 +230,7 @@ jobs:
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
@@ -343,7 +238,7 @@ jobs:
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
if: github.event_name != 'schedule'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
@@ -406,9 +301,9 @@ jobs:
|
||||
release:
|
||||
name: Release artifacts
|
||||
# Release artifacts only when all the artifacts are built successfully.
|
||||
needs: [build-linux, build-macos, docker]
|
||||
needs: [build,docker]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
@@ -418,92 +313,31 @@ jobs:
|
||||
|
||||
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nightly-20230313.
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
|
||||
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
|
||||
|
||||
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
|
||||
- name: Set whether it is the latest release
|
||||
run: |
|
||||
if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "prerelease=false" >> $GITHUB_ENV
|
||||
echo "makeLatest=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "prerelease=true" >> $GITHUB_ENV
|
||||
echo "makeLatest=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Create scheduled build git tag
|
||||
if: github.event_name != 'push'
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
|
||||
- name: Publish scheduled release # configure the different release title and tags.
|
||||
uses: ncipollo/release-action@v1
|
||||
if: github.event_name != 'push'
|
||||
uses: softprops/action-gh-release@v1
|
||||
if: github.event_name == 'schedule'
|
||||
with:
|
||||
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
|
||||
prerelease: ${{ env.prerelease }}
|
||||
makeLatest: ${{ env.makeLatest }}
|
||||
tag: ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
generateReleaseNotes: true
|
||||
artifacts: |
|
||||
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
generate_release_notes: true
|
||||
files: |
|
||||
**/greptime-*
|
||||
|
||||
- name: Publish release
|
||||
uses: ncipollo/release-action@v1
|
||||
if: github.event_name == 'push'
|
||||
uses: softprops/action-gh-release@v1
|
||||
if: github.event_name != 'schedule'
|
||||
with:
|
||||
name: "${{ github.ref_name }}"
|
||||
prerelease: ${{ env.prerelease }}
|
||||
makeLatest: ${{ env.makeLatest }}
|
||||
generateReleaseNotes: true
|
||||
artifacts: |
|
||||
name: "Release ${{ github.ref_name }}"
|
||||
files: |
|
||||
**/greptime-*
|
||||
|
||||
docker-push-acr:
|
||||
name: Push docker image to alibaba cloud container registry
|
||||
needs: [docker]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to alibaba cloud container registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: registry.cn-hangzhou.aliyuncs.com
|
||||
username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
|
||||
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
.gitignore (vendored): 7 changed lines
@@ -1,8 +1,6 @@
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
/target/
|
||||
# also ignore if it's a symbolic link
|
||||
/target
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
@@ -41,8 +39,3 @@ benchmarks/data
|
||||
# dashboard files
|
||||
!/src/servers/dashboard/VERSION
|
||||
/src/servers/dashboard/*
|
||||
|
||||
# Vscode workspace
|
||||
*.code-workspace
|
||||
|
||||
venv/
|
||||
|
||||
@@ -107,6 +107,6 @@ The core team will be thrilled if you participate in any way you like. When you
|
||||
|
||||
Also, see some extra GreptimeDB content:
|
||||
|
||||
- [GreptimeDB Docs](https://docs.greptime.com/)
|
||||
- [Learn GreptimeDB](https://greptime.com/product/db)
|
||||
- [GreptimeDB Docs](https://greptime.com/docs)
|
||||
- [Learn GreptimeDB](https://greptime.com/products/db)
|
||||
- [Greptime Inc. Website](https://greptime.com)
|
||||
|
||||
Cargo.lock (generated): 3687 changed lines (file diff suppressed because it is too large)
Cargo.toml: 44 changed lines
@@ -14,10 +14,7 @@ members = [
|
||||
"src/common/grpc",
|
||||
"src/common/grpc-expr",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/pprof",
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
@@ -33,7 +30,6 @@ members = [
|
||||
"src/meta-client",
|
||||
"src/meta-srv",
|
||||
"src/mito",
|
||||
"src/mito2",
|
||||
"src/object-store",
|
||||
"src/partition",
|
||||
"src/promql",
|
||||
@@ -51,49 +47,39 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.3.2"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
arrow = { version = "40.0" }
|
||||
arrow-array = "40.0"
|
||||
arrow-flight = "40.0"
|
||||
arrow-schema = { version = "40.0", features = ["serde"] }
|
||||
arrow = { version = "37.0" }
|
||||
arrow-array = "37.0"
|
||||
arrow-flight = "37.0"
|
||||
arrow-schema = { version = "37.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/WenyXu/greptime-proto.git", rev = "1eda4691a5d2c8ffc463d48ca2317905ba7e4b2d" }
|
||||
itertools = "0.10"
|
||||
parquet = "40.0"
|
||||
parquet = "37.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
rand = "0.8"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = "0.34"
|
||||
sqlparser = "0.33"
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
tokio = { version = "1.24.2", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util"] }
|
||||
tonic = { version = "0.9", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
metrics = "0.20"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
[build]
|
||||
pre-build = [
|
||||
"dpkg --add-architecture $CROSS_DEB_ARCH",
|
||||
"apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
|
||||
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
|
||||
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
|
||||
]
|
||||
Makefile: 11 changed lines
@@ -33,12 +33,13 @@ docker-image: ## Build docker image.
|
||||
|
||||
##@ Test
|
||||
|
||||
test: nextest ## Run unit and integration tests.
|
||||
cargo nextest run --retries 3
|
||||
.PHONY: unit-test
|
||||
unit-test: ## Run unit test.
|
||||
cargo test --workspace
|
||||
|
||||
.PHONY: nextest ## Install nextest tools.
|
||||
nextest:
|
||||
cargo --list | grep nextest || cargo install cargo-nextest --locked
|
||||
.PHONY: integration-test
|
||||
integration-test: ## Run integration test.
|
||||
cargo test integration
|
||||
|
||||
.PHONY: sqlness-test
|
||||
sqlness-test: ## Run sqlness test.
|
||||
|
||||
README.md: 68 changed lines
@@ -47,14 +47,6 @@ for years. Based on their best-practices, GreptimeDB is born to give you:
|
||||
|
||||
## Quick Start
|
||||
|
||||
### GreptimePlay
|
||||
|
||||
Try out the features of GreptimeDB right from your browser.
|
||||
|
||||
<a href="https://greptime.com/playground" target="_blank"><img
|
||||
src="https://www.greptime.com/assets/greptime_play_button_colorful.1bbe2746.png"
|
||||
alt="GreptimePlay" width="200px" /></a>
|
||||
|
||||
### Build
|
||||
|
||||
#### Build from Source
|
||||
@@ -100,22 +92,64 @@ Or if you built from docker:
|
||||
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
For more startup options, GreptimeDB's **distributed mode**, and information
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
|
||||
|
||||
### Get started
|
||||
### Connect
|
||||
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
|
||||
1. Connect to GreptimeDB via standard [MySQL
|
||||
client](https://dev.mysql.com/downloads/mysql/):
|
||||
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
|
||||
```
|
||||
# The standalone instance listens on port 4002 by default.
|
||||
mysql -h 127.0.0.1 -P 4002
|
||||
```
|
||||
|
||||
2. Create table:
|
||||
|
||||
```SQL
|
||||
CREATE TABLE monitor (
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE DEFAULT 0,
|
||||
memory DOUBLE,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
|
||||
```
|
||||
|
||||
3. Insert some data:
|
||||
|
||||
```SQL
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
|
||||
```
|
||||
|
||||
4. Query the data:
|
||||
|
||||
```SQL
|
||||
SELECT * FROM monitor;
|
||||
```
|
||||
|
||||
```TEXT
|
||||
+-------+---------------------+------+--------+
|
||||
| host | ts | cpu | memory |
|
||||
+-------+---------------------+------+--------+
|
||||
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
|
||||
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
|
||||
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
|
||||
+-------+---------------------+------+--------+
|
||||
3 rows in set (0.01 sec)
|
||||
```
|
||||
|
||||
You can always clean up the test database by removing `/tmp/greptimedb`.
|
||||
|
||||
## Resources
|
||||
|
||||
### Installation
|
||||
|
||||
- [Pre-built Binaries](https://greptime.com/download):
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one the PyO3 build was compiled against.
|
||||
We recommend using virtualenv for the installation process to manage multiple Python versions.
|
||||
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
|
||||
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
|
||||
Docker images; this is the easiest way to try GreptimeDB. By default, it runs CPython scripts with `pyo3_backend` enabled.
|
||||
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
|
||||
@@ -123,7 +157,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
|
||||
|
||||
### Documentation
|
||||
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
|
||||
- GreptimeDB [Developer
|
||||
Guide](https://docs.greptime.com/developer-guide/overview.html)
|
||||
- GreptimeDB [internal code document](https://greptimedb.rs)
|
||||
|
||||
@@ -9,6 +9,6 @@ arrow.workspace = true
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
client = { path = "../src/client" }
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
itertools = "0.10.5"
|
||||
parquet.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -26,9 +26,7 @@ use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests,
|
||||
};
|
||||
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
@@ -109,12 +107,8 @@ async fn write_data(
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
let requests = InsertRequests {
|
||||
inserts: vec![request],
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
let _ = db.insert(requests).await.unwrap();
|
||||
db.insert(request).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
@@ -370,23 +364,26 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
table_options: Default::default(),
|
||||
region_numbers: vec![0],
|
||||
region_ids: vec![0],
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
ret.insert(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
);
|
||||
|
||||
ret.insert(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
|
||||
);
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
@@ -411,8 +408,7 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
@@ -421,8 +417,7 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,8 +10,6 @@ rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
# The number of gRPC server worker threads, 8 by default.
|
||||
rpc_runtime_size = 8
|
||||
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
|
||||
heartbeat_interval_millis = 5000
|
||||
|
||||
# Metasrv client options.
|
||||
[meta_client_options]
|
||||
@@ -26,10 +24,9 @@ tcp_nodelay = true
|
||||
|
||||
# WAL options, see `standalone.example.toml`.
|
||||
[wal]
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
file_size = "256MB"
|
||||
purge_threshold = "4GB"
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
@@ -37,9 +34,7 @@ sync_write = false
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
[storage]
|
||||
type = "File"
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
data_dir = "/tmp/greptimedb/data/"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[storage.compaction]
|
||||
@@ -53,31 +48,13 @@ max_purge_tasks = 32
|
||||
# Create a checkpoint every <checkpoint_margin> actions.
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
gc_duration = '30s'
|
||||
# Whether to try creating a manifest checkpoint on region opening
|
||||
checkpoint_on_startup = false
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
# Max inflight flush tasks.
|
||||
max_flush_tasks = 8
|
||||
# Default write buffer size for a region.
|
||||
region_write_buffer_size = "32MB"
|
||||
# Interval to check whether a region needs flush.
|
||||
picker_schedule_interval = "5m"
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Procedure storage options, see `standalone.example.toml`.
|
||||
[procedure]
|
||||
max_retry_times = 3
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
# [procedure.store]
|
||||
# type = "File"
|
||||
# data_dir = "/tmp/greptimedb/procedure/"
|
||||
# max_retry_times = 3
|
||||
# retry_delay = "500ms"
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
|
||||
heartbeat_interval_millis = 5000
|
||||
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
|
||||
retry_interval_millis = 5000
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options, see `standalone.example.toml`.
|
||||
[grpc_options]
|
||||
@@ -61,8 +56,3 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
@@ -13,8 +13,3 @@ datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
# Store data in memory, false by default.
|
||||
use_memory_store = false
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
@@ -9,9 +9,6 @@ enable_memory_catalog = false
|
||||
addr = "127.0.0.1:4000"
|
||||
# HTTP request timeout, 30s by default.
|
||||
timeout = "30s"
|
||||
# HTTP request body limit, 64Mb by default.
|
||||
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options.
|
||||
[grpc_options]
|
||||
@@ -81,12 +78,12 @@ addr = "127.0.0.1:4004"
|
||||
|
||||
# WAL options.
|
||||
[wal]
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
# WAL data directory.
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
# WAL file size in bytes.
|
||||
file_size = "256MB"
|
||||
# WAL purge threshold.
|
||||
purge_threshold = "4GB"
|
||||
file_size = "1GB"
|
||||
# WAL purge threshold in bytes.
|
||||
purge_threshold = "50GB"
|
||||
# WAL purge interval in seconds.
|
||||
purge_interval = "10m"
|
||||
# WAL read batch size.
|
||||
@@ -99,9 +96,7 @@ sync_write = false
|
||||
# Storage type.
|
||||
type = "File"
|
||||
# Data directory, "/tmp/greptimedb/data" by default.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
data_dir = "/tmp/greptimedb/data/"
|
||||
|
||||
# Compaction options.
|
||||
[storage.compaction]
|
||||
@@ -118,33 +113,18 @@ max_purge_tasks = 32
|
||||
# Create a checkpoint every <checkpoint_margin> actions.
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
gc_duration = '30s'
|
||||
# Whether to try creating a manifest checkpoint on region opening
|
||||
checkpoint_on_startup = false
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
# Max inflight flush tasks.
|
||||
max_flush_tasks = 8
|
||||
# Default write buffer size for a region.
|
||||
region_write_buffer_size = "32MB"
|
||||
# Interval to check whether a region needs flush.
|
||||
picker_schedule_interval = "5m"
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
# Uncomment to enable.
|
||||
# [procedure.store]
|
||||
# # Storage type.
|
||||
# type = "File"
|
||||
# # Procedure data path.
|
||||
# data_dir = "/tmp/greptimedb/procedure/"
|
||||
# # Procedure max retry time.
|
||||
# max_retry_times = 3
|
||||
# # Initial retry delay of procedures, increases exponentially
|
||||
# retry_delay = "500ms"
|
||||
|
||||
@@ -8,14 +8,11 @@ RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install pyarrow
|
||||
&& pip install pyarrow
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
FROM centos:7
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
|
||||
-e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=http://mirrors.tuna.tsinghua.edu.cn/centos|g' \
|
||||
-i.bak \
|
||||
/etc/yum.repos.d/CentOS-*.repo
|
||||
|
||||
# Install dependencies
|
||||
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel
|
||||
|
||||
# Install protoc
|
||||
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
|
||||
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
CMD ["cargo", "build", "--release"]
|
||||
@@ -8,7 +8,6 @@ RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
wget
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
---
|
||||
Feature Name: distributed-planner
|
||||
Tracking Issue: TBD
|
||||
Date: 2023-05-09
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
Distributed Planner
|
||||
-------------------
|
||||
# Summary
|
||||
Enhance the logical planner to be aware of the distributed, multi-region table topology, so that queries execute in a "push computation down" manner rather than the current "pull data up" one.
|
||||
|
||||
# Motivation
|
||||
Querying distributively can leverage GreptimeDB's architecture to process large datasets that exceed the capacity of a single node, or to accelerate query execution by running it in parallel. This task includes two sub-tasks:

- Be able to transform the plan so that as much computation as possible is pushed down to the data source.
- Be able to handle pipeline breakers (like `Join` or `Sort`) across multiple computation nodes.

This is a relatively complex topic. To keep this RFC focused, I'll concentrate on the first one.
|
||||
|
||||
# Details
|
||||
## Background: Partition and Region
|
||||
GreptimeDB supports table partitioning, where the partition rule is set during table creation. Each partition can be further divided into one or more physical storage units known as "regions". Both partitions and regions are divided based on rows:
|
||||
``` text
|
||||
┌────────────────────────────────────┐
|
||||
│ │
|
||||
│ Table │
|
||||
│ │
|
||||
└─────┬────────────┬────────────┬────┘
|
||||
│ │ │
|
||||
│ │ │
|
||||
┌─────▼────┐ ┌─────▼────┐ ┌─────▼────┐
|
||||
│ Region 1 │ │ Region 2 │ │ Region 3 │
|
||||
└──────────┘ └──────────┘ └──────────┘
|
||||
Row 1~10 Row 11~20 Row 21~30
|
||||
```
|
||||
Generally speaking, a region is the minimum element of data distribution, and we can also use it as the unit for distributing computation. This greatly simplifies the routing logic of the distributed planner: always schedule the computation to the node that currently opens the corresponding region. It also makes it easy to scale out compute nodes, since GreptimeDB's data is persisted on a shared storage backend like S3. But this is a bit beyond the scope of this specific topic.
|
||||
## Background: Commutativity
|
||||
Commutativity is an attribute that describes whether two operations can exchange their order of application: $P1(P2(R)) \Leftrightarrow P2(P1(R))$. If the equation holds, we can transform one expression into another form without changing its result. This is useful for rewriting SQL expressions, and is the theoretical basis of this RFC.
|
||||
|
||||
Take this SQL as an example
|
||||
|
||||
``` sql
|
||||
SELECT a FROM t WHERE a > 10;
|
||||
```
|
||||
|
||||
As we know, projection and filter are commutative here ($P_{proj}(P_{filter}(R)) \Leftrightarrow P_{filter}(P_{proj}(R))$, where $P_{proj}$ selects column `a` and $P_{filter}$ keeps rows with `a > 10`), so this query can be translated into the following two equivalent plan trees:
|
||||
|
||||
```text
|
||||
┌─────────────┐ ┌─────────────┐
|
||||
│Projection(a)│ │Filter(a>10) │
|
||||
└──────▲──────┘ └──────▲──────┘
|
||||
│ │
|
||||
┌──────┴──────┐ ┌──────┴──────┐
|
||||
│Filter(a>10) │ │Projection(a)│
|
||||
└──────▲──────┘ └──────▲──────┘
|
||||
│ │
|
||||
┌──────┴──────┐ ┌──────┴──────┐
|
||||
│ TableScan │ │ TableScan │
|
||||
└─────────────┘ └─────────────┘
|
||||
```
|
||||
|
||||
## Merge Operation
|
||||
|
||||
This RFC proposes adding a new plan node, `MergeScan`, to merge results from several regions in the frontend. It wraps the abstraction of remote data and execution, and exposes a `TableScan` interface to the upper level.
|
||||
|
||||
``` text
|
||||
▲
|
||||
│
|
||||
┌───────┼───────┐
|
||||
│ │ │
|
||||
│ ┌──┴──┐ │
|
||||
│ └──▲──┘ │
|
||||
│ │ │
|
||||
│ ┌──┴──┐ │
|
||||
│ └──▲──┘ │ ┌─────────────────────────────┐
|
||||
│ │ │ │ │
|
||||
│ ┌────┴────┐ │ │ ┌──────────┐ ┌───┐ ┌───┐ │
|
||||
│ │MergeScan◄──┼────┤ │ Region 1 │ │ │ .. │ │ │
|
||||
│ └─────────┘ │ │ └──────────┘ └───┘ └───┘ │
|
||||
│ │ │ │
|
||||
└─Frontend──────┘ └─Remote-Sources──────────────┘
|
||||
```
|
||||
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface. The plan is transformed and split from the original query that arrives at the frontend.
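To make the coalescing behaviour concrete, here is a minimal sketch, using invented names (`merge_scan`, the `RecordBatch` alias) rather than GreptimeDB's actual types: it simply chains the per-region streams into one output stream, with no ordering or deduplication.

```rust
use futures::stream::{self, Stream, StreamExt};

// Stand-in for arrow's RecordBatch, to keep the sketch self-contained.
type RecordBatch = Vec<u64>;

/// Chain the per-region result streams into a single output stream.
fn merge_scan<S>(region_streams: Vec<S>) -> impl Stream<Item = RecordBatch>
where
    S: Stream<Item = RecordBatch>,
{
    // Each element of `region_streams` corresponds to one region (one gRPC
    // query carrying the substrait-encoded sub-plan); `flatten` polls them one
    // after another and yields their batches unchanged.
    stream::iter(region_streams).flatten()
}
```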
|
||||
|
||||
## Commutativity of MergeScan
|
||||
|
||||
Obviously, the position of `MergeScan` is key to the distributed plan. The closer it is to the underlying `TableScan`, the less computation is taken on by datanodes. Thus the goal is to pull the `MergeScan` up as far as possible. "Pull up" means exchanging `MergeScan` with its parent node in the plan tree, so we need to check the commutativity between the existing plan nodes and `MergeScan`. Here I classify all the possibilities into five categories (a worked example follows the list):
|
||||
|
||||
- Commutative: $P1(P2(R)) \Leftrightarrow P2(P1(R))$
|
||||
- filter
|
||||
- projection
|
||||
- operations that match the partition key
|
||||
- Partial Commutative: $P1(P2(R)) \Leftrightarrow P1(P2(P1(R)))$
|
||||
- $min(R) \rightarrow min(MERGE(min(R)))$
|
||||
- $max(R) \rightarrow max(MERGE(max(R)))$
|
||||
- Conditional Commutative: $P1(P2(R)) \Leftrightarrow P3(P2(P1(R)))$
|
||||
- $count(R) \rightarrow sum(count(R))$
|
||||
- Transformed Commutative: $P1(P2(R)) \Leftrightarrow P1(P3(R)) \Leftrightarrow P3(P1(R))$
|
||||
- $avg(R) \rightarrow sum(R)/count(R)$
|
||||
- Non-commutative
|
||||
- sort
|
||||
- join
|
||||
- percentile
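As a concrete illustration of the categories above (the numbers are made up for the example), suppose the table is split into two regions holding the values $R_1 = \{1, 2, 3\}$ and $R_2 = \{10, 20\}$:

$$
\begin{aligned}
count(R) &= sum(count(R_1), count(R_2)) = 3 + 2 = 5 \\
avg(R) &= \frac{sum(R_1) + sum(R_2)}{count(R_1) + count(R_2)} = \frac{6 + 30}{3 + 2} = 7.2 \\
avg(avg(R_1), avg(R_2)) &= \frac{2 + 15}{2} = 8.5 \neq avg(R)
\end{aligned}
$$

So `count` is conditionally commutative with the merge (combine the per-region counts with a `sum`), while `avg` is only transformed commutative: it has to be rewritten into `sum` and `count` before any part of it can be pushed below the merge.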
|
||||
## Steps to plan
|
||||
After establishing the set of commutative relations for all expressions, we can begin transforming the logical plan. There are four steps:
|
||||
|
||||
- Add a merge node before table scan
|
||||
- Evaluate commutativity in a bottom-up way, stop at the first non-commutative node
|
||||
- Divide the TableScan to scan over partitions
|
||||
- Execute
|
||||
|
||||
First, insert the `MergeScan` on top of the bottom `TableScan` node. Then examine commutativity starting from the `MergeScan` node and transform the plan tree based on the result. Stop this process at the first non-commutative node.
|
||||
``` text
|
||||
┌─────────────┐ ┌─────────────┐
|
||||
│ Sort │ │ Sort │
|
||||
└──────▲──────┘ └──────▲──────┘
|
||||
│ │
|
||||
┌─────────────┐ ┌──────┴──────┐ ┌──────┴──────┐
|
||||
│ Sort │ │Projection(a)│ │ MergeScan │
|
||||
└──────▲──────┘ └──────▲──────┘ └──────▲──────┘
|
||||
│ │ │
|
||||
┌──────┴──────┐ ┌──────┴──────┐ ┌──────┴──────┐
|
||||
│Projection(a)│ │ MergeScan │ │Projection(a)│
|
||||
└──────▲──────┘ └──────▲──────┘ └──────▲──────┘
|
||||
│ │ │
|
||||
┌──────┴──────┐ ┌──────┴──────┐ ┌──────┴──────┐
|
||||
│ TableScan │ │ TableScan │ │ TableScan │
|
||||
└─────────────┘ └─────────────┘ └─────────────┘
|
||||
(a) (b) (c)
|
||||
```
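The rewrite from tree (a) to tree (c) can be sketched as a recursive pull-up. The `Plan` enum and function below are invented for illustration, not the actual planner types: commutative nodes (projection, filter) swap places with `MergeScan`, and `Sort` stops the pull-up.

```rust
// Toy plan tree for illustration only.
#[derive(Debug, PartialEq)]
enum Plan {
    TableScan,
    MergeScan(Box<Plan>),
    Projection(Box<Plan>),
    Filter(Box<Plan>),
    Sort(Box<Plan>),
}

/// Pull `MergeScan` up through commutative parents and stop at the first
/// non-commutative node.
fn pull_up(plan: Plan) -> Plan {
    match plan {
        // Projection commutes with MergeScan: P(Merge(x)) becomes Merge(P(x)).
        Plan::Projection(child) => match pull_up(*child) {
            Plan::MergeScan(inner) => Plan::MergeScan(Box::new(Plan::Projection(inner))),
            other => Plan::Projection(Box::new(other)),
        },
        // Filter commutes in the same way.
        Plan::Filter(child) => match pull_up(*child) {
            Plan::MergeScan(inner) => Plan::MergeScan(Box::new(Plan::Filter(inner))),
            other => Plan::Filter(Box::new(other)),
        },
        // Sort is non-commutative, so MergeScan stays below it.
        Plan::Sort(child) => Plan::Sort(Box::new(pull_up(*child))),
        other => other,
    }
}

// Sort(Projection(MergeScan(TableScan))) is rewritten into
// Sort(MergeScan(Projection(TableScan))), matching tree (c) above.
```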
|
||||
Then, in the physical planning phase, convert the sub-tree below `MergeScan` into a remote query request, dispatch it to all the regions, and let `MergeScan` receive the results and feed them to its parent node.

To reduce the overall complexity, any error in the procedure fails the entire query and cancels all the other parts.
|
||||
# Alternatives
|
||||
## Spill
|
||||
If we only consider the ability to process large datasets, we could enable DataFusion's spill capability to temporarily persist intermediate data to disk, like "swap" memory. But this would lead to very slow performance and very large write amplification.
|
||||
# Future Work
|
||||
As described in the `Motivation` section, we can further explore the distributed planner at the physical execution level by introducing mechanisms like Spark's shuffle to improve parallelism and reduce the stages of intermediate pipeline breakers.
|
||||
@@ -1,527 +0,0 @@
|
||||
# Schema Structs
|
||||
|
||||
# Common Schemas
|
||||
The `datatypes` crate defines the elementary schema struct to describe the metadata.
|
||||
|
||||
## ColumnSchema
|
||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
||||
|
||||
```rust
|
||||
pub struct ColumnSchema {
|
||||
pub name: String,
|
||||
pub data_type: ConcreteDataType,
|
||||
is_nullable: bool,
|
||||
is_time_index: bool,
|
||||
default_constraint: Option<ColumnDefaultConstraint>,
|
||||
metadata: Metadata,
|
||||
}
|
||||
```
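As a rough sketch of how such a lossless round-trip can work (the metadata key and helper names below are illustrative, not necessarily the ones GreptimeDB uses): the flags that arrow's `Field` lacks are stashed in the field's metadata map and read back when converting in the other direction.

```rust
use std::collections::HashMap;

use arrow::datatypes::{DataType, Field};

// Assumed metadata key, for illustration only.
const TIME_INDEX_KEY: &str = "greptime:time_index";

fn to_arrow_field(name: &str, data_type: DataType, nullable: bool, is_time_index: bool) -> Field {
    let mut metadata = HashMap::new();
    metadata.insert(TIME_INDEX_KEY.to_string(), is_time_index.to_string());
    // The extra flag rides along in the Field's metadata map.
    Field::new(name, data_type, nullable).with_metadata(metadata)
}

fn is_time_index(field: &Field) -> bool {
    field
        .metadata()
        .get(TIME_INDEX_KEY)
        .map(|v| v == "true")
        .unwrap_or(false)
}
```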
|
||||
|
||||
## Schema
|
||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
|
||||
|
||||
```rust
|
||||
use arrow::datatypes::Schema as ArrowSchema;
|
||||
|
||||
pub struct Schema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
name_to_index: HashMap<String, usize>,
|
||||
arrow_schema: Arc<ArrowSchema>,
|
||||
timestamp_index: Option<usize>,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
pub type SchemaRef = Arc<Schema>;
|
||||
```
|
||||
|
||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
||||
|
||||
## RawSchema
|
||||
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. We can reconstruct these fields from the `ColumnSchema` sequence, so we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we first convert it into a `RawSchema` and then serialize the `RawSchema`.
|
||||
|
||||
```rust
|
||||
pub struct RawSchema {
|
||||
pub column_schemas: Vec<ColumnSchema>,
|
||||
pub timestamp_index: Option<usize>,
|
||||
pub version: u32,
|
||||
}
|
||||
```
|
||||
|
||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
|
||||
|
||||
# Schema of the Table
|
||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
||||
```rust
|
||||
pub struct TableMeta {
|
||||
pub schema: SchemaRef,
|
||||
pub primary_key_indices: Vec<usize>,
|
||||
pub value_indices: Vec<usize>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
|
||||
|
||||
The field `primary_key_indices` stores the indices of primary key columns. The field `value_indices` records the indices of value columns (columns that are neither part of the primary key nor the time index; we sometimes call them field columns).
|
||||
|
||||
Suppose we create a table with the following SQL
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"host",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"datacenter"
|
||||
],
|
||||
"time_index":0,
|
||||
"version":0
|
||||
},
|
||||
"primary_key_indices":[
|
||||
4,
|
||||
1
|
||||
],
|
||||
"value_indices":[
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# Schemas of the storage engine
|
||||
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
|
||||
|
||||
The storage engine maintains schemas of regions in more complicated ways because it
|
||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
||||
- maintains additional metadata like column id or column family
|
||||
|
||||
So the storage engine defines several schema structs:
|
||||
- RegionSchema
|
||||
- StoreSchema
|
||||
- ProjectedSchema
|
||||
|
||||
## RegionSchema
|
||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
||||
|
||||
```rust
|
||||
pub struct RegionSchema {
|
||||
user_schema: SchemaRef,
|
||||
store_schema: StoreSchemaRef,
|
||||
columns: ColumnsMetadataRef,
|
||||
}
|
||||
```
|
||||
|
||||
Each region reserves some columns called `internal columns` for internal usage:
|
||||
- `__sequence`, sequence number of a row
|
||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
||||
|
||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
||||
- User schema, a `Schema` struct that doesn't have internal columns
|
||||
- Store schema, a `StoreSchema` struct that has internal columns
|
||||
|
||||
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and store schema, so we just ignore it here. We may remove this struct in the future.
|
||||
|
||||
`RegionSchema` organizes columns in the following order:
|
||||
```
|
||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
We can ignore the `__version` column because it is disabled now:
|
||||
|
||||
```
|
||||
key columns, timestamp, value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
|
||||
|
||||
So the `RegionSchema` of our `cpu` table above looks like this:
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
"store_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
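
To make the ordering rule concrete, the sketch below (a hypothetical helper, not the real storage engine code) assembles a region's column order from its key columns, timestamp, and value columns, and appends the internal columns:

```rust
/// Hypothetical helper: lay out region columns as
/// key columns, timestamp, value columns, __sequence, __op_type.
fn region_column_order(
    key_columns: &[&str],
    timestamp: &str,
    value_columns: &[&str],
) -> Vec<String> {
    let mut columns: Vec<String> = key_columns.iter().map(|c| c.to_string()).collect();
    columns.push(timestamp.to_string());
    columns.extend(value_columns.iter().map(|c| c.to_string()));
    columns.push("__sequence".to_string());
    columns.push("__op_type".to_string());
    columns
}

fn main() {
    let order = region_column_order(
        &["datacenter", "host"],
        "ts",
        &["usage_user", "usage_system"],
    );
    // Matches the `store_schema` of the `cpu` region shown above.
    assert_eq!(
        order,
        ["datacenter", "host", "ts", "usage_user", "usage_system", "__sequence", "__op_type"]
    );
}
```
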
## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that is aware of all internal columns.
```rust
struct StoreSchema {
    columns: Vec<ColumnMetadata>,
    schema: SchemaRef,
    row_key_end: usize,
    user_column_end: usize,
}
```

The columns in the `columns` and `schema` fields have the same order. A `ColumnMetadata` holds metadata such as the column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between a `StoreSchema` and arrow's `Schema`. We rely on this to persist the `StoreSchema` in SSTs, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.

The `StoreSchema` of the region above is similar to this:
```json
{
    "schema": {
        "column_schemas": [
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "__sequence",
            "__op_type"
        ],
        "time_index": 2,
        "version": 0
    },
    "row_key_end": 3,
    "user_column_end": 5
}
```

The key columns and the timestamp column form the row key of a row. We put them at the front so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
    #[inline]
    pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
        0..self.row_key_end
    }

    #[inline]
    pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
        self.row_key_end..self.user_column_end
    }
}
```

Another useful property of `StoreSchema` is that it always contains the key columns, the timestamp column, and the internal columns, because we need them to perform merge, deduplication, and deletion. Projection on a `StoreSchema` only projects value columns.
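
For the `cpu` region above (`row_key_end = 3`, `user_column_end = 5`), `row_key_indices()` yields `[0, 1, 2]` and `value_indices()` yields `[3, 4]`. The stand-alone sketch below reproduces this bookkeeping with a simplified stand-in for `StoreSchema`:

```rust
/// Simplified stand-in for the index bookkeeping of `StoreSchema`.
struct IndexRanges {
    row_key_end: usize,
    user_column_end: usize,
}

impl IndexRanges {
    fn row_key_indices(&self) -> impl Iterator<Item = usize> {
        0..self.row_key_end
    }

    fn value_indices(&self) -> impl Iterator<Item = usize> {
        self.row_key_end..self.user_column_end
    }
}

fn main() {
    // Columns: [datacenter, host, ts, usage_user, usage_system, __sequence, __op_type]
    let ranges = IndexRanges { row_key_end: 3, user_column_end: 5 };
    assert_eq!(ranges.row_key_indices().collect::<Vec<_>>(), vec![0, 1, 2]);
    assert_eq!(ranges.value_indices().collect::<Vec<_>>(), vec![3, 4]);
}
```
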
## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
    projection: Option<Projection>,
    schema_to_read: StoreSchemaRef,
    projected_user_schema: SchemaRef,
}
```

We need to handle several cases while doing projection:
- The column order of the table and the region is different
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection orders
- We support `ALTER TABLE`, so data files may have different schemas

### Projection
Let's take an example to see how projection works. Suppose we want to select `ts` and `usage_system` from the `cpu` table.

```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);

select ts, usage_system from cpu;
```

The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
    "user_schema": [
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system"
    ]
}
```

As you can see, the output order is still `[ts, usage_system]`. This is the schema users see after projection, so we call it the `projected user schema`.
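
A minimal sketch of this index remapping (a hypothetical helper; the real table engine works on column metadata rather than plain names):

```rust
/// Hypothetical helper: remap a projection given in table column order to the
/// corresponding indices in region (user schema) column order.
fn adjust_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    projection: &[usize],
) -> Vec<usize> {
    projection
        .iter()
        .map(|idx| {
            let name = table_columns[*idx];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("projected column must exist in the region")
        })
        .collect()
}

fn main() {
    // `cpu` table order vs. region user schema order.
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system from cpu`: projection [0, 3] on the table
    // becomes [2, 4] on the region.
    assert_eq!(adjust_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```
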
But the storage engine also needs to read the key columns, the timestamp column, and the internal columns. So the `ProjectedSchema` also maintains a `StoreSchema` after projection.

The `Projection` struct is a helper that computes the projected user schema and the projected store schema.

So we can construct the following `ProjectedSchema`:
```json
{
    "schema_to_read": {
        "schema": {
            "column_schemas": [
                "datacenter",
                "host",
                "ts",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index": 2,
            "version": 0
        },
        "row_key_end": 3,
        "user_column_end": 4
    },
    "projected_user_schema": {
        "column_schemas": [
            "ts",
            "usage_system"
        ],
        "time_index": 0
    }
}
```

As you can see, `schema_to_read` doesn't contain the column `usage_user`, because it is not in the projection and thus not intended to be read.
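
The `schema_to_read` above also illustrates the rule mentioned earlier: projection on a store schema keeps the key columns, the timestamp column, and the internal columns, and only filters the value columns. A small sketch of that rule (a hypothetical helper using plain strings instead of real column metadata):

```rust
/// Hypothetical helper: project a store schema's columns. Key columns, the
/// timestamp and the internal columns are always kept; only value columns
/// are filtered by the projection.
fn project_store_columns(
    key_and_ts: &[&str],       // row key columns followed by the timestamp
    value_columns: &[&str],    // all value columns of the region
    projected_values: &[&str], // value columns requested by the query
) -> Vec<String> {
    let mut out: Vec<String> = key_and_ts.iter().map(|c| c.to_string()).collect();
    out.extend(
        value_columns
            .iter()
            .copied()
            .filter(|c| projected_values.contains(c))
            .map(|c| c.to_string()),
    );
    out.push("__sequence".to_string());
    out.push("__op_type".to_string());
    out
}

fn main() {
    let projected = project_store_columns(
        &["datacenter", "host", "ts"],
        &["usage_user", "usage_system"],
        &["usage_system"],
    );
    // Matches the `schema_to_read` in the example above.
    assert_eq!(
        projected,
        ["datacenter", "host", "ts", "usage_system", "__sequence", "__op_type"]
    );
}
```
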
### ReadAdapter
As mentioned above, we can alter a table, so the underlying files (SSTs) and memtables in the storage engine may have different schemas.

To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (before constructing the `ProjectedSchema`). We introduce a [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.

So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
    source_schema: StoreSchemaRef,
    dest_schema: ProjectedSchemaRef,
    indices_in_result: Vec<Option<usize>>,
    is_source_needed: Vec<bool>,
}
```

For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.

The field `is_source_needed` stores whether a column in the source memtable or SST is needed.

Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```

The new `StoreSchema` becomes:
```json
{
    "schema": {
        "column_schemas": [
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "usage_idle",
            "__sequence",
            "__op_type"
        ],
        "time_index": 2,
        "version": 1
    },
    "row_key_end": 3,
    "user_column_end": 6
}
```

Note that we bump the version of the schema to 1.

Suppose we now want to select `ts`, `usage_system`, and `usage_idle`. While reading from data that still has the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
    "source_schema": {
        "schema": {
            "column_schemas": [
                "datacenter",
                "host",
                "ts",
                "usage_user",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index": 2,
            "version": 0
        },
        "row_key_end": 3,
        "user_column_end": 5
    },
    "dest_schema": {
        "schema_to_read": {
            "schema": {
                "column_schemas": [
                    "datacenter",
                    "host",
                    "ts",
                    "usage_system",
                    "usage_idle",
                    "__sequence",
                    "__op_type"
                ],
                "time_index": 2,
                "version": 1
            },
            "row_key_end": 3,
            "user_column_end": 5
        },
        "projected_user_schema": {
            "column_schemas": [
                "ts",
                "usage_system",
                "usage_idle"
            ],
            "time_index": 0
        }
    },
    "indices_in_result": [
        0,
        1,
        2,
        3,
        null,
        4,
        5
    ],
    "is_source_needed": [
        true,
        true,
        true,
        false,
        true,
        true,
        true
    ]
}
```

We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` inserts a null column into the output row so that the output schema still contains `usage_idle`.
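
To make the adaptation concrete, here is a minimal sketch (a hypothetical helper with simplified types; the real `ReadAdapter` works on column vectors rather than rows of integers) of how `indices_in_result` can be used to assemble an output row:

```rust
/// Hypothetical helper: build the output row expected by the destination
/// schema from a row read with the source schema. A `None` index means the
/// source schema doesn't have that column, so the output gets a NULL.
fn adapt_row(source_result: &[i64], indices_in_result: &[Option<usize>]) -> Vec<Option<i64>> {
    indices_in_result
        .iter()
        .copied()
        .map(|idx| idx.map(|i| source_result[i]))
        .collect()
}

fn main() {
    // Row read from the old SST after skipping `usage_user`
    // (is_source_needed[3] == false):
    // [datacenter, host, ts, usage_system, __sequence, __op_type]
    let source_result = [10, 20, 30, 40, 50, 60];
    // Destination columns:
    // [datacenter, host, ts, usage_system, usage_idle, __sequence, __op_type]
    let indices_in_result = [Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let adapted = adapt_row(&source_result, &indices_in_result);
    // `usage_idle` is filled with NULL because the old schema doesn't have it.
    assert_eq!(
        adapted,
        vec![Some(10), Some(20), Some(30), Some(40), None, Some(50), Some(60)]
    );
}
```
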
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.

```text
┌──────────────────────────────┐
│ RegionSchema                 │
│                              │
│  ┌────────────────────────┐  │
│  │ store_schema           │  │
│  │ StoreSchema, version 1 │  │
│  └────────────────────────┘  │
│                              │
│  ┌────────────────────────┐  │
│  │ user_schema            │  │
│  └────────────────────────┘  │
└──────────────┬───────────────┘
               │
┌──────────────▼───────────────┐
│ ProjectedSchema              │
│                              │
│ ┌──────────────────────────┐ │
│ │ schema_to_read           │ │
│ │ StoreSchema (projected), │ │
│ │ version 1                │ │
│ └──────────────────────────┘ │
│                              │
│ ┌──────────────────────────┐ │
│ │ projected_user_schema    │ │
│ └──────────────────────────┘ │
└──────┬────────────────┬──────┘
       │ dest schema    │ dest schema
┌──────▼──────┐  ┌──────▼──────┐
│ ReadAdapter │  │ ReadAdapter │
└──────▲──────┘  └──────▲──────┘
       │ source schema  │ source schema
┌──────┴──────┐  ┌──────┴──────┐
│ SST 0       │  │ SST 1       │
│ StoreSchema │  │ StoreSchema │
│ version 0   │  │ version 1   │
└─────────────┘  └─────────────┘
```

# Conversion
The conversions between the schema types are:
- `TableMeta` holds a `Schema` in its `schema` field.
- `RawSchema` converts to `Schema` via `TryFrom`, and `Schema` converts back to `RawSchema` via `From`.
- `Schema` exposes arrow's `ArrowSchema` via `arrow_schema()`, and an `ArrowSchema` converts back to `Schema` via `TryFrom`.
- `RegionSchema` exposes its `Schema` via `user_schema()`, its `StoreSchema` via `store_schema()`, and its `ColumnsMetadata` via `columns`; `ProjectedSchema::new()` builds a `ProjectedSchema` from a `RegionSchema`.
- `ProjectedSchema` exposes its projected `Schema` via `projected_user_schema()`.
- `StoreSchema` exposes its `Schema` via `schema()` and its `Vec<ColumnMetadata>` via `columns`; it can also be converted from an `ArrowSchema` via `TryFrom`.
- `ColumnsMetadata` exposes its `Vec<ColumnMetadata>` via `columns`.

@@ -1,2 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2023-05-03"
|
||||
channel = "nightly-2023-02-26"
|
||||
|
||||
@@ -7,12 +7,9 @@ set -e
|
||||
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
|
||||
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
|
||||
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
|
||||
OUT_DIR="${1:-$SCRIPT_DIR}"
|
||||
|
||||
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"
|
||||
|
||||
echo "Downloading assets to dir: $OUT_DIR"
|
||||
cd $OUT_DIR
|
||||
# Download the SHA256 checksum attached to the release. To verify the integrity
|
||||
# of the download, this checksum will be used to check the download tar file
|
||||
# containing the built dashboard assets.
|
||||
|
||||
@@ -51,17 +51,13 @@ get_os_type
|
||||
get_arch_type
|
||||
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest nightly version.
|
||||
if [ "${VERSION}" = "latest" ]; then
|
||||
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest version."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
|
||||
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
|
||||
if [ "${VERSION}" = "latest" ]; then
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest/download/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
|
||||
else
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
|
||||
fi
|
||||
|
||||
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
|
||||
fi
|
||||
|
||||
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-time = { path = "../common/time" }
|
||||
datatypes = { path = "../datatypes" }
|
||||
greptime-proto.workspace = true
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0bebe5f69c91cdfbce85cb8f45f9fcd28185261c" }
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
tonic.workspace = true
|
||||
|
||||
@@ -41,7 +41,7 @@ pub enum Error {
|
||||
))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
@@ -52,7 +52,7 @@ pub enum Error {
|
||||
))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -18,10 +18,6 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use greptime_proto::v1::ddl_request::Expr;
|
||||
use greptime_proto::v1::greptime_request::Request;
|
||||
use greptime_proto::v1::query_request::Query;
|
||||
use greptime_proto::v1::{DdlRequest, QueryRequest};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -228,38 +224,6 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
column.null_mask = null_mask.into_vec();
|
||||
}
|
||||
|
||||
/// Returns the type name of the [Request].
|
||||
pub fn request_type(request: &Request) -> &'static str {
|
||||
match request {
|
||||
Request::Inserts(_) => "inserts",
|
||||
Request::Query(query_req) => query_request_type(query_req),
|
||||
Request::Ddl(ddl_req) => ddl_request_type(ddl_req),
|
||||
Request::Delete(_) => "delete",
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the type name of the [QueryRequest].
|
||||
fn query_request_type(request: &QueryRequest) -> &'static str {
|
||||
match request.query {
|
||||
Some(Query::Sql(_)) => "query.sql",
|
||||
Some(Query::LogicalPlan(_)) => "query.logical_plan",
|
||||
Some(Query::PromRangeQuery(_)) => "query.prom_range",
|
||||
None => "query.empty",
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the type name of the [DdlRequest].
|
||||
fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
match request.expr {
|
||||
Some(Expr::CreateDatabase(_)) => "ddl.create_database",
|
||||
Some(Expr::CreateTable(_)) => "ddl.create_table",
|
||||
Some(Expr::Alter(_)) => "ddl.alter",
|
||||
Some(Expr::DropTable(_)) => "ddl.drop_table",
|
||||
Some(Expr::FlushTable(_)) => "ddl.flush_table",
|
||||
None => "ddl.empty",
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -23,5 +23,4 @@ pub mod prometheus {
|
||||
|
||||
pub mod v1;
|
||||
|
||||
pub use greptime_proto;
|
||||
pub use prost::DecodeError;
|
||||
|
||||
@@ -4,9 +4,6 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../api" }
|
||||
arc-swap = "1.0"
|
||||
@@ -17,7 +14,6 @@ backoff = { version = "0.4", features = ["tokio"] }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-meta = { path = "../common/meta" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-runtime = { path = "../common/runtime" }
|
||||
@@ -28,11 +24,8 @@ datafusion.workspace = true
|
||||
datatypes = { path = "../datatypes" }
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
key-lock = "0.1"
|
||||
lazy_static = "1.4"
|
||||
meta-client = { path = "../meta-client" }
|
||||
metrics.workspace = true
|
||||
moka = { version = "0.11", features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
regex = "1.6"
|
||||
serde = "1.0"
|
||||
@@ -40,12 +33,10 @@ serde_json = "1.0"
|
||||
session = { path = "../session" }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
storage = { path = "../storage" }
|
||||
store-api = { path = "../store-api" }
|
||||
table = { path = "../table" }
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
catalog = { path = ".", features = ["testing"] }
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
chrono.workspace = true
|
||||
log-store = { path = "../log-store" }
|
||||
|
||||
@@ -12,4 +12,4 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod engine;
|
||||
pub mod catalog_adapter;
|
||||
318
src/catalog/src/datafusion/catalog_adapter.rs
Normal file
318
src/catalog/src/datafusion/catalog_adapter.rs
Normal file
@@ -0,0 +1,318 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Catalog adapter between datafusion and greptime query engine.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use datafusion::catalog::catalog::{
|
||||
CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider,
|
||||
};
|
||||
use datafusion::catalog::schema::SchemaProvider as DfSchemaProvider;
|
||||
use datafusion::datasource::TableProvider as DfTableProvider;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use snafu::ResultExt;
|
||||
use table::table::adapter::{DfTableProviderAdapter, TableAdapter};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, Result, SchemaProviderOperationSnafu};
|
||||
use crate::{
|
||||
CatalogListRef, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
pub struct DfCatalogListAdapter {
|
||||
catalog_list: CatalogListRef,
|
||||
}
|
||||
|
||||
impl DfCatalogListAdapter {
|
||||
pub fn new(catalog_list: CatalogListRef) -> DfCatalogListAdapter {
|
||||
DfCatalogListAdapter { catalog_list }
|
||||
}
|
||||
}
|
||||
|
||||
impl DfCatalogList for DfCatalogListAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: Arc<dyn DfCatalogProvider>,
|
||||
) -> Option<Arc<dyn DfCatalogProvider>> {
|
||||
let catalog_adapter = Arc::new(CatalogProviderAdapter {
|
||||
df_catalog_provider: catalog,
|
||||
});
|
||||
self.catalog_list
|
||||
.register_catalog(name, catalog_adapter)
|
||||
.expect("datafusion does not accept fallible catalog access") // TODO(hl): datafusion register catalog does not handle errors
|
||||
.map(|catalog_provider| Arc::new(DfCatalogProviderAdapter { catalog_provider }) as _)
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Vec<String> {
|
||||
// TODO(hl): datafusion register catalog does not handle errors
|
||||
self.catalog_list
|
||||
.catalog_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Option<Arc<dyn DfCatalogProvider>> {
|
||||
self.catalog_list
|
||||
.catalog(name)
|
||||
.expect("datafusion does not accept fallible catalog access") // TODO(hl): datafusion register catalog does not handle errors
|
||||
.map(|catalog_provider| Arc::new(DfCatalogProviderAdapter { catalog_provider }) as _)
|
||||
}
|
||||
}
|
||||
|
||||
/// Datafusion's CatalogProvider -> greptime CatalogProvider
|
||||
struct CatalogProviderAdapter {
|
||||
df_catalog_provider: Arc<dyn DfCatalogProvider>,
|
||||
}
|
||||
|
||||
impl CatalogProvider for CatalogProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.df_catalog_provider.schema_names())
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
&self,
|
||||
_name: String,
|
||||
_schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
todo!("register_schema is not supported in Datafusion catalog provider")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
Ok(self
|
||||
.df_catalog_provider
|
||||
.schema(name)
|
||||
.map(|df_schema_provider| Arc::new(SchemaProviderAdapter { df_schema_provider }) as _))
|
||||
}
|
||||
}
|
||||
|
||||
/// Greptime CatalogProvider -> datafusion's CatalogProvider
|
||||
struct DfCatalogProviderAdapter {
|
||||
catalog_provider: CatalogProviderRef,
|
||||
}
|
||||
|
||||
impl DfCatalogProvider for DfCatalogProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
self.catalog_provider
|
||||
.schema_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Option<Arc<dyn DfSchemaProvider>> {
|
||||
self.catalog_provider
|
||||
.schema(name)
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
.map(|schema_provider| Arc::new(DfSchemaProviderAdapter { schema_provider }) as _)
|
||||
}
|
||||
}
|
||||
|
||||
/// Greptime SchemaProvider -> datafusion SchemaProvider
|
||||
struct DfSchemaProviderAdapter {
|
||||
schema_provider: Arc<dyn SchemaProvider>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl DfSchemaProvider for DfSchemaProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
self.schema_provider
|
||||
.table_names()
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Option<Arc<dyn DfTableProvider>> {
|
||||
self.schema_provider
|
||||
.table(name)
|
||||
.await
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
.map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
|
||||
}
|
||||
|
||||
fn register_table(
|
||||
&self,
|
||||
name: String,
|
||||
table: Arc<dyn DfTableProvider>,
|
||||
) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
|
||||
let table = Arc::new(TableAdapter::new(table)?);
|
||||
match self.schema_provider.register_table(name, table)? {
|
||||
Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
|
||||
match self.schema_provider.deregister_table(name)? {
|
||||
Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> bool {
|
||||
self.schema_provider
|
||||
.table_exist(name)
|
||||
.expect("datafusion does not accept fallible catalog access")
|
||||
}
|
||||
}
|
||||
|
||||
/// Datafusion SchemaProviderAdapter -> greptime SchemaProviderAdapter
|
||||
struct SchemaProviderAdapter {
|
||||
df_schema_provider: Arc<dyn DfSchemaProvider>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for SchemaProviderAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
/// Retrieves the list of available table names in this schema.
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.df_schema_provider.table_names())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let table = self.df_schema_provider.table(name).await;
|
||||
let table = table.map(|table_provider| {
|
||||
match table_provider
|
||||
.as_any()
|
||||
.downcast_ref::<DfTableProviderAdapter>()
|
||||
{
|
||||
Some(adapter) => adapter.table(),
|
||||
None => {
|
||||
// TODO(yingwen): Avoid panic here.
|
||||
let adapter =
|
||||
TableAdapter::new(table_provider).expect("convert datafusion table");
|
||||
Arc::new(adapter) as _
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
let table_provider = Arc::new(DfTableProviderAdapter::new(table.clone()));
|
||||
Ok(self
|
||||
.df_schema_provider
|
||||
.register_table(name, table_provider)
|
||||
.context(error::DatafusionSnafu {
|
||||
msg: "Fail to register table to datafusion",
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.context(SchemaProviderOperationSnafu)?
|
||||
.map(|_| table))
|
||||
}
|
||||
|
||||
fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
self.df_schema_provider
|
||||
.deregister_table(name)
|
||||
.context(error::DatafusionSnafu {
|
||||
msg: "Fail to deregister table from datafusion",
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.context(SchemaProviderOperationSnafu)?
|
||||
.map(|table| {
|
||||
let adapter = TableAdapter::new(table)
|
||||
.context(error::TableSchemaMismatchSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(SchemaProviderOperationSnafu)?;
|
||||
Ok(Arc::new(adapter) as _)
|
||||
})
|
||||
.transpose()
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
Ok(self.df_schema_provider.table_exist(name))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use table::table::numbers::NumbersTable;
|
||||
|
||||
use super::*;
|
||||
use crate::local::{new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
pub fn test_register_schema() {
|
||||
let adapter = CatalogProviderAdapter {
|
||||
df_catalog_provider: Arc::new(
|
||||
datafusion::catalog::catalog::MemoryCatalogProvider::new(),
|
||||
),
|
||||
};
|
||||
|
||||
adapter
|
||||
.register_schema(
|
||||
"whatever".to_string(),
|
||||
Arc::new(MemorySchemaProvider::new()),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_table() {
|
||||
let adapter = DfSchemaProviderAdapter {
|
||||
schema_provider: Arc::new(MemorySchemaProvider::new()),
|
||||
};
|
||||
|
||||
adapter
|
||||
.register_table(
|
||||
"test_table".to_string(),
|
||||
Arc::new(DfTableProviderAdapter::new(Arc::new(
|
||||
NumbersTable::default(),
|
||||
))),
|
||||
)
|
||||
.unwrap();
|
||||
adapter.table("test_table").await.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_register_catalog() {
|
||||
let catalog_list = DfCatalogListAdapter {
|
||||
catalog_list: new_memory_catalog_list().unwrap(),
|
||||
};
|
||||
assert!(catalog_list
|
||||
.register_catalog(
|
||||
"test_catalog".to_string(),
|
||||
Arc::new(DfCatalogProviderAdapter {
|
||||
catalog_provider: Arc::new(MemoryCatalogProvider::new()),
|
||||
}),
|
||||
)
|
||||
.is_none());
|
||||
|
||||
catalog_list.catalog("test_catalog").unwrap();
|
||||
}
|
||||
}
|
||||
@@ -20,30 +20,21 @@ use common_error::prelude::{Snafu, StatusCode};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::Location;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display(
|
||||
"Failed to re-compile script due to internal error, source: {}",
|
||||
source
|
||||
))]
|
||||
CompileScriptInternal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table, source: {}", source))]
|
||||
OpenSystemCatalog {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table, source: {}", source))]
|
||||
CreateSystemCatalog {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
@@ -54,7 +45,7 @@ pub enum Error {
|
||||
))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
@@ -94,7 +85,7 @@ pub enum Error {
|
||||
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
@@ -114,7 +105,7 @@ pub enum Error {
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Table not found: {}", table))]
|
||||
#[snafu(display("Table `{}` not exist", table))]
|
||||
TableNotExist { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
@@ -132,13 +123,10 @@ pub enum Error {
|
||||
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
|
||||
OpenTable {
|
||||
table_info: String,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel, source: {}", source))]
|
||||
ParallelOpenTable { source: JoinError },
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
@@ -147,13 +135,13 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to read system catalog table records"))]
|
||||
ReadSystemCatalog {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch, source: {}", source))]
|
||||
CreateRecordBatch {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
@@ -162,7 +150,7 @@ pub enum Error {
|
||||
source
|
||||
))]
|
||||
InsertCatalogRecord {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
@@ -173,7 +161,7 @@ pub enum Error {
|
||||
))]
|
||||
DeregisterTable {
|
||||
request: DeregisterTableRequest,
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
@@ -182,49 +170,69 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
|
||||
SystemCatalogTableScan {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
|
||||
SchemaProviderOperation {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("{source}"))]
|
||||
Internal {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to upgrade weak catalog manager reference. location: {}",
|
||||
location
|
||||
))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
|
||||
SystemCatalogTableScanExec {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot parse catalog value, source: {}", source))]
|
||||
InvalidCatalogValue {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
|
||||
MetaSrv {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog, source: {}", source))]
|
||||
InvalidTableInfoInCatalog {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
|
||||
CatalogEntrySerde {
|
||||
#[snafu(backtrace)]
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
source
|
||||
))]
|
||||
RegionStats {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table: String,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system table definition: {err_msg}"))]
|
||||
InvalidSystemTableDef { err_msg: String, location: Location },
|
||||
|
||||
@@ -237,18 +245,9 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Table schema mismatch, source: {}", source))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error: {}", source))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -262,21 +261,20 @@ impl ErrorExt for Error {
|
||||
| Error::IllegalManagerState { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::InvalidSystemTableDef { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
@@ -290,21 +288,20 @@ impl ErrorExt for Error {
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::DeregisterTable { source, .. }
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
| Error::RegionStats { source, .. }
|
||||
| Error::TableSchemaMismatch { source } => source.status_code(),
|
||||
|
||||
Error::MetaSrv { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
Error::SystemCatalogTableScan { source } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
|
||||
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -67,7 +67,6 @@ pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
|
||||
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
|
||||
}
|
||||
|
||||
/// Global table info has only one key across all datanodes so it does not have `node_id` field.
|
||||
pub fn build_table_global_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
@@ -79,7 +78,6 @@ pub fn build_table_global_prefix(
|
||||
)
|
||||
}
|
||||
|
||||
/// Regional table info varies between datanode, so it contains a `node_id` field.
|
||||
pub fn build_table_regional_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
@@ -93,7 +91,7 @@ pub fn build_table_regional_prefix(
|
||||
}
|
||||
|
||||
/// Table global info has only one key across all datanodes so it does not have `node_id` field.
|
||||
#[derive(Clone, Hash, Eq, PartialEq)]
|
||||
#[derive(Clone)]
|
||||
pub struct TableGlobalKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
@@ -126,14 +124,6 @@ impl TableGlobalKey {
|
||||
table_name: captures[3].to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_raw_key(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
|
||||
Self::parse(String::from_utf8_lossy(key))
|
||||
}
|
||||
}
|
||||
|
||||
/// Table global info contains necessary info for a datanode to create table regions, including
|
||||
@@ -151,10 +141,6 @@ impl TableGlobalValue {
|
||||
pub fn table_id(&self) -> TableId {
|
||||
self.table_info.ident.table_id
|
||||
}
|
||||
|
||||
pub fn engine(&self) -> &str {
|
||||
&self.table_info.meta.engine
|
||||
}
|
||||
}
|
||||
|
||||
/// Table regional info that varies between datanode, so it contains a `node_id` field.
|
||||
@@ -203,12 +189,8 @@ impl TableRegionalKey {
|
||||
/// region ids allocated by metasrv.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct TableRegionalValue {
|
||||
// We can remove the `Option` from the table id once all regional values
|
||||
// stored in meta have table ids.
|
||||
pub table_id: Option<TableId>,
|
||||
pub version: TableVersion,
|
||||
pub regions_ids: Vec<u32>,
|
||||
pub engine_name: Option<String>,
|
||||
}
|
||||
|
||||
pub struct CatalogKey {
|
||||
@@ -392,6 +374,6 @@ mod tests {
|
||||
#[test]
|
||||
fn test_table_global_value_compatibility() {
|
||||
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
|
||||
let _ = TableGlobalValue::parse(s).unwrap();
|
||||
TableGlobalValue::parse(s).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,126 +12,69 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod columns;
|
||||
mod tables;
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use datafusion::datasource::streaming::{PartitionStream, StreamingTable};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::{Result as TableResult, Table, TableRef};
|
||||
use table::table::adapter::TableAdapter;
|
||||
use table::TableRef;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::Result;
|
||||
use crate::error::{DatafusionSnafu, Result, TableSchemaMismatchSnafu};
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::CatalogManager;
|
||||
use crate::{CatalogProviderRef, SchemaProvider};
|
||||
|
||||
const TABLES: &str = "tables";
|
||||
const COLUMNS: &str = "columns";
|
||||
|
||||
pub struct InformationSchemaProvider {
|
||||
pub(crate) struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
|
||||
Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_provider,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let stream_builder = match name.to_ascii_lowercase().as_ref() {
|
||||
TABLES => Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
COLUMNS => Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _,
|
||||
_ => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some(Arc::new(InformationTable::new(stream_builder))))
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(ruihang): make it a more generic trait:
|
||||
// https://github.com/GreptimeTeam/greptimedb/pull/1639#discussion_r1205001903
|
||||
pub trait InformationStreamBuilder: Send + Sync {
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream>;
|
||||
|
||||
fn schema(&self) -> SchemaRef;
|
||||
}
|
||||
|
||||
pub struct InformationTable {
|
||||
stream_builder: Arc<dyn InformationStreamBuilder>,
|
||||
}
|
||||
|
||||
impl InformationTable {
|
||||
pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
|
||||
Self { stream_builder }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Table for InformationTable {
|
||||
impl SchemaProvider for InformationSchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.stream_builder.schema()
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
Ok(vec![TABLES.to_string()])
|
||||
}
|
||||
|
||||
fn table_info(&self) -> table::metadata::TableInfoRef {
|
||||
unreachable!("Should not call table_info() of InformationTable directly")
|
||||
}
|
||||
|
||||
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
|
||||
let projection = request.projection;
|
||||
let projected_schema = if let Some(projection) = &projection {
|
||||
Arc::new(
|
||||
self.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)?,
|
||||
)
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let table = if name.eq_ignore_ascii_case(TABLES) {
|
||||
Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
))
|
||||
} else {
|
||||
self.schema()
|
||||
return Ok(None);
|
||||
};
|
||||
let stream = self
|
||||
.stream_builder
|
||||
.to_stream()
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)?
|
||||
.map(move |batch| {
|
||||
batch.and_then(|batch| {
|
||||
if let Some(projection) = &projection {
|
||||
batch.try_project(projection)
|
||||
} else {
|
||||
Ok(batch)
|
||||
}
|
||||
})
|
||||
});
|
||||
let stream = RecordBatchStreamAdaptor {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
};
|
||||
Ok(Box::pin(stream))
|
||||
|
||||
let table = Arc::new(
|
||||
StreamingTable::try_new(table.schema().clone(), vec![table]).with_context(|_| {
|
||||
DatafusionSnafu {
|
||||
msg: format!("Failed to get InformationSchema table '{name}'"),
|
||||
}
|
||||
})?,
|
||||
);
|
||||
let table = TableAdapter::new(table).context(TableSchemaMismatchSnafu)?;
|
||||
Ok(Some(Arc::new(table)))
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
Ok(matches!(name.to_ascii_lowercase().as_str(), TABLES))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,231 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::{
|
||||
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, DataType};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use super::InformationStreamBuilder;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub(super) struct InformationSchemaColumns {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const COLUMN_NAME: &str = "column_name";
|
||||
const DATA_TYPE: &str = "data_type";
|
||||
const SEMANTIC_TYPE: &str = "semantic_type";
|
||||
|
||||
impl InformationSchemaColumns {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
]));
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaColumnsBuilder {
|
||||
InformationSchemaColumnsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationStreamBuilder for InformationSchemaColumns {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaColumnsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
column_names: StringVectorBuilder,
|
||||
data_types: StringVectorBuilder,
|
||||
semantic_types: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaColumnsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_names: StringVectorBuilder::with_capacity(42),
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
column_names: StringVectorBuilder::with_capacity(42),
|
||||
data_types: StringVectorBuilder::with_capacity(42),
|
||||
semantic_types: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.tables` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
if !catalog_manager
|
||||
.schema_exist(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
continue;
|
||||
}
|
||||
for table_name in catalog_manager
|
||||
.table_names(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let semantic_type = if column.is_time_index() {
|
||||
SEMANTIC_TYPE_TIME_INDEX
|
||||
} else if keys.contains(&idx) {
|
||||
SEMANTIC_TYPE_PRIMARY_KEY
|
||||
} else {
|
||||
SEMANTIC_TYPE_FIELD
|
||||
};
|
||||
self.add_column(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_column(
|
||||
&mut self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
column_name: &str,
|
||||
data_type: &str,
|
||||
semantic_type: &str,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.column_names.push(Some(column_name));
|
||||
self.data_types.push(Some(data_type));
|
||||
self.semantic_types.push(Some(semantic_type));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.column_names.finish()),
|
||||
Arc::new(self.data_types.finish()),
|
||||
Arc::new(self.semantic_types.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaColumns {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -12,49 +12,43 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use datatypes::vectors::StringVectorBuilder;
|
||||
use snafu::ResultExt;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationStreamBuilder;
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{CreateRecordBatchSnafu, Result};
|
||||
use crate::information_schema::TABLES;
|
||||
use crate::CatalogProviderRef;
|
||||
|
||||
pub(super) struct InformationSchemaTables {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
}
|
||||
|
||||
impl InformationSchemaTables {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
|
||||
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
|
||||
]));
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_provider,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,108 +56,62 @@ impl InformationSchemaTables {
|
||||
InformationSchemaTablesBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.catalog_provider.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationStreamBuilder for InformationSchemaTables {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.TABLE` table row by row
|
||||
///
|
||||
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
|
||||
struct InformationSchemaTablesBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
catalog_provider: CatalogProviderRef,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
table_types: StringVectorBuilder,
|
||||
table_ids: UInt32VectorBuilder,
|
||||
engines: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaTablesBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_provider,
|
||||
catalog_names: StringVectorBuilder::with_capacity(42),
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
table_types: StringVectorBuilder::with_capacity(42),
|
||||
table_ids: UInt32VectorBuilder::with_capacity(42),
|
||||
engines: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.tables` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
for schema_name in self.catalog_provider.schema_names()? {
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
if !catalog_manager
|
||||
.schema_exist(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
for table_name in catalog_manager
|
||||
.table_names(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
let Some(table) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await? else { continue };
|
||||
let table_info = table.table_info();
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
table.table_type(),
|
||||
Some(table_info.ident.table_id),
|
||||
Some(&table_info.meta.engine),
|
||||
);
|
||||
let Some(schema) = self.catalog_provider.schema(&schema_name)? else { continue };
|
||||
for table_name in schema.table_names()? {
|
||||
let Some(table) = schema.table(&table_name).await? else { continue };
|
||||
self.add_table(&catalog_name, &schema_name, &table_name, table.table_type());
|
||||
}
|
||||
}
|
||||
|
||||
// Add a final list for the information schema tables themselves
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
INFORMATION_SCHEMA_NAME,
|
||||
TABLES,
|
||||
TableType::View,
|
||||
);
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
@@ -173,8 +121,6 @@ impl InformationSchemaTablesBuilder {
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
table_type: TableType,
|
||||
table_id: Option<u32>,
|
||||
engine: Option<&str>,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
@@ -184,8 +130,6 @@ impl InformationSchemaTablesBuilder {
|
||||
TableType::View => "VIEW",
|
||||
TableType::Temporary => "LOCAL TEMPORARY",
|
||||
}));
|
||||
self.table_ids.push(table_id);
|
||||
self.engines.push(engine);
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
@@ -194,8 +138,6 @@ impl InformationSchemaTablesBuilder {
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.table_types.finish()),
|
||||
Arc::new(self.table_ids.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
@@ -207,7 +149,7 @@ impl DfPartitionStream for InformationSchemaTables {
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let schema = self.schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
|
||||
@@ -12,12 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(trait_upcasting)]
|
||||
#![feature(assert_matches)]
|
||||
#![feature(try_blocks)]
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -30,46 +27,80 @@ use table::requests::CreateTableRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CreateTableSnafu, Result};
|
||||
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
|
||||
|
||||
pub mod datafusion;
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
pub mod information_schema;
|
||||
pub(crate) mod information_schema;
|
||||
pub mod local;
|
||||
mod metrics;
|
||||
pub mod remote;
|
||||
pub mod schema;
|
||||
pub mod system;
|
||||
pub mod table_source;
|
||||
pub mod tables;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManager: Send + Sync {
|
||||
/// Represent a list of named catalogs
|
||||
pub trait CatalogList: Sync + Send {
|
||||
/// Returns the catalog list as [`Any`](std::any::Any)
|
||||
/// so that it can be downcast to a specific implementation.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Adds a new catalog to this catalog list
|
||||
/// If a catalog of the same name existed before, it is replaced in the list and returned.
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>>;
|
||||
|
||||
/// Retrieves the list of available catalog names
|
||||
fn catalog_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Retrieves a specific catalog by name, provided it exists.
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>>;
|
||||
}
|
||||
|
||||
/// Represents a catalog, comprising a number of named schemas.
|
||||
pub trait CatalogProvider: Sync + Send {
|
||||
/// Returns the catalog provider as [`Any`](std::any::Any)
|
||||
/// so that it can be downcast to a specific implementation.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Retrieves the list of available schema names in this catalog.
|
||||
fn schema_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Registers schema to this catalog.
|
||||
fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>>;
|
||||
|
||||
/// Retrieves a specific schema from the catalog by name, provided it exists.
|
||||
fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
}
|
||||
|
||||
pub type CatalogListRef = Arc<dyn CatalogList>;
|
||||
pub type CatalogProviderRef = Arc<dyn CatalogProvider>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManager: CatalogList {
|
||||
/// Starts a catalog manager.
|
||||
async fn start(&self) -> Result<()>;
|
||||
|
||||
/// Registers a catalog to catalog manager, returns whether the catalog exist before.
|
||||
async fn register_catalog(&self, name: String) -> Result<bool>;
|
||||
/// Registers a table within given catalog/schema to catalog manager,
|
||||
/// returns whether the table registered.
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
|
||||
|
||||
/// Deregisters a table within given catalog/schema to catalog manager,
|
||||
/// returns whether the table deregistered.
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
|
||||
|
||||
/// Register a schema with catalog name and schema name. Retuens whether the
|
||||
/// schema registered.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This method will/should fail if catalog not exist
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
|
||||
|
||||
/// Registers a table within given catalog/schema to catalog manager,
|
||||
/// returns whether the table registered.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This method will/should fail if catalog or schema not exist
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
|
||||
|
||||
/// Deregisters a table within given catalog/schema to catalog manager
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()>;
|
||||
|
||||
/// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
|
||||
|
||||
@@ -77,17 +108,7 @@ pub trait CatalogManager: Send + Sync {
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest)
|
||||
-> error::Result<()>;
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;
|
||||
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;
|
||||
|
||||
async fn catalog_exist(&self, catalog: &str) -> Result<bool>;
|
||||
|
||||
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool>;
|
||||
|
||||
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
|
||||
|
||||
/// Returns the table by catalog, schema and table name.
|
||||
async fn table(
|
||||
@@ -155,6 +176,14 @@ pub struct RegisterSchemaRequest {
|
||||
pub schema: String,
|
||||
}
|
||||
|
||||
pub trait CatalogProviderFactory {
|
||||
fn create(&self, catalog_name: String) -> CatalogProviderRef;
|
||||
}
|
||||
|
||||
pub trait SchemaProviderFactory {
|
||||
fn create(&self, catalog_name: String, schema_name: String) -> SchemaProviderRef;
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
|
||||
manager: &'a M,
|
||||
engine: TableEngineRef,
|
||||
@@ -180,7 +209,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
|
||||
table_name,
|
||||
),
|
||||
})?;
|
||||
let _ = manager
|
||||
manager
|
||||
.register_table(RegisterTableRequest {
|
||||
catalog: catalog_name.clone(),
|
||||
schema: schema_name.clone(),
|
||||
@@ -209,19 +238,21 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
|
||||
let mut region_number: u64 = 0;
|
||||
let mut region_stats = Vec::new();
|
||||
|
||||
let Ok(catalog_names) = catalog_manager.catalog_names().await else { return (region_number, region_stats) };
|
||||
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
|
||||
for catalog_name in catalog_names {
|
||||
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else { continue };
|
||||
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };
|
||||
|
||||
let Ok(schema_names) = catalog.schema_names() else { continue };
|
||||
for schema_name in schema_names {
|
||||
let Ok(table_names) = catalog_manager.table_names(&catalog_name,&schema_name).await else { continue };
|
||||
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };
|
||||
|
||||
let Ok(table_names) = schema.table_names() else { continue };
|
||||
for table_name in table_names {
|
||||
let Ok(Some(table)) = catalog_manager.table(&catalog_name, &schema_name, &table_name).await else { continue };
|
||||
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
|
||||
|
||||
let region_numbers = &table.table_info().meta.region_numbers;
|
||||
region_number += region_numbers.len() as u64;
|
||||
|
||||
let engine = &table.table_info().meta.engine;
|
||||
|
||||
match table.region_stats() {
|
||||
Ok(stats) => {
|
||||
let stats = stats.into_iter().map(|stat| RegionStat {
|
||||
@@ -232,7 +263,6 @@ pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<Reg
|
||||
table_name: table_name.clone(),
|
||||
}),
|
||||
approximate_bytes: stat.disk_usage_bytes as i64,
|
||||
attrs: HashMap::from([("engine_name".to_owned(), engine.clone())]),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
|
||||
@@ -16,4 +16,6 @@ pub mod manager;
|
||||
pub mod memory;
|
||||
|
||||
pub use manager::LocalCatalogManager;
|
||||
pub use memory::{new_memory_catalog_manager, MemoryCatalogManager};
|
||||
pub use memory::{
|
||||
new_memory_catalog_list, MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider,
|
||||
};
|
||||
|
||||
@@ -18,8 +18,7 @@ use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
|
||||
MITO_ENGINE, NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
|
||||
SYSTEM_CATALOG_TABLE_NAME,
|
||||
MITO_ENGINE, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
|
||||
};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
@@ -27,13 +26,12 @@ use common_telemetry::{error, info};
|
||||
use datatypes::prelude::ScalarVector;
|
||||
use datatypes::vectors::{BinaryVector, UInt8Vector};
|
||||
use futures_util::lock::Mutex;
|
||||
use metrics::increment_gauge;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::manager::TableEngineManagerRef;
|
||||
use table::engine::EngineContext;
|
||||
use table::metadata::TableId;
|
||||
use table::requests::OpenTableRequest;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
@@ -43,16 +41,16 @@ use crate::error::{
|
||||
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
|
||||
TableNotFoundSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::local::memory::MemoryCatalogManager;
|
||||
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use crate::system::{
|
||||
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
|
||||
VALUE_INDEX,
|
||||
};
|
||||
use crate::tables::SystemCatalog;
|
||||
use crate::{
|
||||
handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
|
||||
@@ -75,11 +73,11 @@ impl LocalCatalogManager {
|
||||
engine_name: MITO_ENGINE,
|
||||
})?;
|
||||
let table = SystemCatalogTable::new(engine.clone()).await?;
|
||||
let memory_catalog_manager = crate::local::memory::new_memory_catalog_manager()?;
|
||||
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
|
||||
let system_catalog = Arc::new(SystemCatalog::new(table));
|
||||
Ok(Self {
|
||||
system: system_catalog,
|
||||
catalogs: memory_catalog_manager,
|
||||
catalogs: memory_catalog_list,
|
||||
engine_manager,
|
||||
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
|
||||
init_lock: Mutex::new(false),
|
||||
@@ -90,7 +88,7 @@ impl LocalCatalogManager {
|
||||
|
||||
/// Scan all entries from system catalog table
|
||||
pub async fn init(&self) -> Result<()> {
|
||||
self.init_system_catalog().await?;
|
||||
self.init_system_catalog()?;
|
||||
let system_records = self.system.information_schema.system.records().await?;
|
||||
let entries = self.collect_system_catalog_entries(system_records).await?;
|
||||
let max_table_id = self.handle_system_catalog_entries(entries).await?;
|
||||
@@ -116,48 +114,27 @@ impl LocalCatalogManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn init_system_catalog(&self) -> Result<()> {
|
||||
// register SystemCatalogTable
|
||||
let _ = self
|
||||
.catalogs
|
||||
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
|
||||
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: SYSTEM_CATALOG_NAME.to_string(),
|
||||
schema: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
})?;
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: SYSTEM_CATALOG_NAME.to_string(),
|
||||
schema: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
table: self.system.information_schema.system.clone(),
|
||||
};
|
||||
let _ = self.catalogs.register_table(register_table_req).await?;
|
||||
fn init_system_catalog(&self) -> Result<()> {
|
||||
let system_schema = Arc::new(MemorySchemaProvider::new());
|
||||
system_schema.register_table(
|
||||
SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
self.system.information_schema.system.clone(),
|
||||
)?;
|
||||
let system_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
system_catalog.register_schema(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
|
||||
|
||||
// register default catalog and default schema
|
||||
let _ = self
|
||||
.catalogs
|
||||
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
|
||||
let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
})?;
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
let default_schema = Arc::new(MemorySchemaProvider::new());
|
||||
|
||||
// Add numbers table for test
|
||||
let numbers_table = Arc::new(NumbersTable::default());
|
||||
let register_number_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: NUMBERS_TABLE_NAME.to_string(),
|
||||
table_id: NUMBERS_TABLE_ID,
|
||||
table: numbers_table,
|
||||
};
|
||||
|
||||
let _ = self
|
||||
.catalogs
|
||||
.register_table(register_number_table_req)
|
||||
.await?;
|
||||
let table = Arc::new(NumbersTable::default());
|
||||
default_schema.register_table("numbers".to_string(), table)?;
|
||||
|
||||
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -229,26 +206,29 @@ impl LocalCatalogManager {
|
||||
for entry in entries {
|
||||
match entry {
|
||||
Entry::Catalog(c) => {
|
||||
let _ = self
|
||||
.catalogs
|
||||
.register_catalog_if_absent(c.catalog_name.clone());
|
||||
self.catalogs.register_catalog_if_absent(
|
||||
c.catalog_name.clone(),
|
||||
Arc::new(MemoryCatalogProvider::new()),
|
||||
);
|
||||
info!("Register catalog: {}", c.catalog_name);
|
||||
}
|
||||
Entry::Schema(s) => {
|
||||
let req = RegisterSchemaRequest {
|
||||
catalog: s.catalog_name.clone(),
|
||||
schema: s.schema_name.clone(),
|
||||
};
|
||||
let _ = self.catalogs.register_schema_sync(req)?;
|
||||
let catalog =
|
||||
self.catalogs
|
||||
.catalog(&s.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &s.catalog_name,
|
||||
})?;
|
||||
catalog.register_schema(
|
||||
s.schema_name.clone(),
|
||||
Arc::new(MemorySchemaProvider::new()),
|
||||
)?;
|
||||
info!("Registered schema: {:?}", s);
|
||||
}
|
||||
Entry::Table(t) => {
|
||||
max_table_id = max_table_id.max(t.table_id);
|
||||
if t.is_deleted {
|
||||
continue;
|
||||
}
|
||||
self.open_and_register_table(&t).await?;
|
||||
info!("Registered table: {:?}", t);
|
||||
max_table_id = max_table_id.max(t.table_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -263,16 +243,25 @@ impl LocalCatalogManager {
|
||||
}
|
||||
|
||||
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
|
||||
self.check_catalog_schema_exist(&t.catalog_name, &t.schema_name)
|
||||
.await?;
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(&t.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &t.catalog_name,
|
||||
})?;
|
||||
let schema = catalog
|
||||
.schema(&t.schema_name)?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &t.catalog_name,
|
||||
schema: &t.schema_name,
|
||||
})?;
|
||||
|
||||
let context = EngineContext {};
|
||||
let open_request = OpenTableRequest {
|
||||
let request = OpenTableRequest {
|
||||
catalog_name: t.catalog_name.clone(),
|
||||
schema_name: t.schema_name.clone(),
|
||||
table_name: t.table_name.clone(),
|
||||
table_id: t.table_id,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let engine = self
|
||||
.engine_manager
|
||||
@@ -281,8 +270,8 @@ impl LocalCatalogManager {
|
||||
engine_name: &t.engine,
|
||||
})?;
|
||||
|
||||
let table_ref = engine
|
||||
.open_table(&context, open_request)
|
||||
let option = engine
|
||||
.open_table(&context, request)
|
||||
.await
|
||||
.with_context(|_| OpenTableSnafu {
|
||||
table_info: format!(
|
||||
@@ -297,49 +286,34 @@ impl LocalCatalogManager {
|
||||
),
|
||||
})?;
|
||||
|
||||
let register_request = RegisterTableRequest {
|
||||
catalog: t.catalog_name.clone(),
|
||||
schema: t.schema_name.clone(),
|
||||
table_name: t.table_name.clone(),
|
||||
table_id: t.table_id,
|
||||
table: table_ref,
|
||||
};
|
||||
let _ = self.catalogs.register_table(register_request).await?;
|
||||
|
||||
schema.register_table(t.table_name.clone(), option)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_state(&self) -> Result<()> {
|
||||
let started = self.init_lock.lock().await;
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
Ok(())
|
||||
impl CatalogList for LocalCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn check_catalog_schema_exist(
|
||||
fn register_catalog(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
) -> Result<()> {
|
||||
if !self.catalogs.catalog_exist(catalog_name).await? {
|
||||
return CatalogNotFoundSnafu { catalog_name }.fail()?;
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
self.catalogs.register_catalog(name, catalog)
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
self.catalogs.catalog_names()
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
|
||||
Ok(Some(self.system.clone()))
|
||||
} else {
|
||||
self.catalogs.catalog(name)
|
||||
}
|
||||
if !self
|
||||
.catalogs
|
||||
.schema_exist(catalog_name, schema_name)
|
||||
.await?
|
||||
{
|
||||
return SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -359,21 +333,32 @@ impl CatalogManager for LocalCatalogManager {
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
self.check_state().await?;
|
||||
let started = self.init_lock.lock().await;
|
||||
|
||||
let catalog_name = request.catalog.clone();
|
||||
let schema_name = request.schema.clone();
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
|
||||
self.check_catalog_schema_exist(&catalog_name, &schema_name)
|
||||
.await?;
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
if let Some(existing) = self
|
||||
.catalogs
|
||||
.table(&request.catalog, &request.schema, &request.table_name)
|
||||
.await?
|
||||
{
|
||||
if let Some(existing) = schema.table(&request.table_name).await? {
|
||||
if existing.table_info().ident.table_id != request.table_id {
|
||||
error!(
|
||||
"Unexpected table register request: {:?}, existing: {:?}",
|
||||
@@ -382,8 +367,8 @@ impl CatalogManager for LocalCatalogManager {
|
||||
);
|
||||
return TableExistsSnafu {
|
||||
table: format_full_table_name(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
&request.table_name,
|
||||
),
|
||||
}
|
||||
@@ -392,53 +377,57 @@ impl CatalogManager for LocalCatalogManager {
|
||||
// Try to register table with same table id, just ignore.
|
||||
Ok(false)
|
||||
} else {
|
||||
// table does not exist
|
||||
let engine = request.table.table_info().meta.engine.to_string();
|
||||
let table_name = request.table_name.clone();
|
||||
let table_id = request.table_id;
|
||||
let _ = self.catalogs.register_table(request).await?;
|
||||
let _ = self
|
||||
.system
|
||||
// table does not exist
|
||||
self.system
|
||||
.register_table(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
table_name,
|
||||
table_id,
|
||||
request.table_name.clone(),
|
||||
request.table_id,
|
||||
engine,
|
||||
)
|
||||
.await?;
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&catalog_name, &schema_name)],
|
||||
);
|
||||
schema.register_table(request.table_name, request.table)?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
self.check_state().await?;
|
||||
let started = self.init_lock.lock().await;
|
||||
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
self.check_catalog_schema_exist(catalog_name, schema_name)
|
||||
.await?;
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
let _lock = self.register_lock.lock().await;
|
||||
ensure!(
|
||||
self.catalogs
|
||||
.table(catalog_name, schema_name, &request.new_table_name)
|
||||
.await?
|
||||
.is_none(),
|
||||
!schema.table_exist(&request.new_table_name)?,
|
||||
TableExistsSnafu {
|
||||
table: &request.new_table_name
|
||||
}
|
||||
);
|
||||
|
||||
let _lock = self.register_lock.lock().await;
|
||||
let old_table = self
|
||||
.catalogs
|
||||
.table(catalog_name, schema_name, &request.table_name)
|
||||
let old_table = schema
|
||||
.table(&request.table_name)
|
||||
.await?
|
||||
.context(TableNotExistSnafu {
|
||||
table: &request.table_name,
|
||||
@@ -446,8 +435,7 @@ impl CatalogManager for LocalCatalogManager {
|
||||
|
||||
let engine = old_table.table_info().meta.engine.to_string();
|
||||
// rename table in system catalog
|
||||
let _ = self
|
||||
.system
|
||||
self.system
|
||||
.register_table(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
@@ -457,11 +445,17 @@ impl CatalogManager for LocalCatalogManager {
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.catalogs.rename_table(request).await
|
||||
let renamed = schema
|
||||
.rename_table(&request.table_name, request.new_table_name.clone())
|
||||
.is_ok();
|
||||
Ok(renamed)
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
|
||||
self.check_state().await?;
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
{
|
||||
let started = *self.init_lock.lock().await;
|
||||
ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
|
||||
}
|
||||
|
||||
{
|
||||
let _ = self.register_lock.lock().await;
|
||||
@@ -482,58 +476,67 @@ impl CatalogManager for LocalCatalogManager {
|
||||
.ident
|
||||
.table_id;
|
||||
|
||||
self.system.deregister_table(&request, table_id).await?;
|
||||
if !self.system.deregister_table(&request, table_id).await? {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
self.catalogs.deregister_table(request).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
self.check_state().await?;
|
||||
|
||||
let started = self.init_lock.lock().await;
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
if !self.catalogs.catalog_exist(catalog_name).await? {
|
||||
return CatalogNotFoundSnafu { catalog_name }.fail()?;
|
||||
}
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
ensure!(
|
||||
!self
|
||||
.catalogs
|
||||
.schema_exist(catalog_name, schema_name)
|
||||
.await?,
|
||||
catalog.schema(schema_name)?.is_none(),
|
||||
SchemaExistsSnafu {
|
||||
schema: schema_name,
|
||||
}
|
||||
);
|
||||
let _ = self
|
||||
.system
|
||||
.register_schema(request.catalog.clone(), schema_name.clone())
|
||||
self.system
|
||||
.register_schema(request.catalog, schema_name.clone())
|
||||
.await?;
|
||||
self.catalogs.register_schema_sync(request)
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
|
||||
self.check_state().await?;
|
||||
|
||||
let catalog_name = request.create_table_request.catalog_name.clone();
|
||||
let schema_name = request.create_table_request.schema_name.clone();
|
||||
ensure!(
|
||||
!*self.init_lock.lock().await,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager already started",
|
||||
}
|
||||
);
|
||||
|
||||
let mut sys_table_requests = self.system_table_requests.lock().await;
|
||||
sys_table_requests.push(request);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&catalog_name, &schema_name)],
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.catalogs.schema_exist(catalog, schema).await
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
self.catalogs
|
||||
.catalog(catalog)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.schema(schema)
|
||||
}
|
||||
|
||||
async fn table(
|
||||
@@ -542,48 +545,17 @@ impl CatalogManager for LocalCatalogManager {
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
let manager: CatalogManagerRef = self.catalogs.clone() as _;
|
||||
let provider =
|
||||
InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
|
||||
return provider.table(table_name);
|
||||
}
|
||||
|
||||
self.catalogs
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
|
||||
if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
|
||||
Ok(true)
|
||||
} else {
|
||||
self.catalogs.catalog_exist(catalog).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
self.catalogs.table_exist(catalog, schema, table).await
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
self.catalogs.catalog_names().await
|
||||
}
|
||||
|
||||
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
|
||||
self.catalogs.schema_names(catalog_name).await
|
||||
}
|
||||
|
||||
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
|
||||
self.catalogs.table_names(catalog_name, schema_name).await
|
||||
}
|
||||
|
||||
async fn register_catalog(&self, name: String) -> Result<bool> {
|
||||
self.catalogs.register_catalog(name).await
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -605,7 +577,6 @@ mod tests {
|
||||
table_name: "T1".to_string(),
|
||||
table_id: 1,
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
is_deleted: false,
|
||||
}),
|
||||
Entry::Catalog(CatalogEntry {
|
||||
catalog_name: "C2".to_string(),
|
||||
@@ -627,7 +598,6 @@ mod tests {
|
||||
table_name: "T2".to_string(),
|
||||
table_id: 2,
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
is_deleted: false,
|
||||
}),
|
||||
];
|
||||
let res = LocalCatalogManager::sort_entries(vec);
|
||||
|
||||
@@ -18,27 +18,28 @@ use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use metrics::{decrement_gauge, increment_gauge};
|
||||
use snafu::OptionExt;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_telemetry::error;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::schema::SchemaProvider;
|
||||
use crate::{
|
||||
CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest,
|
||||
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProviderRef,
|
||||
};
|
||||
|
||||
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
|
||||
|
||||
/// Simple in-memory list of catalogs
|
||||
pub struct MemoryCatalogManager {
|
||||
/// Collection of catalogs containing schemas and ultimately Tables
|
||||
pub catalogs: RwLock<HashMap<String, SchemaEntries>>,
|
||||
pub catalogs: RwLock<HashMap<String, CatalogProviderRef>>,
|
||||
pub table_id: AtomicU32,
|
||||
}
|
||||
|
||||
@@ -48,14 +49,13 @@ impl Default for MemoryCatalogManager {
|
||||
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
|
||||
catalogs: Default::default(),
|
||||
};
|
||||
|
||||
let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
|
||||
let _ = manager
|
||||
.catalogs
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(DEFAULT_CATALOG_NAME.to_string(), catalog);
|
||||
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
manager
|
||||
.register_catalog("greptime".to_string(), default_catalog.clone())
|
||||
.unwrap();
|
||||
default_catalog
|
||||
.register_schema("public".to_string(), Arc::new(MemorySchemaProvider::new()))
|
||||
.unwrap();
|
||||
manager
|
||||
}
|
||||
}
|
||||
@@ -75,79 +75,71 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let catalog = request.catalog.clone();
|
||||
let schema = request.schema.clone();
|
||||
let result = self.register_table_sync(request);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&catalog, &schema)],
|
||||
);
|
||||
result
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
schema
|
||||
.register_table(request.table_name, request.table)
|
||||
.map(|v| v.is_none())
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
|
||||
// check old and new table names
|
||||
if !schema.contains_key(&request.table_name) {
|
||||
return TableNotFoundSnafu {
|
||||
table_info: request.table_name.to_string(),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
if schema.contains_key(&request.new_table_name) {
|
||||
return TableExistsSnafu {
|
||||
table: &request.new_table_name,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
let table = schema.remove(&request.table_name).unwrap();
|
||||
let _ = schema.insert(request.new_table_name, table);
|
||||
|
||||
Ok(true)
|
||||
Ok(schema
|
||||
.rename_table(&request.table_name, request.new_table_name)
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
let result = schema.remove(&request.table_name);
|
||||
if result.is_some() {
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
schema
|
||||
.deregister_table(&request.table_name)
|
||||
.map(|v| v.is_some())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let registered = self.register_schema_sync(request)?;
|
||||
if registered {
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
}
|
||||
Ok(registered)
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
|
||||
@@ -155,16 +147,13 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.contains_key(schema))
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
if let Some(c) = catalogs.get(catalog) {
|
||||
c.schema(schema)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn table(
|
||||
@@ -173,153 +162,197 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let result = try {
|
||||
self.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog)?
|
||||
.get(schema)?
|
||||
.get(table_name)
|
||||
.cloned()?
|
||||
let catalog = {
|
||||
let c = self.catalogs.read().unwrap();
|
||||
let Some(c) = c.get(catalog) else { return Ok(None) };
|
||||
c.clone()
|
||||
};
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
|
||||
}
|
||||
|
||||
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs
|
||||
.get(catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.get(schema)
|
||||
.with_context(|| SchemaNotFoundSnafu { catalog, schema })?
|
||||
.contains_key(table))
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.catalogs.read().unwrap().keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog_name)
|
||||
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog_name)
|
||||
.with_context(|| CatalogNotFoundSnafu { catalog_name })?
|
||||
.get(schema_name)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn register_catalog(&self, name: String) -> Result<bool> {
|
||||
let registered = self.register_catalog_sync(name)?;
|
||||
if registered {
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
match catalog.schema(schema)? {
|
||||
None => Ok(None),
|
||||
Some(s) => s.table(table_name).await,
|
||||
}
|
||||
Ok(registered)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl MemoryCatalogManager {
|
||||
/// Registers a catalog and return the catalog already exist
|
||||
pub fn register_catalog_if_absent(&self, name: String) -> bool {
|
||||
/// Registers a catalog and return `None` if no catalog with the same name was already
|
||||
/// registered, or `Some` with the previously registered catalog.
|
||||
pub fn register_catalog_if_absent(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Option<CatalogProviderRef> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let entry = catalogs.entry(name);
|
||||
match entry {
|
||||
Entry::Occupied(_) => true,
|
||||
Entry::Occupied(v) => Some(v.get().clone()),
|
||||
Entry::Vacant(v) => {
|
||||
let _ = v.insert(HashMap::new());
|
||||
false
|
||||
v.insert(catalog);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
Ok(catalogs.insert(name, HashMap::new()).is_some())
|
||||
impl CatalogList for MemoryCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn register_schema_sync(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
if catalog.contains_key(&request.schema) {
|
||||
return Ok(false);
|
||||
Ok(catalogs.insert(name, catalog))
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.keys().map(|s| s.to_string()).collect())
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemoryCatalogProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a catalog.
|
||||
pub struct MemoryCatalogProvider {
|
||||
schemas: RwLock<HashMap<String, Arc<dyn SchemaProvider>>>,
|
||||
}
|
||||
|
||||
impl MemoryCatalogProvider {
|
||||
/// Instantiates a new MemoryCatalogProvider with an empty collection of schemas.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
schemas: RwLock::new(HashMap::new()),
|
||||
}
|
||||
let _ = catalog.insert(request.schema, HashMap::new());
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProvider for MemoryCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.keys().cloned().collect())
|
||||
}
|
||||
|
||||
if schema.contains_key(&request.table_name) {
|
||||
return TableExistsSnafu {
|
||||
table: &request.table_name,
|
||||
fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
let mut schemas = self.schemas.write().unwrap();
|
||||
ensure!(
|
||||
!schemas.contains_key(&name),
|
||||
error::SchemaExistsSnafu { schema: &name }
|
||||
);
|
||||
Ok(schemas.insert(name, schema))
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a schema.
|
||||
pub struct MemorySchemaProvider {
|
||||
tables: RwLock<HashMap<String, TableRef>>,
|
||||
}
|
||||
|
||||
impl MemorySchemaProvider {
|
||||
/// Instantiates a new MemorySchemaProvider with an empty collection of tables.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
tables: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemorySchemaProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for MemorySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.get(name).cloned())
|
||||
}
|
||||
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
if let Some(existing) = tables.get(name.as_str()) {
|
||||
// if table with the same name but different table id exists, then it's a fatal bug
|
||||
if existing.table_info().ident.table_id != table.table_info().ident.table_id {
|
||||
error!(
|
||||
"Unexpected table register: {:?}, existing: {:?}",
|
||||
table.table_info(),
|
||||
existing.table_info()
|
||||
);
|
||||
return TableExistsSnafu { table: name }.fail()?;
|
||||
}
|
||||
.fail();
|
||||
Ok(Some(existing.clone()))
|
||||
} else {
|
||||
Ok(tables.insert(name, table))
|
||||
}
|
||||
|
||||
Ok(schema.insert(request.table_name, request.table).is_none())
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn new_with_table(table: TableRef) -> Self {
|
||||
let manager = Self::default();
|
||||
let request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table.table_info().name.clone(),
|
||||
table_id: table.table_info().ident.table_id,
|
||||
table,
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
let Some(table) = tables.remove(name) else {
|
||||
return TableNotFoundSnafu {
|
||||
table_info: name.to_string(),
|
||||
}
|
||||
.fail()?;
|
||||
};
|
||||
let _ = manager.register_table_sync(request).unwrap();
|
||||
manager
|
||||
let e = match tables.entry(new_name) {
|
||||
Entry::Vacant(e) => e,
|
||||
Entry::Occupied(e) => {
|
||||
return TableExistsSnafu { table: e.key() }.fail();
|
||||
}
|
||||
};
|
||||
e.insert(table.clone());
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
Ok(tables.remove(name))
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.contains_key(name))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a memory catalog list contains a numbers table for test
|
||||
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
|
||||
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
|
||||
Ok(Arc::new(MemoryCatalogManager::default()))
|
||||
}
|
||||
|
||||
@@ -328,99 +361,81 @@ mod tests {
|
||||
use common_catalog::consts::*;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::prelude::StatusCode;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table::numbers::NumbersTable;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_new_memory_catalog_list() {
|
||||
let catalog_list = new_memory_catalog_manager().unwrap();
|
||||
let catalog_list = new_memory_catalog_list().unwrap();
|
||||
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
|
||||
|
||||
let register_request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: NUMBERS_TABLE_NAME.to_string(),
|
||||
table_id: NUMBERS_TABLE_ID,
|
||||
table: Arc::new(NumbersTable::default()),
|
||||
};
|
||||
|
||||
let _ = catalog_list.register_table(register_request).await.unwrap();
|
||||
let table = catalog_list
|
||||
.table(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
NUMBERS_TABLE_NAME,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let _ = table.unwrap();
|
||||
assert!(catalog_list
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
|
||||
.await
|
||||
let default_schema = default_catalog
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.is_none());
|
||||
.unwrap();
|
||||
|
||||
default_schema
|
||||
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
|
||||
.unwrap();
|
||||
|
||||
let table = default_schema.table("numbers").await.unwrap();
|
||||
assert!(table.is_some());
|
||||
assert!(default_schema.table("not_exists").await.unwrap().is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_mem_manager_rename_table() {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let table_name = "test_table";
|
||||
assert!(!catalog
|
||||
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap());
|
||||
async fn test_mem_provider() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "numbers";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
let test_table = NumbersTable::default();
|
||||
// register table successfully
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), Arc::new(test_table))
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
let other_table = NumbersTable::new(12);
|
||||
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
|
||||
let err = result.err().unwrap();
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_mem_provider_rename_table() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "num";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
let test_table: TableRef = Arc::new(NumbersTable::default());
|
||||
// register test table
|
||||
let table_id = 2333;
|
||||
let register_request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
table_id,
|
||||
table: Arc::new(NumbersTable::new(table_id)),
|
||||
};
|
||||
assert!(catalog.register_table(register_request).await.unwrap());
|
||||
assert!(catalog
|
||||
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap());
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), test_table.clone())
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
|
||||
// rename test table
|
||||
let new_table_name = "test_table_renamed";
|
||||
let rename_request = RenameTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
new_table_name: new_table_name.to_string(),
|
||||
table_id,
|
||||
};
|
||||
let _ = catalog.rename_table(rename_request).await.unwrap();
|
||||
let new_table_name = "numbers";
|
||||
provider
|
||||
.rename_table(table_name, new_table_name.to_string())
|
||||
.unwrap();
|
||||
|
||||
// test old table name not exist
|
||||
assert!(!catalog
|
||||
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap());
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
|
||||
// test new table name exists
|
||||
assert!(catalog
|
||||
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap());
|
||||
let registered_table = catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(registered_table.table_info().ident.table_id, table_id);
|
||||
assert!(provider.table_exist(new_table_name).unwrap());
|
||||
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
|
||||
assert_eq!(
|
||||
registered_table.table_info().ident.table_id,
|
||||
test_table.table_info().ident.table_id
|
||||
);
|
||||
|
||||
let dup_register_request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: new_table_name.to_string(),
|
||||
table_id: table_id + 1,
|
||||
table: Arc::new(NumbersTable::new(table_id + 1)),
|
||||
};
|
||||
let result = catalog.register_table(dup_register_request).await;
|
||||
let other_table = Arc::new(NumbersTable::new(2));
|
||||
let result = provider.register_table(new_table_name.to_string(), other_table);
|
||||
let err = result.err().unwrap();
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
@@ -428,11 +443,15 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_catalog_rename_table() {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
// register table
|
||||
let table_name = "num";
|
||||
let table_id = 2333;
|
||||
let table: TableRef = Arc::new(NumbersTable::new(table_id));
|
||||
|
||||
// register table
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
@@ -441,11 +460,7 @@ mod tests {
|
||||
table,
|
||||
};
|
||||
assert!(catalog.register_table(register_table_req).await.unwrap());
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
assert!(schema.table_exist(table_name).unwrap());
|
||||
|
||||
// rename table
|
||||
let new_table_name = "numbers_new";
|
||||
@@ -457,16 +472,8 @@ mod tests {
|
||||
table_id,
|
||||
};
|
||||
assert!(catalog.rename_table(rename_table_req).await.unwrap());
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
assert!(!schema.table_exist(table_name).unwrap());
|
||||
assert!(schema.table_exist(new_table_name).unwrap());
|
||||
|
||||
let registered_table = catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
@@ -479,42 +486,49 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_register_if_absent() {
|
||||
let list = MemoryCatalogManager::default();
|
||||
assert!(!list.register_catalog_if_absent("test_catalog".to_string()));
|
||||
assert!(list.register_catalog_if_absent("test_catalog".to_string()));
|
||||
assert!(list
|
||||
.register_catalog_if_absent(
|
||||
"test_catalog".to_string(),
|
||||
Arc::new(MemoryCatalogProvider::new())
|
||||
)
|
||||
.is_none());
|
||||
list.register_catalog_if_absent(
|
||||
"test_catalog".to_string(),
|
||||
Arc::new(MemoryCatalogProvider::new()),
|
||||
)
|
||||
.unwrap();
|
||||
list.as_any()
|
||||
.downcast_ref::<MemoryCatalogManager>()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_catalog_deregister_table() {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let table_name = "foo_table";
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
table_name: "numbers".to_string(),
|
||||
table_id: 2333,
|
||||
table: Arc::new(NumbersTable::default()),
|
||||
};
|
||||
let _ = catalog.register_table(register_table_req).await.unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
catalog.register_table(register_table_req).await.unwrap();
|
||||
assert!(schema.table_exist("numbers").unwrap());
|
||||
|
||||
let deregister_table_req = DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
table_name: "numbers".to_string(),
|
||||
};
|
||||
catalog
|
||||
.deregister_table(deregister_table_req)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(!schema.table_exist("numbers").unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::build_db_string;
|
||||
|
||||
pub(crate) const METRIC_DB_LABEL: &str = "db";
|
||||
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
|
||||
|
||||
pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
|
||||
pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
|
||||
(METRIC_DB_LABEL, build_db_string(catalog, schema))
|
||||
}
|
||||
@@ -12,40 +12,78 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use client::{CachedMetaKvBackend, MetaKvBackend};
|
||||
pub use manager::RemoteCatalogManager;
|
||||
pub use client::MetaKvBackend;
|
||||
use futures::Stream;
|
||||
use futures_util::StreamExt;
|
||||
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
mod client;
|
||||
mod manager;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
pub mod mock;
|
||||
pub mod region_alive_keeper;
|
||||
#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);

pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;

#[async_trait::async_trait]
pub trait KvCacheInvalidator: Send + Sync {
async fn invalidate_key(&self, key: &[u8]);
}

#[async_trait::async_trait]
pub trait KvBackend: Send + Sync {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
where
'a: 'b;

async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error>;

/// Compare and set the value of a key. `expect` is the expected value; if the backend's
/// current value associated with the key is the same as `expect`, the value will be updated to `val`.
///
/// - If the compare-and-set operation successfully updated the value, this method returns `Ok(Ok(()))`.
/// - If the associated value is not the same as `expect`, nothing is updated and an `Ok(Err(..))`
///   is returned; the inner value is the key's current associated value (if any).
/// - If any error happens during the operation, an `Err(Error)` is returned.
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error>;

async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error>;

async fn delete(&self, key: &[u8]) -> Result<(), Error> {
self.delete_range(key, &[]).await
}

/// Default `get` is implemented based on the `range` method.
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
let mut iter = self.range(key);
while let Some(r) = iter.next().await {
let kv = r?;
if kv.0 == key {
return Ok(Some(kv));
}
}
Ok(None)
}
}
|
||||
|
||||
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
|
||||
pub type KvBackendRef = Arc<dyn KvBackend>;
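The `compare_and_set` contract documented above (success as `Ok(Ok(()))`, mismatch as `Ok(Err(current))`, transport failure as `Err(_)`) is easiest to see in a retry loop. A hedged sketch against any `KvBackendRef`; the function name, the key, and the "+1" value encoding are illustrative only and not part of this diff.

```rust
// Sketch of an optimistic update built on the compare_and_set contract above.
// Assumes this module's `Error` and `KvBackendRef` are in scope.
async fn append_marker(backend: &KvBackendRef, key: &[u8]) -> Result<(), Error> {
    loop {
        // Read the current value; a missing key is treated as empty bytes here.
        let current = backend.get(key).await?.map(|kv| kv.1).unwrap_or_default();
        let next = [current.as_slice(), b"+1".as_slice()].concat();

        match backend.compare_and_set(key, &current, &next).await? {
            // Our expectation held, so the new value was swapped in atomically.
            Ok(()) => return Ok(()),
            // Someone else won the race; loop and retry against the fresh value.
            Err(_actual) => continue,
        }
    }
}
```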
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
|
||||
use async_stream::stream;
|
||||
use common_meta::kv_backend::{Kv, KvBackend, ValueIter};
|
||||
|
||||
use crate::error::Error;
|
||||
use super::*;
|
||||
|
||||
struct MockKvBackend {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MockKvBackend {
|
||||
type Error = Error;
|
||||
|
||||
fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
@@ -76,29 +114,17 @@ mod tests {
|
||||
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn move_value(&self, _from_key: &[u8], _to_key: &[u8]) -> Result<(), Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get() {
|
||||
let backend = MockKvBackend {};
|
||||
|
||||
let result = backend.get(0.to_string().as_bytes()).await;
|
||||
assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
|
||||
let result = backend.get(1.to_string().as_bytes()).await;
|
||||
assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
|
||||
let result = backend.get(2.to_string().as_bytes()).await;
|
||||
assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
|
||||
let result = backend.get(3.to_string().as_bytes()).await;
|
||||
assert!(result.unwrap().is_none());
|
||||
}
|
||||
|
||||
@@ -12,167 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_stream::stream;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, MetaSrvSnafu, Result};
|
||||
use common_meta::kv_backend::{Kv, KvBackend, KvBackendRef, ValueIter};
|
||||
use common_meta::rpc::store::{
|
||||
CompareAndPutRequest, DeleteRangeRequest, MoveValueRequest, PutRequest, RangeRequest,
|
||||
};
|
||||
use common_telemetry::{info, timer};
|
||||
use common_telemetry::info;
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use super::KvCacheInvalidator;
|
||||
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
|
||||
|
||||
const CACHE_MAX_CAPACITY: u64 = 10000;
|
||||
const CACHE_TTL_SECOND: u64 = 10 * 60;
|
||||
const CACHE_TTI_SECOND: u64 = 5 * 60;
|
||||
|
||||
pub type CacheBackendRef = Arc<Cache<Vec<u8>, Kv>>;
|
||||
pub struct CachedMetaKvBackend {
|
||||
kv_backend: KvBackendRef,
|
||||
cache: CacheBackendRef,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for CachedMetaKvBackend {
|
||||
type Error = Error;
|
||||
|
||||
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
{
|
||||
self.kv_backend.range(key)
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_GET);
|
||||
|
||||
let init = async {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
|
||||
self.kv_backend.get(key).await.map(|val| {
|
||||
val.with_context(|| CacheNotGetSnafu {
|
||||
key: String::from_utf8_lossy(key),
|
||||
})
|
||||
})?
|
||||
};
|
||||
|
||||
// currently moka doesn't have `optionally_try_get_with_by_ref`
|
||||
// TODO(fys): change to moka method when available
|
||||
// https://github.com/moka-rs/moka/issues/254
|
||||
match self.cache.try_get_with_by_ref(key, init).await {
|
||||
Ok(val) => Ok(Some(val)),
|
||||
Err(e) => match e.as_ref() {
|
||||
CacheNotGet { .. } => Ok(None),
|
||||
_ => Err(e),
|
||||
},
|
||||
}
|
||||
.map_err(|e| GetKvCache {
|
||||
err_msg: e.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
|
||||
let ret = self.kv_backend.set(key, val).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn delete(&self, key: &[u8]) -> Result<()> {
|
||||
let ret = self.kv_backend.delete_range(key, &[]).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<()> {
|
||||
// TODO(fys): implement it
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn compare_and_set(
|
||||
&self,
|
||||
key: &[u8],
|
||||
expect: &[u8],
|
||||
val: &[u8],
|
||||
) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
|
||||
let ret = self.kv_backend.compare_and_set(key, expect, val).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
|
||||
let ret = self.kv_backend.move_value(from_key, to_key).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(from_key).await;
|
||||
self.invalidate_key(to_key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvCacheInvalidator for CachedMetaKvBackend {
|
||||
async fn invalidate_key(&self, key: &[u8]) {
|
||||
self.cache.invalidate(key).await
|
||||
}
|
||||
}
|
||||
|
||||
impl CachedMetaKvBackend {
|
||||
pub fn new(client: Arc<MetaClient>) -> Self {
|
||||
let cache = Arc::new(
|
||||
CacheBuilder::new(CACHE_MAX_CAPACITY)
|
||||
.time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
|
||||
.time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
|
||||
.build(),
|
||||
);
|
||||
let kv_backend = Arc::new(MetaKvBackend { client });
|
||||
|
||||
Self { kv_backend, cache }
|
||||
}
|
||||
|
||||
pub fn wrap(kv_backend: KvBackendRef) -> Self {
|
||||
let cache = Arc::new(
|
||||
CacheBuilder::new(CACHE_MAX_CAPACITY)
|
||||
.time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
|
||||
.time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
|
||||
.build(),
|
||||
);
|
||||
|
||||
Self { kv_backend, cache }
|
||||
}
|
||||
|
||||
pub fn cache(&self) -> &CacheBackendRef {
|
||||
&self.cache
|
||||
}
|
||||
}
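A short sketch of how the cached backend above is intended to be used, assuming only the constructors and trait methods shown in this diff; the key and value bytes are made up. The design point is write-through invalidation: every successful `set`, `delete`, or `compare_and_set` drops the cached entry so later `get`s re-read from Metasrv.

```rust
// Sketch: wire a Metasrv-backed KvBackend through the moka cache above.
// `meta_client` construction is elided; the error type follows the surrounding module.
async fn cached_backend_example(meta_client: Arc<MetaClient>) -> Result<(), Error> {
    let cached = CachedMetaKvBackend::new(meta_client);

    // Writes go to Metasrv first and, on success, invalidate the local entry.
    cached.set(b"__demo/key", b"v1").await?;

    // The first read populates the cache; later reads are served locally until
    // the TTL/TTI above expires or another write invalidates the key.
    let kv = cached.get(b"__demo/key").await?;
    assert!(kv.is_some());
    Ok(())
}
```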
|
||||
use meta_client::rpc::{CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, MetaSrvSnafu};
|
||||
use crate::remote::{Kv, KvBackend, ValueIter};
|
||||
#[derive(Debug)]
|
||||
pub struct MetaKvBackend {
|
||||
pub client: Arc<MetaClient>,
|
||||
@@ -183,8 +33,6 @@ pub struct MetaKvBackend {
|
||||
/// compared to `Accessor`'s list and get methods.
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MetaKvBackend {
|
||||
type Error = Error;
|
||||
|
||||
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
@@ -195,7 +43,6 @@ impl KvBackend for MetaKvBackend {
|
||||
.client
|
||||
.range(RangeRequest::new().with_prefix(key))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
let kvs = resp.take_kvs();
|
||||
for mut kv in kvs.into_iter() {
|
||||
@@ -204,12 +51,11 @@ impl KvBackend for MetaKvBackend {
|
||||
}))
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
|
||||
let mut response = self
|
||||
.client
|
||||
.range(RangeRequest::new().with_key(key))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
Ok(response
|
||||
.take_kvs()
|
||||
@@ -217,27 +63,17 @@ impl KvBackend for MetaKvBackend {
|
||||
.map(|kv| Kv(kv.take_key(), kv.take_value())))
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
||||
let req = PutRequest::new()
|
||||
.with_key(key.to_vec())
|
||||
.with_value(val.to_vec());
|
||||
let _ = self
|
||||
.client
|
||||
.put(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
let _ = self.client.put(req).await.context(MetaSrvSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<()> {
|
||||
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
|
||||
let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
|
||||
let resp = self
|
||||
.client
|
||||
.delete_range(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
|
||||
info!(
|
||||
"Delete range, key: {}, end: {}, deleted: {}",
|
||||
String::from_utf8_lossy(key),
|
||||
@@ -253,7 +89,7 @@ impl KvBackend for MetaKvBackend {
|
||||
key: &[u8],
|
||||
expect: &[u8],
|
||||
val: &[u8],
|
||||
) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
|
||||
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
|
||||
let request = CompareAndPutRequest::new()
|
||||
.with_key(key.to_vec())
|
||||
.with_expect(expect.to_vec())
|
||||
@@ -262,7 +98,6 @@ impl KvBackend for MetaKvBackend {
|
||||
.client
|
||||
.compare_and_put(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
if response.is_success() {
|
||||
Ok(Ok(()))
|
||||
@@ -270,19 +105,4 @@ impl KvBackend for MetaKvBackend {
|
||||
Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
|
||||
}
|
||||
}
|
||||
|
||||
async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
|
||||
let req = MoveValueRequest::new(from_key, to_key);
|
||||
let _ = self
|
||||
.client
|
||||
.move_value(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(MetaSrvSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,119 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, RwLock as StdRwLock};
|
||||
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::StringVector;
|
||||
use table::engine::{CloseTableResult, EngineContext, TableEngine};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{
|
||||
AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
|
||||
};
|
||||
use table::test_util::MemTable;
|
||||
use table::TableRef;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MockTableEngine {
|
||||
tables: StdRwLock<HashMap<TableId, TableRef>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableEngine for MockTableEngine {
|
||||
fn name(&self) -> &str {
|
||||
"MockTableEngine"
|
||||
}
|
||||
|
||||
/// Create a table with only one column
|
||||
async fn create_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: CreateTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
let table_id = request.id;
|
||||
|
||||
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
|
||||
"name",
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
)]));
|
||||
|
||||
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
|
||||
let record_batch = RecordBatch::new(schema, data).unwrap();
|
||||
let table: TableRef = Arc::new(MemTable::new_with_catalog(
|
||||
&request.table_name,
|
||||
record_batch,
|
||||
table_id,
|
||||
request.catalog_name,
|
||||
request.schema_name,
|
||||
vec![0],
|
||||
)) as Arc<_>;
|
||||
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
let _ = tables.insert(table_id, table.clone() as TableRef);
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn open_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: OpenTableRequest,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
Ok(self.tables.read().unwrap().get(&request.table_id).cloned())
|
||||
}
|
||||
|
||||
async fn alter_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: AlterTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
table_id: TableId,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
Ok(self.tables.read().unwrap().get(&table_id).cloned())
|
||||
}
|
||||
|
||||
fn table_exists(&self, _ctx: &EngineContext, table_id: TableId) -> bool {
|
||||
self.tables.read().unwrap().contains_key(&table_id)
|
||||
}
|
||||
|
||||
async fn drop_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: DropTableRequest,
|
||||
) -> table::Result<bool> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn close_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: CloseTableRequest,
|
||||
) -> table::Result<CloseTableResult> {
|
||||
let _ = self.tables.write().unwrap().remove(&request.table_id);
|
||||
Ok(CloseTableResult::Released(vec![]))
|
||||
}
|
||||
|
||||
async fn close(&self) -> table::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,822 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_meta::error::InvalidProtoMsgSnafu;
|
||||
use common_meta::heartbeat::handler::{
|
||||
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
|
||||
};
|
||||
use common_meta::ident::TableIdent;
|
||||
use common_meta::RegionIdent;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::engine::manager::TableEngineManagerRef;
|
||||
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
|
||||
use table::requests::CloseTableRequest;
|
||||
use table::TableRef;
|
||||
use tokio::sync::{mpsc, oneshot, Mutex};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
use crate::error::{Result, TableEngineNotFoundSnafu};
|
||||
|
||||
/// [RegionAliveKeepers] manages all [RegionAliveKeeper] in a scope of tables.
|
||||
pub struct RegionAliveKeepers {
|
||||
table_engine_manager: TableEngineManagerRef,
|
||||
keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
|
||||
heartbeat_interval_millis: u64,
|
||||
started: AtomicBool,
|
||||
|
||||
/// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
|
||||
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
|
||||
/// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
|
||||
/// duration acts like an "invariant point" for the region's keep-alive lease.
|
||||
epoch: Instant,
|
||||
}
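The `epoch` field is the heart of the lease protocol: the Datanode reports `now - epoch` in each heartbeat, Metasrv echoes that duration back together with a lease length, and the keeper can then reconstruct an absolute deadline without any clock synchronization. A minimal sketch of that arithmetic, mirroring the `handle` implementation below (the function name is illustrative):

```rust
// Sketch of the lease arithmetic used when handling a RegionLease response.
// `epoch` is captured once at startup; everything else comes from the lease.
use tokio::time::{Duration, Instant};

fn lease_deadline(epoch: Instant, duration_since_epoch_ms: u64, lease_seconds: u64) -> Instant {
    // The instant at which the heartbeat carrying this lease was sent...
    let heartbeat_sent = epoch + Duration::from_millis(duration_since_epoch_ms);
    // ...plus the lease length granted by Metasrv gives the countdown deadline.
    heartbeat_sent + Duration::from_secs(lease_seconds)
}
```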
|
||||
|
||||
impl RegionAliveKeepers {
|
||||
pub fn new(
|
||||
table_engine_manager: TableEngineManagerRef,
|
||||
heartbeat_interval_millis: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine_manager,
|
||||
keepers: Arc::new(Mutex::new(HashMap::new())),
|
||||
heartbeat_interval_millis,
|
||||
started: AtomicBool::new(false),
|
||||
epoch: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
|
||||
self.keepers.lock().await.get(table_ident).cloned()
|
||||
}
|
||||
|
||||
pub async fn register_table(&self, table_ident: TableIdent, table: TableRef) -> Result<()> {
|
||||
let keeper = self.find_keeper(&table_ident).await;
|
||||
if keeper.is_some() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let table_engine = self
|
||||
.table_engine_manager
|
||||
.engine(&table_ident.engine)
|
||||
.context(TableEngineNotFoundSnafu {
|
||||
engine_name: &table_ident.engine,
|
||||
})?;
|
||||
|
||||
let keeper = Arc::new(RegionAliveKeeper::new(
|
||||
table_engine,
|
||||
table_ident.clone(),
|
||||
self.heartbeat_interval_millis,
|
||||
));
|
||||
for r in table.table_info().meta.region_numbers.iter() {
|
||||
keeper.register_region(*r).await;
|
||||
}
|
||||
|
||||
let mut keepers = self.keepers.lock().await;
|
||||
let _ = keepers.insert(table_ident.clone(), keeper.clone());
|
||||
|
||||
if self.started.load(Ordering::Relaxed) {
|
||||
keeper.start().await;
|
||||
|
||||
info!("RegionAliveKeeper for table {table_ident} is started!");
|
||||
} else {
|
||||
info!("RegionAliveKeeper for table {table_ident} is registered but not started yet!");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn deregister_table(
|
||||
&self,
|
||||
table_ident: &TableIdent,
|
||||
) -> Option<Arc<RegionAliveKeeper>> {
|
||||
self.keepers.lock().await.remove(table_ident).map(|x| {
|
||||
info!("Deregister RegionAliveKeeper for table {table_ident}");
|
||||
x
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn register_region(&self, region_ident: &RegionIdent) {
|
||||
let table_ident = ®ion_ident.table_ident;
|
||||
let Some(keeper) = self.find_keeper(table_ident).await else {
|
||||
// Alive keeper could be affected by lagging msg, just warn and ignore.
|
||||
warn!("Alive keeper for region {region_ident} is not found!");
|
||||
return;
|
||||
};
|
||||
keeper.register_region(region_ident.region_number).await
|
||||
}
|
||||
|
||||
pub async fn deregister_region(&self, region_ident: &RegionIdent) {
|
||||
let table_ident = ®ion_ident.table_ident;
|
||||
let Some(keeper) = self.find_keeper(table_ident).await else {
|
||||
// Alive keeper could be affected by lagging msg, just warn and ignore.
|
||||
warn!("Alive keeper for region {region_ident} is not found!");
|
||||
return;
|
||||
};
|
||||
let _ = keeper.deregister_region(region_ident.region_number).await;
|
||||
}
|
||||
|
||||
pub async fn start(&self) {
|
||||
let keepers = self.keepers.lock().await;
|
||||
for keeper in keepers.values() {
|
||||
keeper.start().await;
|
||||
}
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
|
||||
info!(
|
||||
"RegionAliveKeepers for tables {:?} are started!",
|
||||
keepers.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn epoch(&self) -> Instant {
|
||||
self.epoch
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HeartbeatResponseHandler for RegionAliveKeepers {
|
||||
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
|
||||
!ctx.response.region_leases.is_empty()
|
||||
}
|
||||
|
||||
async fn handle(
|
||||
&self,
|
||||
ctx: &mut HeartbeatResponseHandlerContext,
|
||||
) -> common_meta::error::Result<HandleControl> {
|
||||
let leases = ctx.response.region_leases.drain(..).collect::<Vec<_>>();
|
||||
for lease in leases {
|
||||
let table_ident: TableIdent = match lease
|
||||
.table_ident
|
||||
.context(InvalidProtoMsgSnafu {
|
||||
err_msg: "'table_ident' is missing in RegionLease",
|
||||
})
|
||||
.and_then(|x| x.try_into())
|
||||
{
|
||||
Ok(x) => x,
|
||||
Err(e) => {
|
||||
error!(e; "");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let Some(keeper) = self.keepers.lock().await.get(&table_ident).cloned() else {
|
||||
// Alive keeper could be affected by lagging msg, just warn and ignore.
|
||||
warn!("Alive keeper for table {table_ident} is not found!");
|
||||
continue;
|
||||
};
|
||||
|
||||
let start_instant = self.epoch + Duration::from_millis(lease.duration_since_epoch);
|
||||
let deadline = start_instant + Duration::from_secs(lease.lease_seconds);
|
||||
keeper.keep_lived(lease.regions, deadline).await;
|
||||
}
|
||||
Ok(HandleControl::Continue)
|
||||
}
|
||||
}
|
||||
|
||||
/// [RegionAliveKeeper] starts a countdown for each region in a table. When deadline is reached,
|
||||
/// the region will be closed.
|
||||
/// The deadline is controlled by Metasrv. It works like "lease" for regions: a Datanode submits its
|
||||
/// opened regions to Metasrv in heartbeats. If Metasrv decides some region can reside in this
|
||||
/// Datanode, it will "extend" the region's "lease", with a deadline for [RegionAliveKeeper] to
|
||||
/// countdown.
|
||||
pub struct RegionAliveKeeper {
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
|
||||
heartbeat_interval_millis: u64,
|
||||
started: AtomicBool,
|
||||
}
|
||||
|
||||
impl RegionAliveKeeper {
|
||||
fn new(
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
heartbeat_interval_millis: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine,
|
||||
table_ident,
|
||||
countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
|
||||
heartbeat_interval_millis,
|
||||
started: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
async fn find_handle(&self, region: &RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
|
||||
self.countdown_task_handles
|
||||
.lock()
|
||||
.await
|
||||
.get(region)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
async fn register_region(&self, region: RegionNumber) {
|
||||
if self.find_handle(®ion).await.is_some() {
|
||||
return;
|
||||
}
|
||||
|
||||
let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
|
||||
let on_task_finished = async move {
|
||||
if let Some(x) = countdown_task_handles.upgrade() {
|
||||
let _ = x.lock().await.remove(®ion);
|
||||
} // Else the countdown task handles map could be dropped because the keeper is dropped.
|
||||
};
|
||||
let handle = Arc::new(CountdownTaskHandle::new(
|
||||
self.table_engine.clone(),
|
||||
self.table_ident.clone(),
|
||||
region,
|
||||
|| on_task_finished,
|
||||
));
|
||||
|
||||
let mut handles = self.countdown_task_handles.lock().await;
|
||||
let _ = handles.insert(region, handle.clone());
|
||||
|
||||
if self.started.load(Ordering::Relaxed) {
|
||||
handle.start(self.heartbeat_interval_millis).await;
|
||||
|
||||
info!(
|
||||
"Region alive countdown for region {region} in table {} is started!",
|
||||
self.table_ident
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"Region alive countdown for region {region} in table {} is registered but not started yet!",
|
||||
self.table_ident
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn deregister_region(&self, region: RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
|
||||
self.countdown_task_handles
|
||||
.lock()
|
||||
.await
|
||||
.remove(®ion)
|
||||
.map(|x| {
|
||||
info!(
|
||||
"Deregister alive countdown for region {region} in table {}",
|
||||
self.table_ident
|
||||
);
|
||||
x
|
||||
})
|
||||
}
|
||||
|
||||
async fn start(&self) {
|
||||
let handles = self.countdown_task_handles.lock().await;
|
||||
for handle in handles.values() {
|
||||
handle.start(self.heartbeat_interval_millis).await;
|
||||
}
|
||||
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
info!(
|
||||
"Region alive countdowns for regions {:?} in table {} are started!",
|
||||
handles.keys().copied().collect::<Vec<_>>(),
|
||||
self.table_ident
|
||||
);
|
||||
}
|
||||
|
||||
async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
|
||||
for region in designated_regions {
|
||||
if let Some(handle) = self.find_handle(®ion).await {
|
||||
handle.reset_deadline(deadline).await;
|
||||
}
|
||||
// Else the region alive keeper might be triggered by lagging messages, we can safely ignore it.
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn deadline(&self, region: RegionNumber) -> Option<Instant> {
|
||||
let mut deadline = None;
|
||||
if let Some(handle) = self.find_handle(®ion).await {
|
||||
let (s, r) = oneshot::channel();
|
||||
if handle.tx.send(CountdownCommand::Deadline(s)).await.is_ok() {
|
||||
deadline = r.await.ok()
|
||||
}
|
||||
}
|
||||
deadline
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CountdownCommand {
|
||||
Start(u64),
|
||||
Reset(Instant),
|
||||
Deadline(oneshot::Sender<Instant>),
|
||||
}
|
||||
|
||||
struct CountdownTaskHandle {
|
||||
tx: mpsc::Sender<CountdownCommand>,
|
||||
handler: JoinHandle<()>,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
}
|
||||
|
||||
impl CountdownTaskHandle {
|
||||
/// Creates a new [CountdownTaskHandle] and starts the countdown task.
|
||||
/// # Params
|
||||
/// - `on_task_finished`: a callback to be invoked when the task is finished. Note that it will not
|
||||
/// be invoked if the task is cancelled (by dropping the handle). This is because we want something
|
||||
/// meaningful to be done when the task is finished, e.g. deregister the handle from the map.
|
||||
/// Dropping the handle, on the other hand, does not necessarily mean the task has finished.
|
||||
fn new<Fut>(
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
on_task_finished: impl FnOnce() -> Fut + Send + 'static,
|
||||
) -> Self
|
||||
where
|
||||
Fut: Future<Output = ()> + Send,
|
||||
{
|
||||
let (tx, rx) = mpsc::channel(1024);
|
||||
|
||||
let mut countdown_task = CountdownTask {
|
||||
table_engine,
|
||||
table_ident: table_ident.clone(),
|
||||
region,
|
||||
rx,
|
||||
};
|
||||
let handler = common_runtime::spawn_bg(async move {
|
||||
countdown_task.run().await;
|
||||
on_task_finished().await;
|
||||
});
|
||||
|
||||
Self {
|
||||
tx,
|
||||
handler,
|
||||
table_ident,
|
||||
region,
|
||||
}
|
||||
}
|
||||
|
||||
async fn start(&self, heartbeat_interval_millis: u64) {
|
||||
if let Err(e) = self
|
||||
.tx
|
||||
.send(CountdownCommand::Start(heartbeat_interval_millis))
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to start region alive keeper countdown: {e}. \
|
||||
Maybe the task has stopped because the region was closed."
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn reset_deadline(&self, deadline: Instant) {
|
||||
if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
|
||||
warn!(
|
||||
"Failed to reset region alive keeper deadline: {e}. \
|
||||
Maybe the task has stopped because the region was closed."
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CountdownTaskHandle {
|
||||
fn drop(&mut self) {
|
||||
debug!(
|
||||
"Aborting region alive countdown task for region {} in table {}",
|
||||
self.region, self.table_ident,
|
||||
);
|
||||
self.handler.abort();
|
||||
}
|
||||
}
|
||||
|
||||
struct CountdownTask {
|
||||
table_engine: TableEngineRef,
|
||||
table_ident: TableIdent,
|
||||
region: RegionNumber,
|
||||
rx: mpsc::Receiver<CountdownCommand>,
|
||||
}
|
||||
|
||||
impl CountdownTask {
|
||||
async fn run(&mut self) {
|
||||
// 30 years. See `Instant::far_future`.
|
||||
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
|
||||
|
||||
// Make sure the alive countdown is not gonna happen before heartbeat task is started (the
|
||||
// "start countdown" command will be sent from heartbeat task).
|
||||
let countdown = tokio::time::sleep_until(far_future);
|
||||
tokio::pin!(countdown);
|
||||
|
||||
let region = &self.region;
|
||||
let table_ident = &self.table_ident;
|
||||
loop {
|
||||
tokio::select! {
|
||||
command = self.rx.recv() => {
|
||||
match command {
|
||||
Some(CountdownCommand::Start(heartbeat_interval_millis)) => {
|
||||
// Set first deadline in 4 heartbeats (roughly after 20 seconds from now if heartbeat
|
||||
// interval is set to the default 5 seconds), to make Datanode and Metasrv more tolerant of
|
||||
// network or other jitters during startup.
|
||||
let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
|
||||
countdown.set(tokio::time::sleep_until(first_deadline));
|
||||
},
|
||||
Some(CountdownCommand::Reset(deadline)) => {
|
||||
if countdown.deadline() < deadline {
|
||||
debug!(
|
||||
"Reset deadline of region {region} of table {table_ident} to approximately {} seconds later",
|
||||
(deadline - Instant::now()).as_secs_f32(),
|
||||
);
|
||||
countdown.set(tokio::time::sleep_until(deadline));
|
||||
}
|
||||
// Else the countdown could be either:
|
||||
// - not started yet;
|
||||
// - during startup protection;
|
||||
// - received a lagging heartbeat message.
|
||||
// All can be safely ignored.
|
||||
},
|
||||
None => {
|
||||
info!(
|
||||
"The handle of countdown task for region {region} of table {table_ident} \
|
||||
is dropped, RegionAliveKeeper out."
|
||||
);
|
||||
break;
|
||||
},
|
||||
Some(CountdownCommand::Deadline(tx)) => {
|
||||
let _ = tx.send(countdown.deadline());
|
||||
}
|
||||
}
|
||||
}
|
||||
() = &mut countdown => {
|
||||
let result = self.close_region().await;
|
||||
warn!(
|
||||
"Region {region} of table {table_ident} is closed, result: {result:?}. \
|
||||
RegionAliveKeeper out.",
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn close_region(&self) -> CloseTableResult {
|
||||
let ctx = EngineContext::default();
|
||||
let region = self.region;
|
||||
let table_ident = &self.table_ident;
|
||||
loop {
|
||||
let request = CloseTableRequest {
|
||||
catalog_name: table_ident.catalog.clone(),
|
||||
schema_name: table_ident.schema.clone(),
|
||||
table_name: table_ident.table.clone(),
|
||||
table_id: table_ident.table_id,
|
||||
region_numbers: vec![region],
|
||||
flush: true,
|
||||
};
|
||||
match self.table_engine.close_table(&ctx, request).await {
|
||||
Ok(result) => return result,
|
||||
// If region is failed to close, immediately retry. Maybe we should panic instead?
|
||||
Err(e) => error!(e;
|
||||
"Failed to close region {region} of table {table_ident}. \
|
||||
For the integrity of data, retry closing without waiting.",
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
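The `select!` loop above leans on a resettable `sleep_until` future. A standalone sketch of that pattern, stripped of the table-engine details; everything here is illustrative and not GreptimeDB API.

```rust
use tokio::sync::mpsc;
use tokio::time::{sleep_until, Duration, Instant};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Instant>(8);

    // Start "unarmed": a deadline far in the future, like the real task does.
    let countdown = sleep_until(Instant::now() + Duration::from_secs(3600));
    tokio::pin!(countdown);

    // Arm the countdown with a near deadline (stands in for a Metasrv lease).
    tx.send(Instant::now() + Duration::from_millis(100)).await.unwrap();

    loop {
        tokio::select! {
            Some(deadline) = rx.recv() => {
                // The real task only ever moves the deadline forward; this sketch
                // resets unconditionally to stay short.
                countdown.set(sleep_until(deadline));
            }
            () = &mut countdown => {
                println!("lease expired; the real task would close the region here");
                break;
            }
        }
    }
}
```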
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::{HeartbeatResponse, RegionLease};
|
||||
use common_meta::heartbeat::mailbox::HeartbeatMailbox;
|
||||
use datatypes::schema::RawSchema;
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::engine::TableEngine;
|
||||
use table::requests::{CreateTableRequest, TableOptions};
|
||||
use table::test_util::EmptyTable;
|
||||
|
||||
use super::*;
|
||||
use crate::remote::mock::MockTableEngine;
|
||||
|
||||
async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
|
||||
let keepers = RegionAliveKeepers::new(table_engine_manager, 5000);
|
||||
|
||||
let catalog = "my_catalog";
|
||||
let schema = "my_schema";
|
||||
let table = "my_table";
|
||||
let table_ident = TableIdent {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
table: table.to_string(),
|
||||
table_id: 1,
|
||||
engine: "MockTableEngine".to_string(),
|
||||
};
|
||||
let table = Arc::new(EmptyTable::new(CreateTableRequest {
|
||||
id: 1,
|
||||
catalog_name: catalog.to_string(),
|
||||
schema_name: schema.to_string(),
|
||||
table_name: table.to_string(),
|
||||
desc: None,
|
||||
schema: RawSchema {
|
||||
column_schemas: vec![],
|
||||
timestamp_index: None,
|
||||
version: 0,
|
||||
},
|
||||
region_numbers: vec![1, 2, 3],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
table_options: TableOptions::default(),
|
||||
engine: "MockTableEngine".to_string(),
|
||||
}));
|
||||
keepers
|
||||
.register_table(table_ident.clone(), table)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(keepers.keepers.lock().await.contains_key(&table_ident));
|
||||
|
||||
(table_ident, keepers)
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_handle_heartbeat_response() {
|
||||
let (table_ident, keepers) = prepare_keepers().await;
|
||||
|
||||
keepers.start().await;
|
||||
let startup_protection_until = Instant::now() + Duration::from_secs(21);
|
||||
|
||||
let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
|
||||
let lease_seconds = 100;
|
||||
let response = HeartbeatResponse {
|
||||
region_leases: vec![RegionLease {
|
||||
table_ident: Some(table_ident.clone().into()),
|
||||
regions: vec![1, 3], // Not extending region 2's lease time.
|
||||
duration_since_epoch,
|
||||
lease_seconds,
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
let keep_alive_until = keepers.epoch
|
||||
+ Duration::from_millis(duration_since_epoch)
|
||||
+ Duration::from_secs(lease_seconds);
|
||||
|
||||
let (tx, _) = mpsc::channel(8);
|
||||
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
|
||||
let mut ctx = HeartbeatResponseHandlerContext::new(mailbox, response);
|
||||
|
||||
assert!(keepers.handle(&mut ctx).await.unwrap() == HandleControl::Continue);
|
||||
|
||||
// sleep to wait for background task spawned in `handle`
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
async fn test(
|
||||
keeper: &Arc<RegionAliveKeeper>,
|
||||
region_number: RegionNumber,
|
||||
startup_protection_until: Instant,
|
||||
keep_alive_until: Instant,
|
||||
is_kept_live: bool,
|
||||
) {
|
||||
let deadline = keeper.deadline(region_number).await.unwrap();
|
||||
if is_kept_live {
|
||||
assert!(deadline > startup_protection_until && deadline == keep_alive_until);
|
||||
} else {
|
||||
assert!(deadline <= startup_protection_until);
|
||||
}
|
||||
}
|
||||
|
||||
let keeper = &keepers
|
||||
.keepers
|
||||
.lock()
|
||||
.await
|
||||
.get(&table_ident)
|
||||
.cloned()
|
||||
.unwrap();
|
||||
|
||||
// Test that regions 1 and 3 are kept alive. Their deadlines are updated to the desired instant.
|
||||
test(keeper, 1, startup_protection_until, keep_alive_until, true).await;
|
||||
test(keeper, 3, startup_protection_until, keep_alive_until, true).await;
|
||||
|
||||
// Test that region 2 is not kept alive. Its deadline is not updated: still within the startup protection period.
|
||||
test(keeper, 2, startup_protection_until, keep_alive_until, false).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_region_alive_keepers() {
|
||||
let (table_ident, keepers) = prepare_keepers().await;
|
||||
|
||||
keepers
|
||||
.register_region(&RegionIdent {
|
||||
cluster_id: 1,
|
||||
datanode_id: 1,
|
||||
table_ident: table_ident.clone(),
|
||||
region_number: 4,
|
||||
})
|
||||
.await;
|
||||
|
||||
keepers.start().await;
|
||||
for keeper in keepers.keepers.lock().await.values() {
|
||||
let regions = {
|
||||
let handles = keeper.countdown_task_handles.lock().await;
|
||||
handles.keys().copied().collect::<Vec<_>>()
|
||||
};
|
||||
for region in regions {
|
||||
// assert countdown tasks are started
|
||||
let deadline = keeper.deadline(region).await.unwrap();
|
||||
assert!(deadline <= Instant::now() + Duration::from_secs(20));
|
||||
}
|
||||
}
|
||||
|
||||
keepers
|
||||
.deregister_region(&RegionIdent {
|
||||
cluster_id: 1,
|
||||
datanode_id: 1,
|
||||
table_ident: table_ident.clone(),
|
||||
region_number: 1,
|
||||
})
|
||||
.await;
|
||||
let mut regions = keepers
|
||||
.find_keeper(&table_ident)
|
||||
.await
|
||||
.unwrap()
|
||||
.countdown_task_handles
|
||||
.lock()
|
||||
.await
|
||||
.keys()
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
regions.sort();
|
||||
assert_eq!(regions, vec![2, 3, 4]);
|
||||
|
||||
let keeper = keepers.deregister_table(&table_ident).await.unwrap();
|
||||
assert!(Arc::try_unwrap(keeper).is_ok(), "keeper is not dropped");
|
||||
assert!(keepers.keepers.lock().await.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_region_alive_keeper() {
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let table_ident = TableIdent {
|
||||
catalog: "my_catalog".to_string(),
|
||||
schema: "my_schema".to_string(),
|
||||
table: "my_table".to_string(),
|
||||
table_id: 1024,
|
||||
engine: "mito".to_string(),
|
||||
};
|
||||
let keeper = RegionAliveKeeper::new(table_engine, table_ident, 1000);
|
||||
|
||||
let region = 1;
|
||||
assert!(keeper.find_handle(®ion).await.is_none());
|
||||
keeper.register_region(region).await;
|
||||
let _ = keeper.find_handle(®ion).await.unwrap();
|
||||
|
||||
let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
|
||||
|
||||
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
|
||||
assert!(keeper.find_handle(&2).await.is_none());
|
||||
assert!(keeper.find_handle(&3).await.is_none());
|
||||
|
||||
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
|
||||
// assert that if the keeper is not started, keep_lived has no effect
|
||||
assert!(keeper.deadline(region).await.unwrap() > far_future);
|
||||
|
||||
keeper.start().await;
|
||||
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
|
||||
// assert keep_lived works if keeper is started
|
||||
assert!(keeper.deadline(region).await.unwrap() <= ten_seconds_later());
|
||||
|
||||
let handle = keeper.deregister_region(region).await.unwrap();
|
||||
assert!(Arc::try_unwrap(handle).is_ok(), "handle is not dropped");
|
||||
assert!(keeper.find_handle(®ion).await.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_countdown_task_handle() {
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let table_ident = TableIdent {
|
||||
catalog: "my_catalog".to_string(),
|
||||
schema: "my_schema".to_string(),
|
||||
table: "my_table".to_string(),
|
||||
table_id: 1024,
|
||||
engine: "mito".to_string(),
|
||||
};
|
||||
let finished = Arc::new(AtomicBool::new(false));
|
||||
let finished_clone = finished.clone();
|
||||
let handle = CountdownTaskHandle::new(
|
||||
table_engine.clone(),
|
||||
table_ident.clone(),
|
||||
1,
|
||||
|| async move { finished_clone.store(true, Ordering::Relaxed) },
|
||||
);
|
||||
let tx = handle.tx.clone();
|
||||
|
||||
// assert countdown task is running
|
||||
tx.send(CountdownCommand::Start(5000)).await.unwrap();
|
||||
assert!(!finished.load(Ordering::Relaxed));
|
||||
|
||||
drop(handle);
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// assert countdown task is stopped
|
||||
assert!(tx
|
||||
.try_send(CountdownCommand::Reset(
|
||||
Instant::now() + Duration::from_secs(10)
|
||||
))
|
||||
.is_err());
|
||||
// assert `on_task_finished` is not called (because the task is aborted by the handle's drop)
|
||||
assert!(!finished.load(Ordering::Relaxed));
|
||||
|
||||
let finished = Arc::new(AtomicBool::new(false));
|
||||
let finished_clone = finished.clone();
|
||||
let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, || async move {
|
||||
finished_clone.store(true, Ordering::Relaxed)
|
||||
});
|
||||
handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
// assert `on_task_finished` is called when task is finished normally
|
||||
assert!(finished.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_countdown_task_run() {
|
||||
let ctx = &EngineContext::default();
|
||||
let catalog = "my_catalog";
|
||||
let schema = "my_schema";
|
||||
let table = "my_table";
|
||||
let table_id = 1;
|
||||
let request = CreateTableRequest {
|
||||
id: table_id,
|
||||
catalog_name: catalog.to_string(),
|
||||
schema_name: schema.to_string(),
|
||||
table_name: table.to_string(),
|
||||
desc: None,
|
||||
schema: RawSchema {
|
||||
column_schemas: vec![],
|
||||
timestamp_index: None,
|
||||
version: 0,
|
||||
},
|
||||
region_numbers: vec![],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
table_options: TableOptions::default(),
|
||||
engine: "mito".to_string(),
|
||||
};
|
||||
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let _ = table_engine.create_table(ctx, request).await.unwrap();
|
||||
|
||||
let table_ident = TableIdent {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
table: table.to_string(),
|
||||
table_id,
|
||||
engine: "mito".to_string(),
|
||||
};
|
||||
let (tx, rx) = mpsc::channel(10);
|
||||
let mut task = CountdownTask {
|
||||
table_engine: table_engine.clone(),
|
||||
table_ident,
|
||||
region: 1,
|
||||
rx,
|
||||
};
|
||||
let _handle = common_runtime::spawn_bg(async move {
|
||||
task.run().await;
|
||||
});
|
||||
|
||||
async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
|
||||
let (s, r) = oneshot::channel();
|
||||
tx.send(CountdownCommand::Deadline(s)).await.unwrap();
|
||||
r.await.unwrap()
|
||||
}
|
||||
|
||||
// if countdown task is not started, its deadline is set to far future
|
||||
assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
|
||||
|
||||
// start countdown in 250ms * 4 = 1s
|
||||
tx.send(CountdownCommand::Start(250)).await.unwrap();
|
||||
// assert deadline is correctly set
|
||||
assert!(deadline(&tx).await <= Instant::now() + Duration::from_secs(1));
|
||||
|
||||
// reset countdown in 1.5s
|
||||
tx.send(CountdownCommand::Reset(
|
||||
Instant::now() + Duration::from_millis(1500),
|
||||
))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// assert the table is closed after deadline is reached
|
||||
assert!(table_engine.table_exists(ctx, table_id));
|
||||
// spare 500ms for the task to close the table
|
||||
tokio::time::sleep(Duration::from_millis(2000)).await;
|
||||
assert!(!table_engine.table_exists(ctx, table_id));
|
||||
}
|
||||
}
|
||||
src/catalog/src/schema.rs (new file, 69 lines added)
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use table::TableRef;

use crate::error::{NotSupportedSnafu, Result};

/// Represents a schema, comprising a number of named tables.
#[async_trait]
pub trait SchemaProvider: Sync + Send {
    /// Returns the schema provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Retrieves the list of available table names in this schema.
    fn table_names(&self) -> Result<Vec<String>>;

    /// Retrieves a specific table from the schema by name, provided it exists.
    async fn table(&self, name: &str) -> Result<Option<TableRef>>;

    /// If supported by the implementation, adds a new table to this schema.
    /// If a table of the same name existed before, it returns a "Table already exists" error.
    fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
        NotSupportedSnafu {
            op: format!("register_table({name}, <table>)"),
        }
        .fail()
    }

    /// If supported by the implementation, renames an existing table in this schema and returns it.
    /// If no table of that name exists, returns a "Table not found" error.
    fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
        NotSupportedSnafu {
            op: format!("rename_table({name}, {new_name})"),
        }
        .fail()
    }

    /// If supported by the implementation, removes an existing table from this schema and returns it.
    /// If no table of that name exists, returns Ok(None).
    fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
        NotSupportedSnafu {
            op: format!("deregister_table({name})"),
        }
        .fail()
    }

    /// If supported by the implementation, checks whether the table exists in the schema provider.
    /// Returns false if there is no matching table in the schema provider; otherwise returns true.
    fn table_exist(&self, name: &str) -> Result<bool>;
}

pub type SchemaProviderRef = Arc<dyn SchemaProvider>;
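As a minimal sketch of how the trait above can be implemented, the following assumes a hypothetical in-memory `MemorySchemaProvider`; neither the type nor the code is part of this change set, and only the four required methods plus a `register_table` override are shown:

use std::any::Any;
use std::collections::HashMap;
use std::sync::RwLock;

use async_trait::async_trait;
use table::TableRef;

use crate::error::Result;
use crate::schema::SchemaProvider;

/// Hypothetical in-memory schema, for illustration only.
#[derive(Default)]
pub struct MemorySchemaProvider {
    tables: RwLock<HashMap<String, TableRef>>,
}

#[async_trait]
impl SchemaProvider for MemorySchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn table_names(&self) -> Result<Vec<String>> {
        Ok(self.tables.read().unwrap().keys().cloned().collect())
    }

    async fn table(&self, name: &str) -> Result<Option<TableRef>> {
        Ok(self.tables.read().unwrap().get(name).cloned())
    }

    // Override the default so registration succeeds instead of returning NotSupported.
    fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
        Ok(self.tables.write().unwrap().insert(name, table))
    }

    fn table_exist(&self, name: &str) -> Result<bool> {
        Ok(self.tables.read().unwrap().contains_key(name))
    }
}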
@@ -20,6 +20,8 @@ use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
|
||||
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
|
||||
};
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_telemetry::debug;
|
||||
use common_time::util;
|
||||
@@ -28,13 +30,12 @@ use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
|
||||
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::requests::{
|
||||
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
|
||||
};
|
||||
use table::{Result as TableResult, Table, TableRef};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{
|
||||
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
|
||||
@@ -58,12 +59,17 @@ impl Table for SystemCatalogTable {
|
||||
self.0.schema()
|
||||
}
|
||||
|
||||
async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
|
||||
self.0.scan_to_stream(request).await
|
||||
async fn scan(
|
||||
&self,
|
||||
projection: Option<&Vec<usize>>,
|
||||
filters: &[Expr],
|
||||
limit: Option<usize>,
|
||||
) -> table::Result<PhysicalPlanRef> {
|
||||
self.0.scan(projection, filters, limit).await
|
||||
}
|
||||
|
||||
/// Insert values into table.
|
||||
async fn insert(&self, request: InsertRequest) -> TableResult<usize> {
|
||||
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
|
||||
self.0.insert(request).await
|
||||
}
|
||||
|
||||
@@ -71,13 +77,9 @@ impl Table for SystemCatalogTable {
|
||||
self.0.table_info()
|
||||
}
|
||||
|
||||
async fn delete(&self, request: DeleteRequest) -> TableResult<usize> {
|
||||
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
|
||||
self.0.delete(request).await
|
||||
}
|
||||
|
||||
fn statistics(&self) -> Option<table::stats::TableStatistics> {
|
||||
self.0.statistics()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemCatalogTable {
|
||||
@@ -87,7 +89,6 @@ impl SystemCatalogTable {
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let schema = build_system_catalog_schema();
|
||||
let ctx = EngineContext::default();
|
||||
@@ -125,17 +126,14 @@ impl SystemCatalogTable {
|
||||
/// Create a stream of all entries inside system catalog table
|
||||
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
|
||||
let full_projection = None;
|
||||
let scan_req = ScanRequest {
|
||||
sequence: None,
|
||||
projection: full_projection,
|
||||
filters: vec![],
|
||||
output_ordering: None,
|
||||
limit: None,
|
||||
};
|
||||
let stream = self
|
||||
.scan_to_stream(scan_req)
|
||||
let ctx = SessionContext::new();
|
||||
let scan = self
|
||||
.scan(full_projection, &[], None)
|
||||
.await
|
||||
.context(error::SystemCatalogTableScanSnafu)?;
|
||||
let stream = scan
|
||||
.execute(0, ctx.task_ctx())
|
||||
.context(error::SystemCatalogTableScanExecSnafu)?;
|
||||
Ok(stream)
|
||||
}
|
||||
}
|
||||
@@ -203,50 +201,38 @@ pub fn build_table_insert_request(
|
||||
build_insert_request(
|
||||
EntryType::Table,
|
||||
entry_key.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue {
|
||||
table_name,
|
||||
engine,
|
||||
is_deleted: false,
|
||||
})
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue { table_name, engine })
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn build_table_deletion_request(
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> InsertRequest {
|
||||
let entry_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
|
||||
build_insert_request(
|
||||
EntryType::Table,
|
||||
entry_key.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue {
|
||||
table_name: "".to_string(),
|
||||
engine: "".to_string(),
|
||||
is_deleted: true,
|
||||
})
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
)
|
||||
) -> DeleteRequest {
|
||||
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
|
||||
DeleteRequest {
|
||||
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
|
||||
HashMap::from([
|
||||
(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
|
||||
),
|
||||
(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
|
||||
),
|
||||
(
|
||||
"timestamp".to_string(),
|
||||
// Timestamp in key part is intentionally left to 0
|
||||
Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
|
||||
),
|
||||
])
|
||||
let mut m = HashMap::with_capacity(3);
|
||||
m.insert(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
|
||||
);
|
||||
m.insert(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as _,
|
||||
);
|
||||
// Timestamp in key part is intentionally left to 0
|
||||
m.insert(
|
||||
"timestamp".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
|
||||
);
|
||||
m
|
||||
}
|
||||
|
||||
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
|
||||
@@ -266,18 +252,18 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
|
||||
let mut columns_values = HashMap::with_capacity(6);
|
||||
columns_values.extend(primary_key_columns.into_iter());
|
||||
|
||||
let _ = columns_values.insert(
|
||||
columns_values.insert(
|
||||
"value".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[value])) as _,
|
||||
);
|
||||
|
||||
let now = util::current_time_millis();
|
||||
let _ = columns_values.insert(
|
||||
columns_values.insert(
|
||||
"gmt_created".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
|
||||
let _ = columns_values.insert(
|
||||
columns_values.insert(
|
||||
"gmt_modified".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
@@ -347,7 +333,6 @@ pub fn decode_system_catalog(
|
||||
table_name: table_meta.table_name,
|
||||
table_id,
|
||||
engine: table_meta.engine,
|
||||
is_deleted: table_meta.is_deleted,
|
||||
}))
|
||||
}
|
||||
}
|
||||
@@ -404,7 +389,6 @@ pub struct TableEntry {
|
||||
pub table_name: String,
|
||||
pub table_id: TableId,
|
||||
pub engine: String,
|
||||
pub is_deleted: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
@@ -413,19 +397,12 @@ pub struct TableEntryValue {
|
||||
|
||||
#[serde(default = "mito_engine")]
|
||||
pub engine: String,
|
||||
|
||||
#[serde(default = "not_deleted")]
|
||||
pub is_deleted: bool,
|
||||
}
|
||||
|
||||
fn mito_engine() -> String {
|
||||
MITO_ENGINE.to_string()
|
||||
}
|
||||
|
||||
fn not_deleted() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_recordbatch::RecordBatches;
|
||||
@@ -495,13 +472,14 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
pub fn test_decode_mismatch() {
|
||||
assert!(decode_system_catalog(
|
||||
decode_system_catalog(
|
||||
Some(EntryType::Table as u8),
|
||||
Some("some_catalog.some_schema.42".as_bytes()),
|
||||
None,
|
||||
)
|
||||
.is_err());
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -516,7 +494,7 @@ mod tests {
|
||||
let dir = create_temp_dir("system-table-test");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let mut builder = object_store::services::Fs::default();
|
||||
let _ = builder.root(&store_dir);
|
||||
builder.root(&store_dir);
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let table_engine = Arc::new(MitoEngine::new(
|
||||
@@ -526,8 +504,7 @@ mod tests {
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
noop_compaction_scheduler,
|
||||
)
|
||||
.unwrap(),
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
(dir, table_engine)
|
||||
@@ -584,7 +561,6 @@ mod tests {
|
||||
table_name: "my_table".to_string(),
|
||||
table_id: 1,
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
is_deleted: false,
|
||||
});
|
||||
assert_eq!(entry, expected);
|
||||
|
||||
@@ -596,11 +572,11 @@ mod tests {
|
||||
},
|
||||
1,
|
||||
);
|
||||
let result = catalog_table.insert(table_deletion).await.unwrap();
|
||||
let result = catalog_table.delete(table_deletion).await.unwrap();
|
||||
assert_eq!(result, 1);
|
||||
|
||||
let records = catalog_table.records().await.unwrap();
|
||||
let batches = RecordBatches::try_collect(records).await.unwrap().take();
|
||||
assert_eq!(batches.len(), 1);
|
||||
assert_eq!(batches.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,11 +24,14 @@ use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
|
||||
use crate::CatalogManagerRef;
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::CatalogListRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
catalog_list: CatalogListRef,
|
||||
resolved_tables: HashMap<String, Arc<dyn TableSource>>,
|
||||
disallow_cross_schema_query: bool,
|
||||
default_catalog: String,
|
||||
@@ -37,12 +40,12 @@ pub struct DfTableSourceProvider {
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
pub fn new(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
catalog_list: CatalogListRef,
|
||||
disallow_cross_schema_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_manager,
|
||||
catalog_list,
|
||||
disallow_cross_schema_query,
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog(),
|
||||
@@ -59,8 +62,7 @@ impl DfTableSourceProvider {
|
||||
TableReference::Bare { .. } => (),
|
||||
TableReference::Partial { schema, .. } => {
|
||||
ensure!(
|
||||
schema.as_ref() == self.default_schema
|
||||
|| schema.as_ref() == INFORMATION_SCHEMA_NAME,
|
||||
schema.as_ref() == self.default_schema,
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: &self.default_catalog,
|
||||
schema: schema.as_ref(),
|
||||
@@ -72,8 +74,7 @@ impl DfTableSourceProvider {
|
||||
} => {
|
||||
ensure!(
|
||||
catalog.as_ref() == self.default_catalog
|
||||
&& (schema.as_ref() == self.default_schema
|
||||
|| schema.as_ref() == INFORMATION_SCHEMA_NAME),
|
||||
&& schema.as_ref() == self.default_schema,
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: catalog.as_ref(),
|
||||
schema: schema.as_ref()
|
||||
@@ -101,18 +102,36 @@ impl DfTableSourceProvider {
|
||||
let schema_name = table_ref.schema.as_ref();
|
||||
let table_name = table_ref.table.as_ref();
|
||||
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
let schema = if schema_name != INFORMATION_SCHEMA_NAME {
|
||||
let catalog = self
|
||||
.catalog_list
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?
|
||||
} else {
|
||||
let catalog_provider = self
|
||||
.catalog_list
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
catalog_name.to_string(),
|
||||
catalog_provider,
|
||||
))
|
||||
};
|
||||
let table = schema
|
||||
.table(table_name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?;
|
||||
|
||||
let provider = DfTableProviderAdapter::new(table);
|
||||
let source = provider_as_source(Arc::new(provider));
|
||||
let _ = self.resolved_tables.insert(resolved_name, source.clone());
|
||||
Ok(source)
|
||||
let table = DfTableProviderAdapter::new(table);
|
||||
let table = provider_as_source(Arc::new(table));
|
||||
self.resolved_tables.insert(resolved_name, table.clone());
|
||||
Ok(table)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,14 +155,14 @@ mod tests {
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
let _ = result.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
let _ = result.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("wrong_schema"),
|
||||
@@ -158,7 +177,7 @@ mod tests {
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
let _ = result.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("wrong_catalog"),
|
||||
@@ -167,25 +186,5 @@ mod tests {
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_err());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("dummy"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,24 +14,49 @@
|
||||
|
||||
// The `tables` table in system catalog keeps a record of all tables created by user.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::logging;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
|
||||
use snafu::ResultExt;
|
||||
use table::metadata::TableId;
|
||||
use table::Table;
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{self, InsertCatalogRecordSnafu, Result as CatalogResult};
|
||||
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
|
||||
use crate::system::{
|
||||
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
|
||||
SystemCatalogTable,
|
||||
};
|
||||
use crate::DeregisterTableRequest;
|
||||
use crate::{CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef};
|
||||
|
||||
pub struct InformationSchema {
|
||||
pub system: Arc<SystemCatalogTable>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for InformationSchema {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>, Error> {
|
||||
Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
|
||||
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
|
||||
Ok(Some(self.system.clone()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool, Error> {
|
||||
Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SystemCatalog {
|
||||
pub information_schema: Arc<InformationSchema>,
|
||||
}
|
||||
@@ -66,21 +91,12 @@ impl SystemCatalog {
|
||||
&self,
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> CatalogResult<()> {
|
||||
) -> CatalogResult<bool> {
|
||||
self.information_schema
|
||||
.system
|
||||
.insert(build_table_deletion_request(request, table_id))
|
||||
.delete(build_table_deletion_request(request, table_id))
|
||||
.await
|
||||
.map(|x| {
|
||||
if x != 1 {
|
||||
let table = common_catalog::format_full_table_name(
|
||||
&request.catalog,
|
||||
&request.schema,
|
||||
&request.table_name
|
||||
);
|
||||
logging::warn!("Failed to delete table record from information_schema, unexpected returned result: {x}, table: {table}");
|
||||
}
|
||||
})
|
||||
.map(|x| x == 1)
|
||||
.with_context(|_| error::DeregisterTableSnafu {
|
||||
request: request.clone(),
|
||||
})
|
||||
@@ -99,3 +115,29 @@ impl SystemCatalog {
|
||||
.context(InsertCatalogRecordSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProvider for SystemCatalog {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>, Error> {
|
||||
Ok(vec![INFORMATION_SCHEMA_NAME.to_string()])
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
&self,
|
||||
_name: String,
|
||||
_schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>, Error> {
|
||||
panic!("System catalog does not support registering schema!")
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>, Error> {
|
||||
if name.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
|
||||
Ok(Some(self.information_schema.clone()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,16 +20,14 @@ mod tests {
|
||||
use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::{error, info};
|
||||
use common_test_util::temp_dir::TempDir;
|
||||
use mito::config::EngineConfig;
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
async fn create_local_catalog_manager(
|
||||
) -> Result<(TempDir, LocalCatalogManager), catalog::error::Error> {
|
||||
let (dir, object_store) =
|
||||
async fn create_local_catalog_manager() -> Result<LocalCatalogManager, catalog::error::Error> {
|
||||
let (_dir, object_store) =
|
||||
mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
|
||||
let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
|
||||
EngineConfig::default(),
|
||||
@@ -39,13 +37,13 @@ mod tests {
|
||||
let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
|
||||
let catalog_manager = LocalCatalogManager::try_new(engine_manager).await.unwrap();
|
||||
catalog_manager.start().await?;
|
||||
Ok((dir, catalog_manager))
|
||||
Ok(catalog_manager)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rename_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
|
||||
let catalog_manager = create_local_catalog_manager().await.unwrap();
|
||||
// register table
|
||||
let table_name = "test_table";
|
||||
let table_id = 42;
|
||||
@@ -83,7 +81,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_duplicate_register() {
|
||||
let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
|
||||
let catalog_manager = create_local_catalog_manager().await.unwrap();
|
||||
let request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
@@ -120,9 +118,8 @@ mod tests {
|
||||
fn test_concurrent_register() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
|
||||
let (_dir, catalog_manager) =
|
||||
rt.block_on(async { create_local_catalog_manager().await.unwrap() });
|
||||
let catalog_manager = Arc::new(catalog_manager);
|
||||
let catalog_manager =
|
||||
Arc::new(rt.block_on(async { create_local_catalog_manager().await.unwrap() }));
|
||||
|
||||
let succeed: Arc<Mutex<Option<TableRef>>> = Arc::new(Mutex::new(None));
|
||||
|
||||
|
||||
src/catalog/tests/mock.rs (new file, 228 lines added)
@@ -0,0 +1,228 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::btree_map::Entry;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::stream;
|
||||
use catalog::error::Error;
|
||||
use catalog::remote::{Kv, KvBackend, ValueIter};
|
||||
use common_recordbatch::RecordBatch;
|
||||
use common_telemetry::logging::info;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::StringVector;
|
||||
use serde::Serializer;
|
||||
use table::engine::{EngineContext, TableEngine, TableReference};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
|
||||
use table::test_util::MemTable;
|
||||
use table::TableRef;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MockKvBackend {
|
||||
map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl Display for MockKvBackend {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
futures::executor::block_on(async {
|
||||
let map = self.map.read().await;
|
||||
for (k, v) in map.iter() {
|
||||
f.serialize_str(&String::from_utf8_lossy(k))?;
|
||||
f.serialize_str(" -> ")?;
|
||||
f.serialize_str(&String::from_utf8_lossy(v))?;
|
||||
f.serialize_str("\n")?;
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MockKvBackend {
|
||||
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
{
|
||||
let prefix = key.to_vec();
|
||||
let prefix_string = String::from_utf8_lossy(&prefix).to_string();
|
||||
Box::pin(stream!({
|
||||
let maps = self.map.read().await.clone();
|
||||
for (k, v) in maps.range(prefix.clone()..) {
|
||||
let key_string = String::from_utf8_lossy(k).to_string();
|
||||
let matches = key_string.starts_with(&prefix_string);
|
||||
if matches {
|
||||
yield Ok(Kv(k.clone(), v.clone()))
|
||||
} else {
|
||||
info!("Stream finished");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
||||
let mut map = self.map.write().await;
|
||||
map.insert(key.to_vec(), val.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn compare_and_set(
|
||||
&self,
|
||||
key: &[u8],
|
||||
expect: &[u8],
|
||||
val: &[u8],
|
||||
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
|
||||
let mut map = self.map.write().await;
|
||||
let existing = map.entry(key.to_vec());
|
||||
match existing {
|
||||
Entry::Vacant(e) => {
|
||||
if expect.is_empty() {
|
||||
e.insert(val.to_vec());
|
||||
Ok(Ok(()))
|
||||
} else {
|
||||
Ok(Err(None))
|
||||
}
|
||||
}
|
||||
Entry::Occupied(mut existing) => {
|
||||
if existing.get() == expect {
|
||||
existing.insert(val.to_vec());
|
||||
Ok(Ok(()))
|
||||
} else {
|
||||
Ok(Err(Some(existing.get().clone())))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
|
||||
let start = key.to_vec();
|
||||
let end = end.to_vec();
|
||||
let range = start..end;
|
||||
|
||||
let mut map = self.map.write().await;
|
||||
map.retain(|k, _| !range.contains(k));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MockTableEngine {
|
||||
tables: RwLock<HashMap<String, TableRef>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableEngine for MockTableEngine {
|
||||
fn name(&self) -> &str {
|
||||
"MockTableEngine"
|
||||
}
|
||||
|
||||
/// Create a table with only one column
|
||||
async fn create_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: CreateTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
let table_name = request.table_name.clone();
|
||||
let catalog_name = request.catalog_name.clone();
|
||||
let schema_name = request.schema_name.clone();
|
||||
|
||||
let default_table_id = "0".to_owned();
|
||||
let table_id = TableId::from_str(
|
||||
request
|
||||
.table_options
|
||||
.extra_options
|
||||
.get("table_id")
|
||||
.unwrap_or(&default_table_id),
|
||||
)
|
||||
.unwrap();
|
||||
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
|
||||
"name",
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
)]));
|
||||
|
||||
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
|
||||
let record_batch = RecordBatch::new(schema, data).unwrap();
|
||||
let table: TableRef = Arc::new(MemTable::new_with_catalog(
|
||||
&table_name,
|
||||
record_batch,
|
||||
table_id,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
vec![0],
|
||||
)) as Arc<_>;
|
||||
|
||||
let mut tables = self.tables.write().await;
|
||||
tables.insert(table_name, table.clone() as TableRef);
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn open_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: OpenTableRequest,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
Ok(self.tables.read().await.get(&request.table_name).cloned())
|
||||
}
|
||||
|
||||
async fn alter_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: AlterTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
table_ref: &TableReference,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
futures::executor::block_on(async {
|
||||
Ok(self
|
||||
.tables
|
||||
.read()
|
||||
.await
|
||||
.get(&table_ref.to_string())
|
||||
.cloned())
|
||||
})
|
||||
}
|
||||
|
||||
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
|
||||
futures::executor::block_on(async {
|
||||
self.tables
|
||||
.read()
|
||||
.await
|
||||
.contains_key(&table_ref.to_string())
|
||||
})
|
||||
}
|
||||
|
||||
async fn drop_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: DropTableRequest,
|
||||
) -> table::Result<bool> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn close(&self) -> table::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
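A small usage sketch of the compare-and-set contract implemented above (editorial, not in the change set; it assumes it lives next to the mock, and the key and values are arbitrary examples): an empty `expect` means "insert only if absent", and a mismatch returns the current value.

use catalog::error::Error;
use catalog::remote::KvBackend;

async fn try_init_key(backend: &MockKvBackend) -> Result<(), Error> {
    // Insert only if the key is absent; an empty `expect` matches a vacant entry.
    match backend.compare_and_set(b"__c-greptime", b"", b"v1").await? {
        Ok(()) => Ok(()),
        Err(current) => {
            // Someone else set it first; `current` carries the existing value, if any.
            println!("already initialized: {current:?}");
            Ok(())
        }
    }
}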
@@ -14,46 +14,32 @@
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
mod mock;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
|
||||
use catalog::remote::mock::MockTableEngine;
|
||||
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
|
||||
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
|
||||
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
|
||||
use catalog::remote::{
|
||||
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
|
||||
};
|
||||
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
|
||||
use common_meta::ident::TableIdent;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use common_meta::kv_backend::KvBackend;
|
||||
use datatypes::schema::RawSchema;
|
||||
use futures_util::StreamExt;
|
||||
use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
|
||||
use table::engine::manager::MemoryTableEngineManager;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::requests::CreateTableRequest;
|
||||
use table::test_util::EmptyTable;
|
||||
use tokio::time::Instant;
|
||||
|
||||
struct TestingComponents {
|
||||
catalog_manager: Arc<RemoteCatalogManager>,
|
||||
table_engine_manager: TableEngineManagerRef,
|
||||
region_alive_keepers: Arc<RegionAliveKeepers>,
|
||||
}
|
||||
|
||||
impl TestingComponents {
|
||||
fn table_engine(&self) -> TableEngineRef {
|
||||
self.table_engine_manager.engine(MITO_ENGINE).unwrap()
|
||||
}
|
||||
}
|
||||
use crate::mock::{MockKvBackend, MockTableEngine};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_backend() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let backend = MemoryKvBackend::default();
|
||||
let backend = MockKvBackend::default();
|
||||
|
||||
let default_catalog_key = CatalogKey {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -82,7 +68,7 @@ mod tests {
|
||||
let mut res = HashSet::new();
|
||||
while let Some(r) = iter.next().await {
|
||||
let kv = r.unwrap();
|
||||
let _ = res.insert(String::from_utf8_lossy(&kv.0).to_string());
|
||||
res.insert(String::from_utf8_lossy(&kv.0).to_string());
|
||||
}
|
||||
assert_eq!(
|
||||
vec!["__c-greptime".to_string()],
|
||||
@@ -90,97 +76,37 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cached_backend() {
|
||||
let backend = CachedMetaKvBackend::wrap(Arc::new(MemoryKvBackend::default()));
|
||||
|
||||
let default_catalog_key = CatalogKey {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
}
|
||||
.to_string();
|
||||
|
||||
backend
|
||||
.set(
|
||||
default_catalog_key.as_bytes(),
|
||||
&CatalogValue {}.as_bytes().unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let ret = backend.get(b"__c-greptime").await.unwrap();
|
||||
let _ = ret.unwrap();
|
||||
|
||||
let _ = backend
|
||||
.compare_and_set(
|
||||
b"__c-greptime",
|
||||
&CatalogValue {}.as_bytes().unwrap(),
|
||||
b"123",
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let ret = backend.get(b"__c-greptime").await.unwrap();
|
||||
assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
|
||||
|
||||
let _ = backend.set(b"__c-greptime", b"1234").await;
|
||||
|
||||
let ret = backend.get(b"__c-greptime").await.unwrap();
|
||||
assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
|
||||
|
||||
backend.delete(b"__c-greptime").await.unwrap();
|
||||
|
||||
let ret = backend.get(b"__c-greptime").await.unwrap();
|
||||
assert!(ret.is_none());
|
||||
}
|
||||
|
||||
async fn prepare_components(node_id: u64) -> TestingComponents {
|
||||
let backend = Arc::new(MemoryKvBackend::default());
|
||||
backend.set(b"__c-greptime", b"").await.unwrap();
|
||||
backend.set(b"__s-greptime-public", b"").await.unwrap();
|
||||
|
||||
let cached_backend = Arc::new(CachedMetaKvBackend::wrap(backend));
|
||||
|
||||
async fn prepare_components(
|
||||
node_id: u64,
|
||||
) -> (KvBackendRef, TableEngineRef, Arc<RemoteCatalogManager>) {
|
||||
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
|
||||
let table_engine = Arc::new(MockTableEngine::default());
|
||||
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
|
||||
MITO_ENGINE.to_string(),
|
||||
table_engine,
|
||||
table_engine.clone(),
|
||||
));
|
||||
|
||||
let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager.clone(), 5000));
|
||||
|
||||
let catalog_manager = RemoteCatalogManager::new(
|
||||
engine_manager.clone(),
|
||||
node_id,
|
||||
cached_backend.clone(),
|
||||
region_alive_keepers.clone(),
|
||||
);
|
||||
let catalog_manager = RemoteCatalogManager::new(engine_manager, node_id, backend.clone());
|
||||
catalog_manager.start().await.unwrap();
|
||||
|
||||
TestingComponents {
|
||||
catalog_manager: Arc::new(catalog_manager),
|
||||
table_engine_manager: engine_manager,
|
||||
region_alive_keepers,
|
||||
}
|
||||
(backend, table_engine, Arc::new(catalog_manager))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remote_catalog_default() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_id = 42;
|
||||
let TestingComponents {
|
||||
catalog_manager, ..
|
||||
} = prepare_components(node_id).await;
|
||||
let (_, _, catalog_manager) = prepare_components(node_id).await;
|
||||
assert_eq!(
|
||||
vec![DEFAULT_CATALOG_NAME.to_string()],
|
||||
catalog_manager.catalog_names().await.unwrap()
|
||||
catalog_manager.catalog_names().unwrap()
|
||||
);
|
||||
|
||||
let default_catalog = catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
vec![DEFAULT_SCHEMA_NAME.to_string()],
|
||||
catalog_manager
|
||||
.schema_names(DEFAULT_CATALOG_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
default_catalog.schema_names().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
@@ -188,16 +114,14 @@ mod tests {
|
||||
async fn test_remote_catalog_register_nonexistent() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_id = 42;
|
||||
let components = prepare_components(node_id).await;
|
||||
|
||||
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
// register a new table with an nonexistent catalog
|
||||
let catalog_name = "nonexistent_catalog".to_string();
|
||||
let schema_name = "nonexistent_schema".to_string();
|
||||
let table_name = "fail_table".to_string();
|
||||
// this schema has no effect
|
||||
let table_schema = RawSchema::new(vec![]);
|
||||
let table = components
|
||||
.table_engine()
|
||||
let table = table_engine
|
||||
.create_table(
|
||||
&EngineContext {},
|
||||
CreateTableRequest {
|
||||
@@ -223,7 +147,7 @@ mod tests {
|
||||
table_id: 1,
|
||||
table,
|
||||
};
|
||||
let res = components.catalog_manager.register_table(reg_req).await;
|
||||
let res = catalog_manager.register_table(reg_req).await;
|
||||
|
||||
// because nonexistent_catalog does not exist yet.
|
||||
assert_matches!(
|
||||
@@ -235,16 +159,21 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_register_table() {
|
||||
let node_id = 42;
|
||||
let components = prepare_components(node_id).await;
|
||||
let (_, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
let default_catalog = catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
vec![DEFAULT_SCHEMA_NAME.to_string()],
|
||||
components
|
||||
.catalog_manager
|
||||
.schema_names(DEFAULT_CATALOG_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
default_catalog.schema_names().unwrap()
|
||||
);
|
||||
|
||||
let default_schema = default_catalog
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
// register a new table with an nonexistent catalog
|
||||
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
|
||||
let schema_name = DEFAULT_SCHEMA_NAME.to_string();
|
||||
@@ -252,8 +181,7 @@ mod tests {
|
||||
let table_id = 1;
|
||||
// this schema has no effect
|
||||
let table_schema = RawSchema::new(vec![]);
|
||||
let table = components
|
||||
.table_engine()
|
||||
let table = table_engine
|
||||
.create_table(
|
||||
&EngineContext {},
|
||||
CreateTableRequest {
|
||||
@@ -279,51 +207,35 @@ mod tests {
|
||||
table_id,
|
||||
table,
|
||||
};
|
||||
assert!(components
|
||||
.catalog_manager
|
||||
.register_table(reg_req)
|
||||
.await
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
vec![table_name],
|
||||
components
|
||||
.catalog_manager
|
||||
.table_names(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.await
|
||||
.unwrap()
|
||||
);
|
||||
assert!(catalog_manager.register_table(reg_req).await.unwrap());
|
||||
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_catalog_schema_table() {
|
||||
let node_id = 42;
|
||||
let components = prepare_components(node_id).await;
|
||||
let (backend, table_engine, catalog_manager) = prepare_components(node_id).await;
|
||||
|
||||
let catalog_name = "test_catalog".to_string();
|
||||
let schema_name = "nonexistent_schema".to_string();
|
||||
let catalog = Arc::new(RemoteCatalogProvider::new(
|
||||
catalog_name.clone(),
|
||||
backend.clone(),
|
||||
node_id,
|
||||
));
|
||||
|
||||
// register catalog to catalog manager
|
||||
assert!(components
|
||||
.catalog_manager
|
||||
.register_catalog(catalog_name.clone())
|
||||
.await
|
||||
.is_ok());
|
||||
catalog_manager
|
||||
.register_catalog(catalog_name.clone(), catalog)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
HashSet::<String>::from_iter(
|
||||
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
|
||||
),
|
||||
HashSet::from_iter(
|
||||
components
|
||||
.catalog_manager
|
||||
.catalog_names()
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
)
|
||||
HashSet::from_iter(catalog_manager.catalog_names().unwrap().into_iter())
|
||||
);
|
||||
|
||||
let table_to_register = components
|
||||
.table_engine()
|
||||
let table_to_register = table_engine
|
||||
.create_table(
|
||||
&EngineContext {},
|
||||
CreateTableRequest {
|
||||
@@ -352,128 +264,33 @@ mod tests {
|
||||
};
|
||||
// this register will fail since schema does not exist yet
|
||||
assert_matches!(
|
||||
components
|
||||
.catalog_manager
|
||||
catalog_manager
|
||||
.register_table(reg_req.clone())
|
||||
.await
|
||||
.unwrap_err(),
|
||||
catalog::error::Error::SchemaNotFound { .. }
|
||||
);
|
||||
|
||||
let register_schema_request = RegisterSchemaRequest {
|
||||
catalog: catalog_name.to_string(),
|
||||
schema: schema_name.to_string(),
|
||||
};
|
||||
assert!(components
|
||||
.catalog_manager
|
||||
.register_schema(register_schema_request)
|
||||
.await
|
||||
.expect("Register schema should not fail"));
|
||||
assert!(components
|
||||
.catalog_manager
|
||||
.register_table(reg_req)
|
||||
.await
|
||||
.unwrap());
|
||||
let new_catalog = catalog_manager
|
||||
.catalog(&catalog_name)
|
||||
.unwrap()
|
||||
.expect("catalog should exist since it's already registered");
|
||||
let schema = Arc::new(RemoteSchemaProvider::new(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
node_id,
|
||||
backend.clone(),
|
||||
));
|
||||
|
||||
let prev = new_catalog
|
||||
.register_schema(schema_name.clone(), schema.clone())
|
||||
.expect("Register schema should not fail");
|
||||
assert!(prev.is_none());
|
||||
assert!(catalog_manager.register_table(reg_req).await.unwrap());
|
||||
|
||||
assert_eq!(
|
||||
HashSet::from([schema_name.clone()]),
|
||||
components
|
||||
.catalog_manager
|
||||
.schema_names(&catalog_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.collect()
|
||||
new_catalog.schema_names().unwrap().into_iter().collect()
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_table_before_and_after_region_alive_keeper_started() {
|
||||
let components = prepare_components(42).await;
|
||||
let catalog_manager = &components.catalog_manager;
|
||||
let region_alive_keepers = &components.region_alive_keepers;
|
||||
|
||||
let table_before = TableIdent {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: "table_before".to_string(),
|
||||
table_id: 1,
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
};
|
||||
let request = RegisterTableRequest {
|
||||
catalog: table_before.catalog.clone(),
|
||||
schema: table_before.schema.clone(),
|
||||
table_name: table_before.table.clone(),
|
||||
table_id: table_before.table_id,
|
||||
table: Arc::new(EmptyTable::new(CreateTableRequest {
|
||||
id: table_before.table_id,
|
||||
catalog_name: table_before.catalog.clone(),
|
||||
schema_name: table_before.schema.clone(),
|
||||
table_name: table_before.table.clone(),
|
||||
desc: None,
|
||||
schema: RawSchema::new(vec![]),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
table_options: Default::default(),
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
})),
|
||||
};
|
||||
assert!(catalog_manager.register_table(request).await.unwrap());
|
||||
|
||||
let keeper = region_alive_keepers
|
||||
.find_keeper(&table_before)
|
||||
.await
|
||||
.unwrap();
|
||||
let deadline = keeper.deadline(0).await.unwrap();
|
||||
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
|
||||
// assert region alive countdown is not started
|
||||
assert!(deadline > far_future);
|
||||
|
||||
region_alive_keepers.start().await;
|
||||
|
||||
let table_after = TableIdent {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: "table_after".to_string(),
|
||||
table_id: 2,
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
};
|
||||
let request = RegisterTableRequest {
|
||||
catalog: table_after.catalog.clone(),
|
||||
schema: table_after.schema.clone(),
|
||||
table_name: table_after.table.clone(),
|
||||
table_id: table_after.table_id,
|
||||
table: Arc::new(EmptyTable::new(CreateTableRequest {
|
||||
id: table_after.table_id,
|
||||
catalog_name: table_after.catalog.clone(),
|
||||
schema_name: table_after.schema.clone(),
|
||||
table_name: table_after.table.clone(),
|
||||
desc: None,
|
||||
schema: RawSchema::new(vec![]),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![],
|
||||
create_if_not_exists: false,
|
||||
table_options: Default::default(),
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
})),
|
||||
};
|
||||
assert!(catalog_manager.register_table(request).await.unwrap());
|
||||
|
||||
let keeper = region_alive_keepers
|
||||
.find_keeper(&table_after)
|
||||
.await
|
||||
.unwrap();
|
||||
let deadline = keeper.deadline(0).await.unwrap();
|
||||
// assert countdown is started for the table registered after [RegionAliveKeepers] started
|
||||
assert!(deadline <= Instant::now() + Duration::from_secs(20));
|
||||
|
||||
let keeper = region_alive_keepers
|
||||
.find_keeper(&table_before)
|
||||
.await
|
||||
.unwrap();
|
||||
let deadline = keeper.deadline(0).await.unwrap();
|
||||
// assert countdown is started for the table registered before [RegionAliveKeepers] started, too
|
||||
assert!(deadline <= Instant::now() + Duration::from_secs(20));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
testing = []

[dependencies]
api = { path = "../api" }
arrow-flight.workspace = true
@@ -19,29 +16,25 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-meta = { path = "../common/meta" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
snafu.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true

[dev-dependencies]
datanode = { path = "../datanode" }
derive-new = "0.5"
substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
prost.workspace = true

[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.7"
version = "0.4"

@@ -63,7 +63,7 @@ async fn run() {
        create_if_not_exists: false,
        table_options: Default::default(),
        table_id: Some(TableId { id: 1024 }),
        region_numbers: vec![0],
        region_ids: vec![0],
        engine: MITO_ENGINE.to_string(),
    };

@@ -1,183 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::column::*;
|
||||
use api::v1::*;
|
||||
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
|
||||
use derive_new::new;
|
||||
use tracing::{error, info};
|
||||
|
||||
fn main() {
|
||||
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
|
||||
.unwrap();
|
||||
|
||||
run();
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn run() {
|
||||
let greptimedb_endpoint =
|
||||
std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned());
|
||||
|
||||
let greptimedb_dbname =
|
||||
std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned());
|
||||
|
||||
let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]);
|
||||
|
||||
let client = Database::new_with_dbname(greptimedb_dbname, grpc_client);
|
||||
|
||||
let stream_inserter = client.streaming_inserter().unwrap();
|
||||
|
||||
if let Err(e) = stream_inserter
|
||||
.insert(vec![to_insert_request(weather_records_1())])
|
||||
.await
|
||||
{
|
||||
error!("Error: {e}");
|
||||
}
|
||||
|
||||
if let Err(e) = stream_inserter
|
||||
.insert(vec![to_insert_request(weather_records_2())])
|
||||
.await
|
||||
{
|
||||
error!("Error: {e}");
|
||||
}
|
||||
|
||||
let result = stream_inserter.finish().await;
|
||||
|
||||
match result {
|
||||
Ok(rows) => {
|
||||
info!("Rows written: {rows}");
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error: {e}");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(new)]
|
||||
struct WeatherRecord {
|
||||
timestamp_millis: i64,
|
||||
collector: String,
|
||||
temperature: f32,
|
||||
humidity: i32,
|
||||
}
|
||||
|
||||
fn weather_records_1() -> Vec<WeatherRecord> {
|
||||
vec![
|
||||
WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15),
|
||||
WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20),
|
||||
WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13),
|
||||
WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67),
|
||||
WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74),
|
||||
WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81),
|
||||
]
|
||||
}
|
||||
|
||||
fn weather_records_2() -> Vec<WeatherRecord> {
|
||||
vec![
|
||||
WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15),
|
||||
WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20),
|
||||
WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13),
|
||||
WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67),
|
||||
WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74),
|
||||
WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81),
|
||||
]
|
||||
}
|
||||
|
||||
/// This function generates some random data and bundle them into a
|
||||
/// `InsertRequest`.
|
||||
///
|
||||
/// Data structure:
|
||||
///
|
||||
/// - `ts`: a timestamp column
|
||||
/// - `collector`: a tag column
|
||||
/// - `temperature`: a value field of f32
|
||||
/// - `humidity`: a value field of i32
|
||||
///
|
||||
fn to_insert_request(records: Vec<WeatherRecord>) -> InsertRequest {
|
||||
// convert records into columns
|
||||
let rows = records.len();
|
||||
|
||||
// transpose records into columns
|
||||
let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold(
|
||||
(
|
||||
Vec::with_capacity(rows),
|
||||
Vec::with_capacity(rows),
|
||||
Vec::with_capacity(rows),
|
||||
Vec::with_capacity(rows),
|
||||
),
|
||||
|mut acc, rec| {
|
||||
acc.0.push(rec.timestamp_millis);
|
||||
acc.1.push(rec.collector);
|
||||
acc.2.push(rec.temperature);
|
||||
acc.3.push(rec.humidity);
|
||||
|
||||
acc
|
||||
},
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
// timestamp column: `ts`
|
||||
Column {
|
||||
column_name: "ts".to_owned(),
|
||||
values: Some(column::Values {
|
||||
ts_millisecond_values: timestamp_millis,
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
..Default::default()
|
||||
},
|
||||
// tag column: collectors
|
||||
Column {
|
||||
column_name: "collector".to_owned(),
|
||||
values: Some(column::Values {
|
||||
string_values: collectors.into_iter().collect(),
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
datatype: ColumnDataType::String as i32,
|
||||
..Default::default()
|
||||
},
|
||||
// field column: temperature
|
||||
Column {
|
||||
column_name: "temperature".to_owned(),
|
||||
values: Some(column::Values {
|
||||
f32_values: temp,
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype: ColumnDataType::Float32 as i32,
|
||||
..Default::default()
|
||||
},
|
||||
// field column: humidity
|
||||
Column {
|
||||
column_name: "humidity".to_owned(),
|
||||
values: Some(column::Values {
|
||||
i32_values: humidity,
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype: ColumnDataType::Int32 as i32,
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
|
||||
InsertRequest {
|
||||
table_name: "weather_demo".to_owned(),
|
||||
columns,
|
||||
row_count: rows as u32,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,6 @@ use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
@@ -157,15 +156,10 @@ impl Client {
        })
    }

    pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
        let (_, channel) = self.find_channel()?;
        Ok(PrometheusGatewayClient::new(channel))
    }

    pub async fn health_check(&self) -> Result<()> {
        let (_, channel) = self.find_channel()?;
        let mut client = HealthCheckClient::new(channel);
        let _ = client.health_check(HealthCheckRequest {}).await?;
        client.health_check(HealthCheckRequest {}).await?;
        Ok(())
    }
}

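A hedged usage sketch of the health-check path shown above (not part of the diff; the endpoint is an example value, while `Client::with_urls` and `health_check` are the APIs that appear in this change):

use client::Client;

#[tokio::main]
async fn main() {
    // "localhost:4001" is only an example gRPC endpoint.
    let client = Client::with_urls(vec!["localhost:4001"]);
    if let Err(e) = client.health_check().await {
        eprintln!("greptimedb health check failed: {e}");
    }
}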
@@ -18,14 +18,14 @@ use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery,
QueryRequest, RequestHeader,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::{logging, timer};
use common_telemetry::logging;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
@@ -33,7 +33,7 @@ use snafu::{ensure, ResultExt};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, metrics, Client, Result, StreamInserter};
use crate::{error, Client, Result};

#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -56,9 +56,8 @@ impl Database {
Self {
catalog: catalog.into(),
schema: schema.into(),
dbname: "".to_string(),
client,
ctx: FlightContext::default(),
..Default::default()
}
}

@@ -71,11 +70,9 @@ impl Database {
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
catalog: "".to_string(),
schema: "".to_string(),
dbname: dbname.into(),
client,
ctx: FlightContext::default(),
..Default::default()
}
}

@@ -109,39 +106,17 @@ impl Database {
});
}

pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
self.handle(Request::Inserts(requests)).await
}

pub fn streaming_inserter(&self) -> Result<StreamInserter> {
self.streaming_inserter_with_channel_size(65536)
}

pub fn streaming_inserter_with_channel_size(
&self,
channel_size: usize,
) -> Result<StreamInserter> {
let client = self.client.make_database_client()?.inner;

let stream_inserter = StreamInserter::new(
client,
self.dbname().to_string(),
self.ctx.auth_header.clone(),
channel_size,
);

Ok(stream_inserter)
}

pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_DELETE);
self.handle(Request::Delete(request)).await
}

async fn handle(&self, request: Request) -> Result<u32> {
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = self.to_rpc_request(request);
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
@@ -154,21 +129,7 @@ impl Database {
Ok(value)
}

#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
}
}

pub async fn sql(&self, sql: &str) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_SQL);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::Sql(sql.to_string())),
}))
@@ -176,7 +137,6 @@ impl Database {
}

pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::LogicalPlan(logical_plan)),
}))
@@ -190,7 +150,6 @@ impl Database {
end: &str,
step: &str,
) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
self.do_get(Request::Query(QueryRequest {
query: Some(Query::PromRangeQuery(PromRangeQuery {
query: promql.to_string(),
@@ -203,7 +162,6 @@ impl Database {
}

pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
}))
@@ -211,7 +169,6 @@ impl Database {
}

pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_ALTER);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::Alter(expr)),
}))
@@ -219,7 +176,6 @@ impl Database {
}

pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::DropTable(expr)),
}))
@@ -227,7 +183,6 @@ impl Database {
}

pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
let _timer = timer!(metrics::METRIC_GRPC_FLUSH_TABLE);
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
@@ -235,15 +190,22 @@ impl Database {
}

async fn do_get(&self, request: Request) -> Result<Output> {
// FIXME(paomian): should be added some labels for metrics
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
let request = self.to_rpc_request(request);
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
let request = Ticket {
ticket: request.encode_to_vec().into(),
};

let mut client = self.client.make_flight_client()?;

// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
.mut_inner()
.do_get(request)

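Illustrative only, not part of the diff above: a rough sketch of how the `Database` API surface reads after this change, based on the signatures shown in the hunks (`Client::with_urls`, `Database::new_with_dbname`, `Database::sql`). The endpoint address, database name, and SQL text are hypothetical placeholders.

// Sketch under the assumptions stated above.
use client::{Client, Database};

async fn run_query() -> client::Result<()> {
    // Connect to a frontend gRPC endpoint (address is a placeholder).
    let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
    // Bind the client to a database name instead of an explicit catalog/schema.
    let db = Database::new_with_dbname("public", grpc_client);
    // `sql` goes through the Flight `do_get` path and yields an `Output`.
    let _output = db.sql("SELECT 1").await?;
    Ok(())
}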
@@ -34,13 +34,13 @@ pub enum Error {

#[snafu(display("Failed to convert FlightData, source: {}", source))]
ConvertFlightData {
location: Location,
#[snafu(backtrace)]
source: common_grpc::Error,
},

#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
location: Location,
#[snafu(backtrace)]
source: api::error::Error,
},

@@ -57,7 +57,7 @@ pub enum Error {
))]
CreateChannel {
addr: String,
location: Location,
#[snafu(backtrace)]
source: common_grpc::error::Error,
},

@@ -67,9 +67,6 @@ pub enum Error {

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },

#[snafu(display("Failed to send request with streaming: {}", err_msg))]
ClientStreaming { err_msg: String, location: Location },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -80,12 +77,11 @@ impl ErrorExt for Error {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. }
| Error::ClientStreaming { .. } => StatusCode::Internal,
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

@@ -13,12 +13,9 @@
// limitations under the License.

mod client;
pub mod client_manager;
mod database;
mod error;
pub mod load_balance;
mod metrics;
mod stream_insert;

pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -26,4 +23,3 @@ pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;

@@ -60,7 +60,7 @@ mod tests {
let random = Random;
for _ in 0..100 {
let peer = random.get_peer(&peers).unwrap();
assert!(all.contains(peer));
all.contains(peer);
}
}
}

@@ -1,25 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! client metrics
pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
pub const METRIC_GRPC_DELETE: &str = "grpc.delete";
pub const METRIC_GRPC_SQL: &str = "grpc.sql";
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
@@ -1,115 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
greptime_response, AffectedRows, AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest,
InsertRequests, RequestHeader,
};
use snafu::OptionExt;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};

use crate::error::{self, IllegalDatabaseResponseSnafu, Result};

/// A structure that provides some methods for streaming data insert.
///
/// [`StreamInserter`] cannot be constructed via the `StreamInserter::new` method.
/// You can use the following way to obtain [`StreamInserter`].
///
/// ```ignore
/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
/// let client = Database::new_with_dbname("db_name", grpc_client);
/// let stream_inserter = client.streaming_inserter().unwrap();
/// ```
///
/// If you want to see a concrete usage example, please see
/// [stream_inserter.rs](https://github.com/GreptimeTeam/greptimedb/blob/develop/src/client/examples/stream_ingest.rs).
pub struct StreamInserter {
sender: mpsc::Sender<GreptimeRequest>,

auth_header: Option<AuthHeader>,

dbname: String,

join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>>,
}

impl StreamInserter {
pub(crate) fn new(
mut client: GreptimeDatabaseClient<Channel>,
dbname: String,
auth_header: Option<AuthHeader>,
channel_size: usize,
) -> StreamInserter {
let (send, recv) = tokio::sync::mpsc::channel(channel_size);

let join: JoinHandle<std::result::Result<Response<GreptimeResponse>, Status>> =
tokio::spawn(async move {
let recv_stream = ReceiverStream::new(recv);
client.handle_requests(recv_stream).await
});

StreamInserter {
sender: send,
auth_header,
dbname,
join,
}
}

pub async fn insert(&self, requests: Vec<InsertRequest>) -> Result<()> {
let inserts = InsertRequests { inserts: requests };
let request = self.to_rpc_request(Request::Inserts(inserts));

self.sender.send(request).await.map_err(|e| {
error::ClientStreamingSnafu {
err_msg: e.to_string(),
}
.build()
})
}

pub async fn finish(self) -> Result<u32> {
drop(self.sender);

let response = self.join.await.unwrap()?;

let response = response
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;

let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;

Ok(value)
}

fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
header: Some(RequestHeader {
authorization: self.auth_header.clone(),
dbname: self.dbname.clone(),
..Default::default()
}),
request: Some(request),
}
}
}
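Illustrative only, not part of the diff above: a minimal sketch of the streaming-insert flow described in the doc comment of `StreamInserter`, assuming a running frontend at a placeholder address and pre-built `InsertRequest` values (their construction is omitted here).

// Sketch under the assumptions stated above.
use api::v1::InsertRequest;
use client::{Client, Database};

async fn stream_rows(rows: Vec<InsertRequest>) -> client::Result<u32> {
    let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]);
    let db = Database::new_with_dbname("db_name", grpc_client);
    // Requests are forwarded over a bounded channel to a background task.
    let inserter = db.streaming_inserter()?;
    inserter.insert(rows).await?;
    // `finish` closes the sender and waits for the total affected row count.
    inserter.finish().await
}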
@@ -10,9 +10,7 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["metrics-process"]
tokio-console = ["common-telemetry/tokio-console"]
metrics-process = ["servers/metrics-process"]
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]

[dependencies]
anymap = "1.0.0-beta.2"
@@ -26,14 +24,12 @@ common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
config = "0.13"
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
metrics.workspace = true
nu-ansi-term = "0.46"
partition = { path = "../partition" }
query = { path = "../query" }
@@ -43,15 +39,16 @@ servers = { path = "../servers" }
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemallocator = "0.5"
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
toml = "0.5"

[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
temp-env = "0.3"
serde.workspace = true
toml = "0.5"

[build-dependencies]
build-data = "0.1.4"
build-data = "0.1.3"

@@ -18,10 +18,6 @@ fn main() {
"cargo:rustc-env=GIT_COMMIT={}",
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_COMMIT_SHORT={}",
build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_BRANCH={}",
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())

@@ -18,24 +18,18 @@ use std::fmt;

use clap::Parser;
use cmd::error::Result;
use cmd::options::{Options, TopLevelOptions};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info, TracingOptions};
use metrics::gauge;
use common_telemetry::logging::{error, info};

#[derive(Parser)]
#[clap(name = "greptimedb", version = print_version())]
struct Command {
#[clap(long)]
log_dir: Option<String>,
#[clap(long)]
log_level: Option<String>,
#[clap(long, default_value = "/tmp/greptimedb/logs")]
log_dir: String,
#[clap(long, default_value = "info")]
log_level: String,
#[clap(subcommand)]
subcmd: SubCommand,

#[cfg(feature = "tokio-console")]
#[clap(long)]
tokio_console_addr: Option<String>,
}

pub enum Application {
@@ -47,13 +41,13 @@ pub enum Application {
}

impl Application {
async fn start(&mut self) -> Result<()> {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.start().await,
Application::Frontend(instance) => instance.start().await,
Application::Metasrv(instance) => instance.start().await,
Application::Standalone(instance) => instance.start().await,
Application::Cli(instance) => instance.start().await,
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}

@@ -69,20 +63,8 @@ impl Application {
}

impl Command {
async fn build(self, opts: Options) -> Result<Application> {
self.subcmd.build(opts).await
}

fn load_options(&self) -> Result<Options> {
let top_level_opts = self.top_level_options();
self.subcmd.load_options(top_level_opts)
}

fn top_level_options(&self) -> TopLevelOptions {
TopLevelOptions {
log_dir: self.log_dir.clone(),
log_level: self.log_level.clone(),
}
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}

@@ -101,40 +83,28 @@ enum SubCommand {
}

impl SubCommand {
async fn build(self, opts: Options) -> Result<Application> {
match (self, opts) {
(SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
let app = cmd.build(*dn_opts).await?;
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
(SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
let app = cmd.build(*fe_opts).await?;
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
(SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
let app = cmd.build(*meta_opts).await?;
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
(SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
let app = cmd.build(opts.fe_opts, opts.dn_opts).await?;
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
(SubCommand::Cli(cmd), Options::Cli(_)) => {
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}

_ => unreachable!(),
}
}

fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
match self {
SubCommand::Datanode(cmd) => cmd.load_options(top_level_opts),
SubCommand::Frontend(cmd) => cmd.load_options(top_level_opts),
SubCommand::Metasrv(cmd) => cmd.load_options(top_level_opts),
SubCommand::Standalone(cmd) => cmd.load_options(top_level_opts),
SubCommand::Cli(cmd) => cmd.load_options(top_level_opts),
}
}
}
@@ -164,63 +134,27 @@ fn print_version() -> &'static str {
)
}

fn short_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}

// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
concat!(
"greptimedb-",
env!("GIT_BRANCH"),
"-",
env!("GIT_COMMIT_SHORT")
)
}

fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {
info!("argument: {}", argument);
}
}

#[cfg(feature = "mem-prof")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
// TODO(dennis):
// 1. adds ip/port to app
let app_name = &cmd.subcmd.to_string();

let opts = cmd.load_options()?;
let logging_opts = opts.logging_options();
let tracing_opts = TracingOptions {
#[cfg(feature = "tokio-console")]
tokio_console_addr: cmd.tokio_console_addr.clone(),
};
let log_dir = &cmd.log_dir;
let log_level = &cmd.log_level;

common_telemetry::set_panic_hook();
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);

// Report app version as gauge.
gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());

// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
log_env_flags();

let mut app = cmd.build(opts).await?;
let mut app = cmd.build().await?;

tokio::select! {
result = app.start() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}

@@ -17,18 +17,16 @@ mod helper;
mod repl;

use clap::Parser;
use common_telemetry::logging::LoggingOptions;
pub use repl::Repl;

use crate::error::Result;
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
repl: Repl,
}

impl Instance {
pub async fn start(&mut self) -> Result<()> {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}

@@ -47,17 +45,6 @@ impl Command {
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}

pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut logging_opts = LoggingOptions::default();
if let Some(dir) = top_level_opts.log_dir {
logging_opts.dir = dir;
}
if top_level_opts.log_level.is_some() {
logging_opts.level = top_level_opts.log_level;
}
Ok(Options::Cli(Box::new(logging_opts)))
}
}

#[derive(Parser)]
@@ -89,46 +76,3 @@ impl AttachCommand {
Ok(Instance { repl })
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_load_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};

let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
assert!(logging_opts.level.is_none());
assert!(!logging_opts.enable_jaeger_tracing);
}

#[test]
fn test_top_level_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};

let opts = cmd
.load_options(TopLevelOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
}
}

@@ -16,16 +16,15 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;

use catalog::remote::CachedMetaKvBackend;
use client::client_manager::DatanodeClients;
use catalog::remote::MetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use frontend::catalog::FrontendCatalogManager;
use frontend::datanode::DatanodeClients;
use meta_client::client::MetaClientBuilder;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
@@ -108,7 +107,7 @@ impl Repl {
Ok(ref line) => {
let request = line.trim();

let _ = self.rl.add_history_entry(request.to_string());
self.rl.add_history_entry(request.to_string());

request.try_into()
}
@@ -137,7 +136,7 @@ impl Repl {
}
}
ReplCommand::Sql { sql } => {
let _ = self.execute_sql(sql).await;
self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
@@ -254,7 +253,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);

let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
});

let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
@@ -262,19 +263,11 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let datanode_clients = Arc::new(DatanodeClients::default());

let catalog_list = Arc::new(FrontendCatalogManager::new(
cached_meta_backend.clone(),
cached_meta_backend,
backend,
partition_manager,
datanode_clients,
));
let plugins: Arc<Plugins> = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
false,
None,
None,
plugins.clone(),
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));

Ok(DatafusionQueryEngine::new(state, plugins))
Ok(DatafusionQueryEngine::new(state))
}

@@ -16,20 +16,22 @@ use std::time::Duration;

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use datanode::datanode::{
Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{Options, TopLevelOptions};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;

pub struct Instance {
datanode: Datanode,
}

impl Instance {
pub async fn start(&mut self) -> Result<()> {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}

@@ -48,12 +50,8 @@ pub struct Command {
}

impl Command {
pub async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}

pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
self.subcmd.load_options(top_level_opts)
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -63,15 +61,9 @@ enum SubCommand {
}

impl SubCommand {
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}

fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -84,54 +76,71 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
metasrv_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(long)]
data_home: Option<String>,
data_dir: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long, default_value = "GREPTIMEDB_DATANODE")]
env_prefix: String,
}

impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut opts: DatanodeOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
DatanodeOptions::env_list_keys(),
)?;
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);

if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
let opts: DatanodeOptions = self.try_into()?;

logging::info!("Datanode options: {:#?}", opts);

let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;

Ok(Instance { datanode })
}
}

impl TryFrom<StartCommand> for DatanodeOptions {
type Error = Error;
fn try_from(cmd: StartCommand) -> Result<Self> {
let mut opts: DatanodeOptions = if let Some(path) = cmd.config_file {
toml_loader::from_file!(&path)?
} else {
DatanodeOptions::default()
};

if let Some(addr) = cmd.rpc_addr {
opts.rpc_addr = addr;
}

if top_level_opts.log_level.is_some() {
opts.logging.level = top_level_opts.log_level;
if cmd.rpc_hostname.is_some() {
opts.rpc_hostname = cmd.rpc_hostname;
}

if let Some(addr) = &self.rpc_addr {
opts.rpc_addr = addr.clone();
if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = addr;
}

if self.rpc_hostname.is_some() {
opts.rpc_hostname = self.rpc_hostname.clone();
}

if let Some(node_id) = self.node_id {
if let Some(node_id) = cmd.node_id {
opts.node_id = Some(node_id);
}

if let Some(metasrv_addrs) = &self.metasrv_addr {
if let Some(meta_addr) = cmd.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
.metasrv_addrs = meta_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<_>();
opts.mode = Mode::Distributed;
}

@@ -142,44 +151,33 @@ impl StartCommand {
.fail();
}

if let Some(data_home) = &self.data_home {
opts.storage.store = ObjectStoreConfig::File(FileConfig {
data_home: data_home.clone(),
});
if let Some(data_dir) = cmd.data_dir {
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
}

if let Some(wal_dir) = &self.wal_dir {
opts.wal.dir = Some(wal_dir.clone());
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}

if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}

if let Some(http_timeout) = self.http_timeout {
if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}

// Disable dashboard in datanode.
opts.http_opts.disable_dashboard = true;

Ok(Options::Datanode(Box::new(opts)))
}

async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);

let datanode = Datanode::new(opts, Default::default())
.await
.context(StartDatanodeSnafu)?;

Ok(Instance { datanode })
Ok(opts)
}
}

#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::Write;
use std::time::Duration;

@@ -189,7 +187,6 @@ mod tests {
use servers::Mode;

use super::*;
use crate::options::ENV_VAR_SEP;

#[test]
fn test_read_from_config_file() {
@@ -201,6 +198,8 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
mysql_addr = "127.0.0.1:4406"
mysql_runtime_size = 2

[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
@@ -209,7 +208,7 @@ mod tests {
tcp_nodelay = true

[wal]
dir = "/other/wal"
dir = "/tmp/greptimedb/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
@@ -218,7 +217,7 @@ mod tests {

[storage]
type = "File"
data_home = "/tmp/greptimedb/"
data_dir = "/tmp/greptimedb/data/"

[storage.compaction]
max_inflight_tasks = 3
@@ -229,11 +228,6 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'
checkpoint_on_startup = true
compress = true

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

@@ -241,14 +235,12 @@ mod tests {
config_file: Some(file.path().to_str().unwrap().to_string()),
..Default::default()
};

let Options::Datanode(options) =
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };

let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);

assert_eq!("/other/wal", options.wal.dir.unwrap());
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
@@ -267,12 +259,11 @@ mod tests {
assert!(tcp_nodelay);

match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_home, .. }) => {
assert_eq!("/tmp/greptimedb/", data_home)
ObjectStoreConfig::File(FileConfig { data_dir, .. }) => {
assert_eq!("/tmp/greptimedb/data/", data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
ObjectStoreConfig::Azblob { .. } => unreachable!(),
};

assert_eq!(
@@ -289,184 +280,40 @@ mod tests {
checkpoint_margin: Some(9),
gc_duration: Some(Duration::from_secs(7)),
checkpoint_on_startup: true,
compress: true
},
options.storage.manifest,
);

assert_eq!("debug", options.logging.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
}

#[test]
fn test_try_from_cmd() {
if let Options::Datanode(opt) = StartCommand::default()
.load_options(TopLevelOptions::default())
.unwrap()
{
assert_eq!(Mode::Standalone, opt.mode)
}
assert_eq!(
Mode::Standalone,
DatanodeOptions::try_from(StartCommand::default())
.unwrap()
.mode
);

if let Options::Datanode(opt) = (StartCommand {
let mode = DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
metasrv_addr: Some("127.0.0.1:3002".to_string()),
..Default::default()
})
.load_options(TopLevelOptions::default())
.unwrap()
{
assert_eq!(Mode::Distributed, opt.mode)
}
.mode;
assert_matches!(mode, Mode::Distributed);

assert!((StartCommand {
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
assert!(DatanodeOptions::try_from(StartCommand {
metasrv_addr: Some("127.0.0.1:3002".to_string()),
..Default::default()
})
.load_options(TopLevelOptions::default())
.is_err());

// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
assert!((StartCommand {
DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
..Default::default()
})
.load_options(TopLevelOptions::default())
.is_ok());
}

#[test]
fn test_top_level_options() {
let cmd = StartCommand::default();

let options = cmd
.load_options(TopLevelOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap();

let logging_opt = options.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}

#[test]
fn test_config_precedence_order() {
let mut file = create_named_temp_file();
let toml_str = r#"
mode = "distributed"
enable_memory_catalog = false
node_id = 42
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8

[meta_client_options]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true

[wal]
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false

[storage]
type = "File"
data_home = "/tmp/greptimedb/"

[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
max_purge_tasks = 32

[storage.manifest]
checkpoint_on_startup = true

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

let env_prefix = "DATANODE_UT";
temp_env::with_vars(
vec![
(
// storage.manifest.gc_duration = 9s
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"manifest".to_uppercase(),
"gc_duration".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("9s"),
),
(
// storage.compaction.max_purge_tasks = 99
vec![
env_prefix.to_string(),
"storage".to_uppercase(),
"compaction".to_uppercase(),
"max_purge_tasks".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("99"),
),
(
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
vec![
env_prefix.to_string(),
"meta_client_options".to_uppercase(),
"metasrv_addrs".to_uppercase(),
]
.join(ENV_VAR_SEP),
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
),
],
|| {
let command = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
wal_dir: Some("/other/wal/dir".to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};

let Options::Datanode(opts) =
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};

// Should be read from env, env > default values.
assert_eq!(
opts.storage.manifest.gc_duration,
Some(Duration::from_secs(9))
);
assert_eq!(
opts.meta_client_options.unwrap().metasrv_addrs,
vec![
"127.0.0.1:3001".to_string(),
"127.0.0.1:3002".to_string(),
"127.0.0.1:3003".to_string()
]
);

// Should be read from config file, config file > env > default values.
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);

// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");

// Should be default value.
assert_eq!(
opts.storage.manifest.checkpoint_margin,
DatanodeOptions::default()
.storage
.manifest
.checkpoint_margin
);
},
);
.unwrap();
}
}

@@ -15,7 +15,6 @@
use std::any::Any;

use common_error::prelude::*;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::Location;

@@ -24,46 +23,59 @@ use snafu::Location;
pub enum Error {
#[snafu(display("Failed to start datanode, source: {}", source))]
StartDatanode {
location: Location,
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
location: Location,
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
location: Location,
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
location: Location,
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
location: Location,
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
location: Location,
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
location: Location,
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
source: std::io::Error,
location: Location,
},

#[snafu(display("Failed to parse config, source: {}", source))]
ParseConfig {
source: toml::de::Error,
location: Location,
},

#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, location: Location },

@@ -72,14 +84,14 @@ pub enum Error {

#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
location: Location,
#[snafu(backtrace)]
source: servers::auth::Error,
},

#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
UnsupportedSelectorType {
selector_type: String,
location: Location,
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

@@ -101,58 +113,46 @@ pub enum Error {
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
RequestDatabase {
sql: String,
location: Location,
#[snafu(backtrace)]
source: client::Error,
},

#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
CollectRecordBatches {
location: Location,
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
PrettyPrintRecordBatches {
location: Location,
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to start Meta client, source: {}", source))]
StartMetaClient {
location: Location,
#[snafu(backtrace)]
source: meta_client::error::Error,
},

#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
ParseSql {
sql: String,
location: Location,
#[snafu(backtrace)]
source: query::error::Error,
},

#[snafu(display("Failed to plan statement, source: {}", source))]
PlanStatement {
location: Location,
#[snafu(backtrace)]
source: query::error::Error,
},

#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
SubstraitEncodeLogicalPlan {
location: Location,
#[snafu(backtrace)]
source: substrait::error::Error,
},

#[snafu(display("Failed to load layered config, source: {}", source))]
LoadLayeredConfig {
source: ConfigError,
location: Location,
},

#[snafu(display("Failed to start catalog manager, source: {}", source))]
StartCatalogManager {
location: Location,
source: catalog::error::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -160,29 +160,31 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::StartDatanode { source, .. } => source.status_code(),
Error::StartFrontend { source, .. } => source.status_code(),
Error::ShutdownDatanode { source, .. } => source.status_code(),
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),
Error::StartMetaClient { source } => source.status_code(),
Error::ParseSql { source, .. } | Error::PlanStatement { source } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
}
}

@@ -16,31 +16,30 @@ use std::sync::Arc;

use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::service_config::{InfluxdbOptions, PromOptions};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::{auth, Mode};
use snafu::ResultExt;

use crate::error::{self, IllegalAuthConfigSnafu, Result, StartCatalogManagerSnafu};
use crate::options::{Options, TopLevelOptions};
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::toml_loader;

pub struct Instance {
frontend: FeInstance,
}

impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.frontend
.catalog_manager()
.start()
.await
.context(StartCatalogManagerSnafu)?;

pub async fn run(&mut self) -> Result<()> {
self.frontend
.start()
.await
@@ -62,12 +61,8 @@ pub struct Command {
}

impl Command {
pub async fn build(self, opts: FrontendOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}

pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
self.subcmd.load_options(top_level_opts)
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -77,20 +72,14 @@ enum SubCommand {
}

impl SubCommand {
async fn build(self, opts: FrontendOptions) -> Result<Instance> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}

fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
match self {
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
SubCommand::Start(cmd) => cmd.build().await,
}
}
}

#[derive(Debug, Default, Parser)]
#[derive(Debug, Parser)]
pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
@@ -108,8 +97,8 @@ pub struct StartCommand {
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(long)]
metasrv_addr: Option<String>,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
@@ -119,101 +108,20 @@ pub struct StartCommand {
#[clap(long)]
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: Option<bool>,
#[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
env_prefix: String,
disable_dashboard: bool,
}

impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
let mut opts: FrontendOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
FrontendOptions::env_list_keys(),
)?;

if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
}

if top_level_opts.log_level.is_some() {
opts.logging.level = top_level_opts.log_level;
}

let tls_opts = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
);

if let Some(addr) = &self.http_addr {
if let Some(http_opts) = &mut opts.http_options {
http_opts.addr = addr.clone()
}
}

if let Some(disable_dashboard) = self.disable_dashboard {
opts.http_options
.get_or_insert_with(Default::default)
.disable_dashboard = disable_dashboard;
}

if let Some(addr) = &self.grpc_addr {
if let Some(grpc_opts) = &mut opts.grpc_options {
grpc_opts.addr = addr.clone()
}
}

if let Some(addr) = &self.mysql_addr {
if let Some(mysql_opts) = &mut opts.mysql_options {
mysql_opts.addr = addr.clone();
mysql_opts.tls = tls_opts.clone();
}
}

if let Some(addr) = &self.prom_addr {
opts.prom_options = Some(PromOptions { addr: addr.clone() });
}

if let Some(addr) = &self.postgres_addr {
if let Some(postgres_opts) = &mut opts.postgres_options {
postgres_opts.addr = addr.clone();
postgres_opts.tls = tls_opts;
}
}

if let Some(addr) = &self.opentsdb_addr {
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
opentsdb_addr.addr = addr.clone();
}
}

if let Some(enable) = self.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}

if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}

Ok(Options::Frontend(Box::new(opts)))
}

async fn build(self, opts: FrontendOptions) -> Result<Instance> {
logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);

async fn build(self) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;

let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;

instance
.build_servers(&opts)
.build_servers(&opts, plugins)
.await
.context(error::StartFrontendSnafu)?;

@@ -222,7 +130,7 @@ impl StartCommand {
}

pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
let plugins = Plugins::new();
let mut plugins = Plugins::new();

if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
@@ -231,40 +139,107 @@ pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins>
Ok(plugins)
}

impl TryFrom<StartCommand> for FrontendOptions {
type Error = error::Error;

fn try_from(cmd: StartCommand) -> Result<Self> {
let mut opts: FrontendOptions = if let Some(path) = cmd.config_file {
toml_loader::from_file!(&path)?
} else {
FrontendOptions::default()
};

let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);

let mut http_options = HttpOptions {
disable_dashboard: cmd.disable_dashboard,
..Default::default()
};

if let Some(addr) = cmd.http_addr {
http_options.addr = addr;
}

opts.http_options = Some(http_options);

if let Some(addr) = cmd.grpc_addr {
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
}

if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
tls: tls_option.clone(),
..Default::default()
});
}
if let Some(addr) = cmd.prom_addr {
opts.prom_options = Some(PromOptions { addr });
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
tls: tls_option,
..Default::default()
});
}
if let Some(addr) = cmd.opentsdb_addr {
opts.opentsdb_options = Some(OpentsdbOptions {
addr,
..Default::default()
});
}
if let Some(enable) = cmd.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
if let Some(metasrv_addr) = cmd.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<Vec<_>>();
opts.mode = Mode::Distributed;
}
Ok(opts)
}
}

#[cfg(test)]
mod tests {
use std::io::Write;
use std::time::Duration;

use common_base::readable_size::ReadableSize;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use frontend::service_config::GrpcOptions;
|
||||
use servers::auth::{Identity, Password, UserProviderRef};
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[test]
|
||||
fn test_try_from_start_command() {
|
||||
let command = StartCommand {
|
||||
http_addr: Some("127.0.0.1:1234".to_string()),
|
||||
grpc_addr: None,
|
||||
prom_addr: Some("127.0.0.1:4444".to_string()),
|
||||
mysql_addr: Some("127.0.0.1:5678".to_string()),
|
||||
postgres_addr: Some("127.0.0.1:5432".to_string()),
|
||||
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
|
||||
influxdb_enable: Some(false),
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: false,
|
||||
};
|
||||
|
||||
let Options::Frontend(opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
|
||||
let opts: FrontendOptions = command.try_into().unwrap();
|
||||
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
|
||||
assert_eq!(
|
||||
ReadableSize::mb(64),
|
||||
opts.http_options.as_ref().unwrap().body_limit
|
||||
);
|
||||
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
|
||||
assert_eq!(
|
||||
opts.postgres_options.as_ref().unwrap().addr,
|
||||
@@ -306,22 +281,27 @@ mod tests {
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "2GB"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: false,
|
||||
};
|
||||
|
||||
let Options::Frontend(fe_opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
let fe_opts = FrontendOptions::try_from(command).unwrap();
|
||||
assert_eq!(Mode::Distributed, fe_opts.mode);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4000".to_string(),
|
||||
@@ -331,162 +311,37 @@ mod tests {
|
||||
Duration::from_secs(30),
|
||||
fe_opts.http_options.as_ref().unwrap().timeout
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ReadableSize::gb(2),
|
||||
fe_opts.http_options.as_ref().unwrap().body_limit
|
||||
);
|
||||
|
||||
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
disable_dashboard: false,
|
||||
};
|
||||
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
assert!(plugins.is_ok());
|
||||
let plugins = plugins.unwrap();
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let provider = plugins.get::<UserProviderRef>();
|
||||
assert!(provider.is_some());
|
||||
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.authenticate(
|
||||
Identity::UserId("test", None),
|
||||
Password::PlainText("test".to_string().into()),
|
||||
)
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.await;
|
||||
let _ = result.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let logging_opt = options.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[meta_client_options]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[mysql_options]
|
||||
addr = "127.0.0.1:4002"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "FRONTEND_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// mysql_options.addr = 127.0.0.1:14002
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"mysql_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:14002"),
|
||||
),
|
||||
(
|
||||
// mysql_options.runtime_size = 11
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"mysql_options".to_uppercase(),
|
||||
"runtime_size".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("11"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
(
|
||||
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"meta_client_options".to_uppercase(),
|
||||
"metasrv_addrs".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let top_level_opts = TopLevelOptions {
|
||||
log_dir: None,
|
||||
log_level: Some("error".to_string()),
|
||||
};
|
||||
let Options::Frontend(fe_opts) =
|
||||
command.load_options(top_level_opts).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
|
||||
assert_eq!(
|
||||
fe_opts.meta_client_options.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
"127.0.0.1:3001".to_string(),
|
||||
"127.0.0.1:3002".to_string(),
|
||||
"127.0.0.1:3003".to_string()
|
||||
]
|
||||
);
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(
|
||||
fe_opts.mysql_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:4002"
|
||||
);
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(
|
||||
fe_opts.http_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:14000"
|
||||
);
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
fe_opts.grpc_options.as_ref().unwrap().addr,
|
||||
GrpcOptions::default().addr
|
||||
);
|
||||
},
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
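The frontend change above swaps the two-step `load_options` / `build(opts)` flow for a direct `TryFrom<StartCommand> for FrontendOptions` conversion, where any flag that was actually passed overrides the value loaded from `--config-file`. A minimal sketch of that conversion, assuming the `Default` derive that one side of this diff places on `StartCommand`; the addresses are illustrative only:

```rust
// Build FrontendOptions straight from CLI-style values; error handling elided.
let cmd = StartCommand {
    http_addr: Some("127.0.0.1:1234".to_string()),
    mysql_addr: Some("127.0.0.1:5678".to_string()),
    metasrv_addr: Some("127.0.0.1:3002".to_string()),
    ..Default::default()
};

let opts: FrontendOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:1234", opts.http_options.as_ref().unwrap().addr);
// Passing --metasrv-addr also flips the frontend into distributed mode.
assert_eq!(Mode::Distributed, opts.mode);
```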
@@ -19,5 +19,5 @@ pub mod datanode;
|
||||
pub mod error;
|
||||
pub mod frontend;
|
||||
pub mod metasrv;
|
||||
pub mod options;
|
||||
pub mod standalone;
|
||||
mod toml_loader;
|
||||
|
||||
@@ -15,20 +15,20 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use meta_srv::bootstrap::MetaSrvInstance;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{error, toml_loader};
|
||||
|
||||
pub struct Instance {
|
||||
instance: MetaSrvInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.instance
|
||||
.start()
|
||||
.await
|
||||
@@ -50,12 +50,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
self.subcmd.build(opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_opts)
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,20 +61,14 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
#[derive(Debug, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
bind_addr: Option<String>,
|
||||
@@ -93,77 +83,68 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
use_memory_store: bool,
|
||||
#[clap(long)]
|
||||
disable_region_failover: bool,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_METASRV")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: MetaSrvOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
None,
|
||||
)?;
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
let opts: MetaSrvOptions = self.try_into()?;
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for MetaSrvOptions {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(cmd: StartCommand) -> Result<Self> {
|
||||
let mut opts: MetaSrvOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
} else {
|
||||
MetaSrvOptions::default()
|
||||
};
|
||||
|
||||
if let Some(addr) = cmd.bind_addr {
|
||||
opts.bind_addr = addr;
|
||||
}
|
||||
|
||||
if top_level_opts.log_level.is_some() {
|
||||
opts.logging.level = top_level_opts.log_level;
|
||||
if let Some(addr) = cmd.server_addr {
|
||||
opts.server_addr = addr;
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.bind_addr {
|
||||
opts.bind_addr = addr.clone();
|
||||
if let Some(addr) = cmd.store_addr {
|
||||
opts.store_addr = addr;
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.server_addr {
|
||||
opts.server_addr = addr.clone();
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.store_addr {
|
||||
opts.store_addr = addr.clone();
|
||||
}
|
||||
|
||||
if let Some(selector_type) = &self.selector {
|
||||
if let Some(selector_type) = &cmd.selector {
|
||||
opts.selector = selector_type[..]
|
||||
.try_into()
|
||||
.context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
|
||||
info!("Using {} selector", selector_type);
|
||||
}
|
||||
|
||||
opts.use_memory_store = self.use_memory_store;
|
||||
|
||||
opts.disable_region_failover = self.disable_region_failover;
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http_opts.addr = http_addr.clone();
|
||||
if cmd.use_memory_store {
|
||||
warn!("Using memory store for Meta. Make sure you are in running tests.");
|
||||
opts.use_memory_store = true;
|
||||
}
|
||||
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
if let Some(http_addr) = cmd.http_addr {
|
||||
opts.http_opts.addr = http_addr;
|
||||
}
|
||||
if let Some(http_timeout) = cmd.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
opts.http_opts.disable_dashboard = true;
|
||||
|
||||
Ok(Options::Metasrv(Box::new(opts)))
|
||||
}
|
||||
|
||||
async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,7 +156,6 @@ mod tests {
|
||||
use meta_srv::selector::SelectorType;
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[test]
|
||||
fn test_read_from_cmd() {
|
||||
@@ -183,13 +163,15 @@ mod tests {
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
..Default::default()
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
|
||||
assert_eq!(SelectorType::LoadBased, options.selector);
|
||||
}
|
||||
@@ -204,116 +186,24 @@ mod tests {
|
||||
datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
use_memory_store = false
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let cmd = StartCommand {
|
||||
bind_addr: None,
|
||||
server_addr: None,
|
||||
store_addr: None,
|
||||
selector: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
..Default::default()
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
|
||||
assert_eq!(15, options.datanode_lease_secs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
selector: Some("LoadBased".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let logging_opt = options.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
server_addr = "127.0.0.1:3002"
|
||||
datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
use_memory_store = false
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "METASRV_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// bind_addr = 127.0.0.1:14002
|
||||
vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:14002"),
|
||||
),
|
||||
(
|
||||
// server_addr = 127.0.0.1:13002
|
||||
vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:13002"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Metasrv(opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.server_addr, "127.0.0.1:3002");
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.http_opts.addr, "127.0.0.1:14000");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(opts.store_addr, "127.0.0.1:2379");
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
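In the metasrv command above, the `--selector` string is converted with a plain `try_into()`. A hedged sketch of that conversion on its own; only the `"LoadBased"` and `"LeaseBased"` values that appear in the tests are assumed to be accepted:

```rust
use meta_srv::selector::SelectorType;

// An unknown value surfaces as the UnsupportedSelectorType error handled in
// the command code above; `expect` is used here only to keep the sketch short.
let selector: SelectorType = "LoadBased".try_into().expect("unsupported selector type");
assert_eq!(SelectorType::LoadBased, selector);

let lease_based: SelectorType = "LeaseBased".try_into().expect("unsupported selector type");
assert_eq!(SelectorType::LeaseBased, lease_based);
```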
@@ -1,274 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use config::{Config, Environment, File, FileFormat};
|
||||
use datanode::datanode::DatanodeOptions;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{LoadLayeredConfigSnafu, Result};
|
||||
|
||||
pub const ENV_VAR_SEP: &str = "__";
|
||||
pub const ENV_LIST_SEP: &str = ",";
|
||||
|
||||
pub struct MixOptions {
|
||||
pub fe_opts: FrontendOptions,
|
||||
pub dn_opts: DatanodeOptions,
|
||||
pub logging: LoggingOptions,
|
||||
}
|
||||
|
||||
pub enum Options {
|
||||
Datanode(Box<DatanodeOptions>),
|
||||
Frontend(Box<FrontendOptions>),
|
||||
Metasrv(Box<MetaSrvOptions>),
|
||||
Standalone(Box<MixOptions>),
|
||||
Cli(Box<LoggingOptions>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct TopLevelOptions {
|
||||
pub log_dir: Option<String>,
|
||||
pub log_level: Option<String>,
|
||||
}
|
||||
|
||||
impl Options {
|
||||
pub fn logging_options(&self) -> &LoggingOptions {
|
||||
match self {
|
||||
Options::Datanode(opts) => &opts.logging,
|
||||
Options::Frontend(opts) => &opts.logging,
|
||||
Options::Metasrv(opts) => &opts.logging,
|
||||
Options::Standalone(opts) => &opts.logging,
|
||||
Options::Cli(opts) => opts,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load the configuration from multiple sources and merge them.
|
||||
/// The precedence order is: config file > environment variables > default values.
|
||||
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
|
||||
/// The function will use dunder(double underscore) `__` as the separator for environment variables, for example:
|
||||
/// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
|
||||
/// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"]` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
|
||||
/// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
|
||||
pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
|
||||
config_file: Option<&str>,
|
||||
env_prefix: &str,
|
||||
list_keys: Option<&[&str]>,
|
||||
) -> Result<T> {
|
||||
let default_opts = T::default();
|
||||
|
||||
let env_source = {
|
||||
let mut env = Environment::default();
|
||||
|
||||
if !env_prefix.is_empty() {
|
||||
env = env.prefix(env_prefix);
|
||||
}
|
||||
|
||||
if let Some(list_keys) = list_keys {
|
||||
env = env.list_separator(ENV_LIST_SEP);
|
||||
for key in list_keys {
|
||||
env = env.with_list_parse_key(key);
|
||||
}
|
||||
}
|
||||
|
||||
env.try_parsing(true)
|
||||
.separator(ENV_VAR_SEP)
|
||||
.ignore_empty(true)
|
||||
};
|
||||
|
||||
// Add default values and environment variables as the sources of the configuration.
|
||||
let mut layered_config = Config::builder()
|
||||
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
|
||||
.add_source(env_source);
|
||||
|
||||
// Add config file as the source of the configuration if it is specified.
|
||||
if let Some(config_file) = config_file {
|
||||
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
|
||||
}
|
||||
|
||||
let opts = layered_config
|
||||
.build()
|
||||
.context(LoadLayeredConfigSnafu)?
|
||||
.try_deserialize()
|
||||
.context(LoadLayeredConfigSnafu)?;
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
|
||||
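The doc comment on `load_layered_options` above describes how dunder-separated environment variables map onto nested option fields and how list keys are split on commas. A short sketch of that mapping, using the frontend prefix and the `env_list_keys()` helper that appear elsewhere in this diff; setting the process environment inline is for illustration only:

```rust
// GREPTIMEDB_FRONTEND__META_CLIENT_OPTIONS__METASRV_ADDRS maps onto
// FrontendOptions.meta_client_options.metasrv_addrs and is parsed as a
// comma-separated list because the key is returned by env_list_keys().
std::env::set_var(
    "GREPTIMEDB_FRONTEND__META_CLIENT_OPTIONS__METASRV_ADDRS",
    "127.0.0.1:3001,127.0.0.1:3002",
);

let opts: FrontendOptions = Options::load_layered_options(
    None,                             // no config file: env vars > defaults
    "GREPTIMEDB_FRONTEND",            // environment variable prefix
    FrontendOptions::env_list_keys(), // keys to parse as lists
)
.unwrap();

// The environment value wins over the struct default here.
println!("{:?}", opts.meta_client_options);
```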
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_load_layered_options() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
rpc_runtime_size = 8
|
||||
mysql_addr = "127.0.0.1:4406"
|
||||
mysql_runtime_size = 2
|
||||
|
||||
[meta_client_options]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "DATANODE_UT";
|
||||
temp_env::with_vars(
|
||||
// The following environment variables will be used to override the values in the config file.
|
||||
vec![
|
||||
(
|
||||
// storage.manifest.checkpoint_margin = 99
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"checkpoint_margin".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("99"),
|
||||
),
|
||||
(
|
||||
// storage.type = S3
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"type".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("S3"),
|
||||
),
|
||||
(
|
||||
// storage.bucket = mybucket
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"bucket".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("mybucket"),
|
||||
),
|
||||
(
|
||||
// storage.manifest.gc_duration = 42s
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"gc_duration".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("42s"),
|
||||
),
|
||||
(
|
||||
// storage.manifest.checkpoint_on_startup = true
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"checkpoint_on_startup".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("true"),
|
||||
),
|
||||
(
|
||||
// wal.dir = /other/wal/dir
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"wal".to_uppercase(),
|
||||
"dir".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("/other/wal/dir"),
|
||||
),
|
||||
(
|
||||
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"meta_client_options".to_uppercase(),
|
||||
"metasrv_addrs".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let opts: DatanodeOptions = Options::load_layered_options(
|
||||
Some(file.path().to_str().unwrap()),
|
||||
env_prefix,
|
||||
DatanodeOptions::env_list_keys(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check the configs from environment variables.
|
||||
assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
|
||||
match opts.storage.store {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(s3_config.bucket, "mybucket".to_string());
|
||||
}
|
||||
_ => panic!("unexpected store type"),
|
||||
}
|
||||
assert_eq!(
|
||||
opts.storage.manifest.gc_duration,
|
||||
Some(Duration::from_secs(42))
|
||||
);
|
||||
assert!(opts.storage.manifest.checkpoint_on_startup);
|
||||
assert_eq!(
|
||||
opts.meta_client_options.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
"127.0.0.1:3001".to_string(),
|
||||
"127.0.0.1:3002".to_string(),
|
||||
"127.0.0.1:3003".to_string()
|
||||
]
|
||||
);
|
||||
|
||||
// Should be the values from config file, not environment variables.
|
||||
assert_eq!(opts.wal.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -17,15 +17,17 @@ use std::sync::Arc;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::service_config::{
|
||||
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
|
||||
PrometheusOptions,
|
||||
};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::prom::PromOptions;
|
||||
use frontend::prometheus::PrometheusOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -33,11 +35,11 @@ use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
|
||||
StartFrontendSnafu,
|
||||
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFrontendSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::options::{MixOptions, Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
@@ -46,16 +48,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn build(
|
||||
self,
|
||||
fe_opts: FrontendOptions,
|
||||
dn_opts: DatanodeOptions,
|
||||
) -> Result<Instance> {
|
||||
self.subcmd.build(fe_opts, dn_opts).await
|
||||
}
|
||||
|
||||
pub fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
self.subcmd.load_options(top_level_options)
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,15 +59,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.build(fe_opts, dn_opts).await,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.load_options(top_level_options),
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -93,8 +81,7 @@ pub struct StandaloneOptions {
|
||||
pub prom_options: Option<PromOptions>,
|
||||
pub wal: WalConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub procedure: ProcedureConfig,
|
||||
pub logging: LoggingOptions,
|
||||
pub procedure: Option<ProcedureConfig>,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -112,8 +99,7 @@ impl Default for StandaloneOptions {
|
||||
prom_options: Some(PromOptions::default()),
|
||||
wal: WalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
procedure: ProcedureConfig::default(),
|
||||
logging: LoggingOptions::default(),
|
||||
procedure: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -131,8 +117,6 @@ impl StandaloneOptions {
|
||||
prometheus_options: self.prometheus_options,
|
||||
prom_options: self.prom_options,
|
||||
meta_client_options: None,
|
||||
logging: self.logging,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +137,7 @@ pub struct Instance {
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
|
||||
self.datanode
|
||||
.start_instance()
|
||||
@@ -181,7 +165,7 @@ impl Instance {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
#[derive(Debug, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
@@ -209,114 +193,37 @@ struct StartCommand {
|
||||
tls_key_path: Option<String>,
|
||||
#[clap(long)]
|
||||
user_provider: Option<String>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: StandaloneOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
None,
|
||||
)?;
|
||||
|
||||
opts.enable_memory_catalog = self.enable_memory_catalog;
|
||||
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(dir) = top_level_options.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
|
||||
if top_level_options.log_level.is_some() {
|
||||
opts.logging.level = top_level_options.log_level;
|
||||
}
|
||||
|
||||
let tls_opts = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(addr) = &self.http_addr {
|
||||
if let Some(http_opts) = &mut opts.http_options {
|
||||
http_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
if addr.eq(&datanode_grpc_addr) {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
if let Some(grpc_opts) = &mut opts.grpc_options {
|
||||
grpc_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
if let Some(mysql_opts) = &mut opts.mysql_options {
|
||||
mysql_opts.addr = addr.clone();
|
||||
mysql_opts.tls = tls_opts.clone();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr: addr.clone() })
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.postgres_addr {
|
||||
if let Some(postgres_opts) = &mut opts.postgres_options {
|
||||
postgres_opts.addr = addr.clone();
|
||||
postgres_opts.tls = tls_opts;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.opentsdb_addr {
|
||||
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
|
||||
opentsdb_addr.addr = addr.clone();
|
||||
}
|
||||
}
|
||||
|
||||
if self.influxdb_enable {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
}
|
||||
|
||||
let fe_opts = opts.clone().frontend_options();
|
||||
let logging = opts.logging.clone();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
Ok(Options::Standalone(Box::new(MixOptions {
|
||||
fe_opts,
|
||||
dn_opts,
|
||||
logging,
|
||||
})))
|
||||
}
|
||||
|
||||
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let enable_memory_catalog = self.enable_memory_catalog;
|
||||
let config_file = self.config_file.clone();
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let fe_opts = FrontendOptions::try_from(self)?;
|
||||
let dn_opts: DatanodeOptions = {
|
||||
let mut opts: StandaloneOptions = if let Some(path) = config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
opts.enable_memory_catalog = enable_memory_catalog;
|
||||
opts.datanode_options()
|
||||
};
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!(
|
||||
"Standalone frontend options: {:#?}, datanode options: {:#?}",
|
||||
fe_opts, dn_opts
|
||||
);
|
||||
|
||||
let datanode = Datanode::new(dn_opts.clone(), Default::default())
|
||||
let datanode = Datanode::new(dn_opts.clone())
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
|
||||
|
||||
frontend
|
||||
.build_servers(&fe_opts)
|
||||
.build_servers(&fe_opts, plugins)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -336,97 +243,118 @@ async fn build_frontend(
|
||||
Ok(frontend_instance)
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for FrontendOptions {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(cmd: StartCommand) -> std::result::Result<Self, Self::Error> {
|
||||
let opts: StandaloneOptions = if let Some(path) = cmd.config_file {
|
||||
toml_loader::from_file!(&path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
|
||||
let mut opts = opts.frontend_options();
|
||||
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(addr) = cmd.http_addr {
|
||||
opts.http_options = Some(HttpOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
if let Some(addr) = cmd.rpc_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
if addr == datanode_grpc_addr {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.mysql_addr {
|
||||
opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr })
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.postgres_addr {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
if let Some(addr) = cmd.opentsdb_addr {
|
||||
opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
if cmd.influxdb_enable {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
}
|
||||
|
||||
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
|
||||
|
||||
if let Some(mut mysql_options) = opts.mysql_options {
|
||||
mysql_options.tls = tls_option.clone();
|
||||
opts.mysql_options = Some(mysql_options);
|
||||
}
|
||||
|
||||
if let Some(mut postgres_options) = opts.postgres_options {
|
||||
postgres_options.tls = tls_option;
|
||||
opts.postgres_options = Some(postgres_options);
|
||||
}
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::default::Default;
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use servers::auth::{Identity, Password, UserProviderRef};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
let plugins = plugins.unwrap();
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let result = provider
|
||||
.authenticate(
|
||||
Identity::UserId("test", None),
|
||||
Password::PlainText("test".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
let _ = result.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_toml() {
|
||||
let opts = StandaloneOptions::default();
|
||||
let toml_string = toml::to_string(&opts).unwrap();
|
||||
let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
|
||||
enable_memory_catalog = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
type = "S3"
|
||||
access_key_id = "access_key_id"
|
||||
secret_access_key = "secret_access_key"
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[storage.manifest]
|
||||
checkpoint_margin = 9
|
||||
gc_duration = '7s'
|
||||
checkpoint_on_startup = true
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "128MB"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
fn test_read_config_file() {
|
||||
let cmd = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: Some(format!(
|
||||
"{}/../../config/standalone.example.toml",
|
||||
std::env::current_dir().unwrap().as_path().to_str().unwrap()
|
||||
)),
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
};
|
||||
|
||||
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
let fe_opts = options.fe_opts;
|
||||
let dn_opts = options.dn_opts;
|
||||
let logging_opts = options.logging;
|
||||
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
|
||||
assert_eq!(Mode::Standalone, fe_opts.mode);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4000".to_string(),
|
||||
@@ -436,10 +364,6 @@ mod tests {
|
||||
Duration::from_secs(30),
|
||||
fe_opts.http_options.as_ref().unwrap().timeout
|
||||
);
|
||||
assert_eq!(
|
||||
ReadableSize::mb(128),
|
||||
fe_opts.http_options.as_ref().unwrap().body_limit
|
||||
);
|
||||
assert_eq!(
|
||||
"127.0.0.1:4001".to_string(),
|
||||
fe_opts.grpc_options.unwrap().addr
|
||||
@@ -454,129 +378,42 @@ mod tests {
|
||||
fe_opts.mysql_options.as_ref().unwrap().reject_no_database
|
||||
);
|
||||
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
|
||||
match &dn_opts.storage.store {
|
||||
datanode::datanode::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
"Secret([REDACTED alloc::string::String])".to_string(),
|
||||
format!("{:?}", s3_config.access_key_id)
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: None,
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(opts) = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap() else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
assert!(plugins.is_ok());
|
||||
let plugins = plugins.unwrap();
|
||||
let provider = plugins.get::<UserProviderRef>();
|
||||
assert!(provider.is_some());
|
||||
let provider = provider.unwrap();
|
||||
let result = provider
|
||||
.authenticate(Identity::UserId("test", None), Password::PlainText("test"))
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "standalone"
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "STANDALONE_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// logging.dir = /other/log/dir
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"logging".to_uppercase(),
|
||||
"dir".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("/other/log/dir"),
|
||||
),
|
||||
(
|
||||
// logging.level = info
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"logging".to_uppercase(),
|
||||
"level".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("info"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let top_level_opts = TopLevelOptions {
|
||||
log_dir: None,
|
||||
log_level: None,
|
||||
};
|
||||
let Options::Standalone(opts) =
|
||||
command.load_options(top_level_opts).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(opts.logging.dir, "/other/log/dir");
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(
|
||||
opts.fe_opts.http_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:14000"
|
||||
);
|
||||
assert_eq!(
|
||||
ReadableSize::mb(64),
|
||||
opts.fe_opts.http_options.as_ref().unwrap().body_limit
|
||||
);
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
opts.fe_opts.grpc_options.unwrap().addr,
|
||||
GrpcOptions::default().addr
|
||||
);
|
||||
},
|
||||
);
|
||||
fn test_toml() {
|
||||
let opts = StandaloneOptions::default();
|
||||
let toml_string = toml::to_string(&opts).unwrap();
|
||||
let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
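In the standalone command above a single `StandaloneOptions` value, usually read from one TOML file, is projected into separate frontend and datanode option sets before both components are built in one process. A brief sketch of that split; field access follows the code above and the printed values are only for illustration:

```rust
// One StandaloneOptions value feeds both in-process components.
let standalone = StandaloneOptions::default();

// frontend_options() consumes the value, hence the clone before reusing it.
let fe_opts: FrontendOptions = standalone.clone().frontend_options();
let dn_opts: DatanodeOptions = standalone.datanode_options();

println!("frontend http options: {:?}", fe_opts.http_options);
println!("datanode wal dir: {:?}", dn_opts.wal.dir);
```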
src/cmd/src/toml_loader.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

macro_rules! from_file {
    ($path: expr) => {
        toml::from_str(
            &std::fs::read_to_string($path)
                .context(crate::error::ReadConfigSnafu { path: $path })?,
        )
        .context(crate::error::ParseConfigSnafu)
    };
}

pub(crate) use from_file;

#[cfg(test)]
mod tests {
    use std::fs::File;
    use std::io::Write;

    use common_test_util::temp_dir::create_temp_dir;
    use serde::{Deserialize, Serialize};
    use snafu::ResultExt;

    use super::*;
    use crate::error::Result;

    #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
    #[serde(default)]
    struct MockConfig {
        path: String,
        port: u32,
        host: String,
    }

    impl Default for MockConfig {
        fn default() -> Self {
            Self {
                path: "test".to_string(),
                port: 0,
                host: "localhost".to_string(),
            }
        }
    }

    #[test]
    fn test_from_file() -> Result<()> {
        let config = MockConfig {
            path: "/tmp".to_string(),
            port: 999,
            host: "greptime.test".to_string(),
        };

        let dir = create_temp_dir("test_from_file");
        let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());

        let s = toml::to_string(&config).unwrap();
        assert!(s.contains("host") && s.contains("path") && s.contains("port"));

        let mut file = File::create(&test_file).unwrap();
        file.write_all(s.as_bytes()).unwrap();

        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config, config);

        // Only host in file
        let mut file = File::create(&test_file).unwrap();
        file.write_all("host='greptime.test'\n".as_bytes()).unwrap();

        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config.host, "greptime.test");
        assert_eq!(loaded_config.port, 0);
        assert_eq!(loaded_config.path, "test");

        // Truncate the file.
        let file = File::create(&test_file).unwrap();
        file.set_len(0).unwrap();
        let loaded_config: MockConfig = from_file!(&test_file)?;
        assert_eq!(loaded_config, MockConfig::default());

        Ok(())
    }
}

@@ -27,10 +27,10 @@ mod tests {

    impl Repl {
        fn send_line(&mut self, line: &str) {
            let _ = self.repl.send_line(line).unwrap();
            self.repl.send_line(line).unwrap();

            // read a line to consume the prompt
            let _ = self.read_line();
            self.read_line();
        }

        fn read_line(&mut self) -> String {
@@ -51,7 +51,7 @@ mod tests {
    #[ignore]
    #[test]
    fn test_repl() {
        let data_home = create_temp_dir("data");
        let data_dir = create_temp_dir("data");
        let wal_dir = create_temp_dir("wal");

        let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@@ -65,7 +65,7 @@ mod tests {
            "start",
            "--rpc-addr=0.0.0.0:4321",
            "--node-id=1",
            &format!("--data-home={}", data_home.path().display()),
            &format!("--data-dir={}", data_dir.path().display()),
            &format!("--wal-dir={}", wal_dir.path().display()),
        ])
        .stdout(Stdio::null())
@@ -76,7 +76,7 @@ mod tests {
        std::thread::sleep(Duration::from_secs(3));

        let mut repl_cmd = Command::new("./greptime");
        let _ = repl_cmd.current_dir(bin_path).args([
        repl_cmd.current_dir(bin_path).args([
            "--log-level=off",
            "cli",
            "attach",
@@ -105,7 +105,7 @@ mod tests {
        test_select(repl);

        datanode.kill().unwrap();
        let _ = datanode.wait().unwrap();
        datanode.wait().unwrap();
    }

    fn test_create_database(repl: &mut Repl) {

@@ -92,14 +92,6 @@ impl StringBytes {
    pub fn as_utf8(&self) -> &str {
        unsafe { std::str::from_utf8_unchecked(&self.0) }
    }

    pub fn len(&self) -> usize {
        self.0.len()
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl From<String> for StringBytes {
@@ -186,17 +178,6 @@ mod tests {
        assert_eq!(world, &bytes);
    }

    #[test]
    fn test_bytes_len() {
        let hello = b"hello".to_vec();
        let bytes = Bytes::from(hello.clone());
        assert_eq!(bytes.len(), hello.len());

        let zero = b"".to_vec();
        let bytes = Bytes::from(zero);
        assert!(bytes.is_empty());
    }

    #[test]
    fn test_string_bytes_from() {
        let hello = "hello".to_string();
@@ -210,17 +191,6 @@ mod tests {
        assert_eq!(&bytes, world);
    }

    #[test]
    fn test_string_bytes_len() {
        let hello = "hello".to_string();
        let bytes = StringBytes::from(hello.clone());
        assert_eq!(bytes.len(), hello.len());

        let zero = "".to_string();
        let bytes = StringBytes::from(zero);
        assert!(bytes.is_empty());
    }

    fn check_str(expect: &str, given: &str) {
        assert_eq!(expect, given);
    }

@@ -15,64 +15,9 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
pub mod paths;
#[allow(clippy::all)]
pub mod readable_size;

use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};

pub use bit_vec::BitVec;

#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
}

impl Plugins {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(anymap::Map::new())),
}
}

fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
self.inner.lock().unwrap()
}

pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let _ = self.lock().insert(value);
}

pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
let binding = self.lock();
binding.get::<T>().cloned()
}

pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
where
F: FnOnce(Option<&mut T>) -> R,
{
let mut binding = self.lock();
let opt = binding.get_mut::<T>();
mapper(opt)
}

pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
where
F: FnOnce(&T) -> R,
{
let binding = self.lock();
binding.get::<T>().map(mapper)
}

pub fn len(&self) -> usize {
let binding = self.lock();
binding.len()
}

pub fn is_empty(&self) -> bool {
let binding = self.lock();
binding.is_empty()
}
}
pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;

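Since this hunk swaps the bare anymap type alias for a cloneable, mutex-guarded wrapper, here is a hedged usage sketch of the Plugins API shown above; QueryLimiter is a made-up plugin type used only for illustration.

// Hedged usage sketch of the Plugins container above (the registry is keyed
// by type, one value per type).
#[derive(Clone)]
struct QueryLimiter {
    max_rows: usize,
}

fn plugin_roundtrip() {
    let plugins = Plugins::new();
    plugins.insert(QueryLimiter { max_rows: 10_000 });

    // `get` clones the stored value out from behind the mutex.
    let limiter: Option<QueryLimiter> = plugins.get();
    assert_eq!(limiter.map(|l| l.max_rows), Some(10_000));

    // `map_mut` mutates in place without cloning.
    plugins.map_mut::<QueryLimiter, _, _>(|l| {
        if let Some(l) = l {
            l.max_rows = 20_000;
        }
    });
    assert_eq!(plugins.len(), 1);
}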
@@ -1,25 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Path constants for table engines, cluster states and WAL
/// All paths relative to data_home(file storage) or root path(S3, OSS etc).

/// WAL dir for local file storage
pub const WAL_DIR: &str = "wal/";

/// Data dir for table engines
pub const DATA_DIR: &str = "data/";

/// Cluster state dir
pub const CLUSTER_DIR: &str = "cluster/";
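A hedged sketch of how these constants are meant to be combined with the configured data home; the resolve_storage_dirs helper is illustrative, not part of the diff.

// Hedged sketch: join the path constants above onto the configured data home.
fn resolve_storage_dirs(data_home: &str) -> (String, String, String) {
    let join = |dir: &str| format!("{}/{}", data_home.trim_end_matches('/'), dir);
    (join(WAL_DIR), join(DATA_DIR), join(CLUSTER_DIR))
}

// resolve_storage_dirs("/var/lib/greptimedb")
//   => ("/var/lib/greptimedb/wal/", "/var/lib/greptimedb/data/", "/var/lib/greptimedb/cluster/")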
@@ -119,7 +119,7 @@ mod tests {
|
||||
#[test]
|
||||
fn [<test_read_write_ $num_ty _from_vec_buffer>]() {
|
||||
let mut buf = vec![];
|
||||
let _ = buf.[<write_ $num_ty _le>]($num_ty::MAX).unwrap();
|
||||
assert!(buf.[<write_ $num_ty _le>]($num_ty::MAX).is_ok());
|
||||
assert_eq!($num_ty::MAX, buf.as_slice().[<read_ $num_ty _le>]().unwrap());
|
||||
}
|
||||
}
|
||||
@@ -132,7 +132,7 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_peek_write_from_vec_buffer() {
|
||||
let mut buf: Vec<u8> = vec![];
|
||||
buf.write_from_slice("hello".as_bytes()).unwrap();
|
||||
assert!(buf.write_from_slice("hello".as_bytes()).is_ok());
|
||||
let mut slice = buf.as_slice();
|
||||
assert_eq!(104, slice.peek_u8_le().unwrap());
|
||||
slice.advance_by(1);
|
||||
|
||||
@@ -21,18 +21,10 @@ pub const DEFAULT_SCHEMA_NAME: &str = "public";
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
/// User defined table id starts from this value.
pub const MIN_USER_TABLE_ID: u32 = 1024;
/// the max system table id
pub const MAX_SYS_TABLE_ID: u32 = MIN_USER_TABLE_ID - 1;
/// system_catalog table id
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
/// numbers table id
pub const NUMBERS_TABLE_ID: u32 = 2;

pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";

pub const SEMANTIC_TYPE_PRIMARY_KEY: &str = "PRIMARY KEY";
pub const SEMANTIC_TYPE_FIELD: &str = "FIELD";
pub const SEMANTIC_TYPE_TIME_INDEX: &str = "TIME INDEX";

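A hedged one-liner showing the reservation rule these constants encode; is_system_table_id is an illustrative helper, not part of the diff.

// Ids below MIN_USER_TABLE_ID are system tables, everything else is user-defined.
fn is_system_table_id(table_id: u32) -> bool {
    table_id < MIN_USER_TABLE_ID // i.e. table_id <= MAX_SYS_TABLE_ID
}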
@@ -36,6 +36,9 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: serde_json::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse node id: {}", key))]
|
||||
ParseNodeId { key: String, location: Location },
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -44,6 +47,7 @@ impl ErrorExt for Error {
|
||||
Error::InvalidCatalog { .. }
|
||||
| Error::DeserializeCatalogEntryValue { .. }
|
||||
| Error::SerializeCatalogEntryValue { .. } => StatusCode::Unexpected,
|
||||
Error::ParseNodeId { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use consts::DEFAULT_CATALOG_NAME;

pub mod consts;
pub mod error;

@@ -22,23 +20,3 @@ pub mod error;
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{catalog}.{schema}.{table}")
}

/// Build db name from catalog and schema string
pub fn build_db_string(catalog: &str, schema: &str) -> String {
if catalog == DEFAULT_CATALOG_NAME {
schema.to_string()
} else {
format!("{catalog}-{schema}")
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_db_string() {
assert_eq!("test", build_db_string(DEFAULT_CATALOG_NAME, "test"));
assert_eq!("a0b1c2d3-test", build_db_string("a0b1c2d3", "test"));
}
}

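A hedged sketch of the inverse mapping implied by build_db_string; parse_db_string and the fallback to DEFAULT_CATALOG_NAME are assumptions for illustration only, and schemas containing '-' would need extra handling that this sketch ignores.

fn parse_db_string(db: &str) -> (String, String) {
    match db.split_once('-') {
        // "catalog-schema" form produced by build_db_string for non-default catalogs.
        Some((catalog, schema)) => (catalog.to_string(), schema.to_string()),
        // A bare schema implies the default catalog.
        None => (DEFAULT_CATALOG_NAME.to_string(), db.to_string()),
    }
}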
@@ -16,21 +16,13 @@ async-compression = { version = "0.3", features = [
"tokio",
] }
async-trait.workspace = true
bytes = "1.1"
common-base = { path = "../base" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion.workspace = true
derive_builder = "0.12"
futures.workspace = true
object-store = { path = "../../object-store" }
orc-rust = { git = "https://github.com/WenyXu/orc-rs.git", rev = "0319acd32456e403c20f135cc012441a76852605" }
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
tokio-util.workspace = true
url = "2.3"
paste = "1.0"

[dev-dependencies]
common-test-util = { path = "../test-util" }

@@ -1,167 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use async_trait::async_trait;
|
||||
use datafusion::parquet::format::FileMetaData;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::io::{AsyncWrite, AsyncWriteExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
pub struct LazyBufferedWriter<T, U, F> {
|
||||
path: String,
|
||||
writer_factory: F,
|
||||
writer: Option<T>,
|
||||
/// None stands for [`LazyBufferedWriter`] closed.
|
||||
encoder: Option<U>,
|
||||
buffer: SharedBuffer,
|
||||
rows_written: usize,
|
||||
bytes_written: u64,
|
||||
threshold: usize,
|
||||
}
|
||||
|
||||
pub trait DfRecordBatchEncoder {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ArrowWriterCloser {
|
||||
async fn close(mut self) -> Result<FileMetaData>;
|
||||
}
|
||||
|
||||
impl<
|
||||
T: AsyncWrite + Send + Unpin,
|
||||
U: DfRecordBatchEncoder + ArrowWriterCloser,
|
||||
F: FnMut(String) -> Fut,
|
||||
Fut: Future<Output = Result<T>>,
|
||||
> LazyBufferedWriter<T, U, F>
|
||||
{
|
||||
/// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
|
||||
/// if any row's been written.
|
||||
pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
|
||||
let encoder = self
|
||||
.encoder
|
||||
.take()
|
||||
.context(error::BufferedWriterClosedSnafu)?;
|
||||
let metadata = encoder.close().await?;
|
||||
|
||||
// Use `rows_written` to keep a track of if any rows have been written.
|
||||
// If no row's been written, then we can simply close the underlying
|
||||
// writer without flush so that no file will be actually created.
|
||||
if self.rows_written != 0 {
|
||||
self.bytes_written += self.try_flush(true).await?;
|
||||
}
|
||||
// It's important to shut down! flushes all pending writes
|
||||
self.close_inner_writer().await?;
|
||||
Ok((metadata, self.bytes_written))
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
T: AsyncWrite + Send + Unpin,
|
||||
U: DfRecordBatchEncoder,
|
||||
F: FnMut(String) -> Fut,
|
||||
Fut: Future<Output = Result<T>>,
|
||||
> LazyBufferedWriter<T, U, F>
|
||||
{
|
||||
/// Closes the writer without flushing the buffer data.
|
||||
pub async fn close_inner_writer(&mut self) -> Result<()> {
|
||||
if let Some(writer) = &mut self.writer {
|
||||
writer.shutdown().await.context(error::AsyncWriteSnafu)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
threshold: usize,
|
||||
buffer: SharedBuffer,
|
||||
encoder: U,
|
||||
path: impl AsRef<str>,
|
||||
writer_factory: F,
|
||||
) -> Self {
|
||||
Self {
|
||||
path: path.as_ref().to_string(),
|
||||
threshold,
|
||||
encoder: Some(encoder),
|
||||
buffer,
|
||||
rows_written: 0,
|
||||
bytes_written: 0,
|
||||
writer_factory,
|
||||
writer: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
let encoder = self
|
||||
.encoder
|
||||
.as_mut()
|
||||
.context(error::BufferedWriterClosedSnafu)?;
|
||||
encoder.write(batch)?;
|
||||
self.rows_written += batch.num_rows();
|
||||
self.bytes_written += self.try_flush(false).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn try_flush(&mut self, all: bool) -> Result<u64> {
|
||||
let mut bytes_written: u64 = 0;
|
||||
|
||||
// Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
|
||||
// and write to underlying storage.
|
||||
while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
|
||||
let chunk = {
|
||||
let mut buffer = self.buffer.buffer.lock().unwrap();
|
||||
buffer.split_to(self.threshold)
|
||||
};
|
||||
let size = chunk.len();
|
||||
|
||||
self.maybe_init_writer()
|
||||
.await?
|
||||
.write_all(&chunk)
|
||||
.await
|
||||
.context(error::AsyncWriteSnafu)?;
|
||||
|
||||
bytes_written += size as u64;
|
||||
}
|
||||
|
||||
if all {
|
||||
bytes_written += self.try_flush_all().await?;
|
||||
}
|
||||
Ok(bytes_written)
|
||||
}
|
||||
|
||||
/// Only initiates underlying file writer when rows have been written.
|
||||
async fn maybe_init_writer(&mut self) -> Result<&mut T> {
|
||||
if let Some(ref mut writer) = self.writer {
|
||||
Ok(writer)
|
||||
} else {
|
||||
let writer = (self.writer_factory)(self.path.clone()).await?;
|
||||
Ok(self.writer.insert(writer))
|
||||
}
|
||||
}
|
||||
|
||||
async fn try_flush_all(&mut self) -> Result<u64> {
|
||||
let remain = self.buffer.buffer.lock().unwrap().split();
|
||||
let size = remain.len();
|
||||
self.maybe_init_writer()
|
||||
.await?
|
||||
.write_all(&remain)
|
||||
.await
|
||||
.context(error::AsyncWriteSnafu)?;
|
||||
Ok(size as u64)
|
||||
}
|
||||
}
|
||||
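A hedged usage sketch of the LazyBufferedWriter API above, assuming an object-store-backed async writer; open_store_writer is a hypothetical factory standing in for the closure the real code builds around ObjectStore::writer.

// Hedged sketch: drive LazyBufferedWriter with an Arrow CSV encoder over a
// SharedBuffer. `open_store_writer` is hypothetical, for illustration only.
async fn write_batches(
    batches: Vec<arrow::record_batch::RecordBatch>,
    buffer: SharedBuffer,
) -> Result<u64> {
    let encoder = arrow::csv::Writer::new(buffer.clone());
    let mut writer = LazyBufferedWriter::new(
        4 * 1024 * 1024, // flush in ~4MB chunks once the buffer passes the threshold
        buffer,
        encoder,
        "data/output.csv",
        |path| async move { open_store_writer(&path).await },
    );

    for batch in &batches {
        // Encodes into the shared buffer, then flushes any whole chunks.
        writer.write(batch).await?;
    }

    let remaining = writer.try_flush(true).await?; // flush whatever is left
    writer.close_inner_writer().await?; // shut down the underlying writer
    Ok(remaining)
}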
@@ -13,29 +13,24 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::io;
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
|
||||
use async_compression::tokio::write;
|
||||
use bytes::Bytes;
|
||||
use futures::Stream;
|
||||
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio_util::io::{ReaderStream, StreamReader};
|
||||
use tokio::io::{AsyncRead, BufReader};
|
||||
|
||||
use crate::error::{self, Error, Result};
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum CompressionType {
|
||||
/// Gzip-ed file
|
||||
Gzip,
|
||||
GZIP,
|
||||
/// Bzip2-ed file
|
||||
Bzip2,
|
||||
BZIP2,
|
||||
/// Xz-ed file (liblzma)
|
||||
Xz,
|
||||
XZ,
|
||||
/// Zstd-ed file,
|
||||
Zstd,
|
||||
ZSTD,
|
||||
/// Uncompressed file
|
||||
Uncompressed,
|
||||
UNCOMPRESSED,
|
||||
}
|
||||
|
||||
impl FromStr for CompressionType {
|
||||
@@ -44,11 +39,11 @@ impl FromStr for CompressionType {
|
||||
fn from_str(s: &str) -> Result<Self> {
|
||||
let s = s.to_uppercase();
|
||||
match s.as_str() {
|
||||
"GZIP" | "GZ" => Ok(Self::Gzip),
|
||||
"BZIP2" | "BZ2" => Ok(Self::Bzip2),
|
||||
"XZ" => Ok(Self::Xz),
|
||||
"ZST" | "ZSTD" => Ok(Self::Zstd),
|
||||
"" => Ok(Self::Uncompressed),
|
||||
"GZIP" | "GZ" => Ok(Self::GZIP),
|
||||
"BZIP2" | "BZ2" => Ok(Self::BZIP2),
|
||||
"XZ" => Ok(Self::XZ),
|
||||
"ZST" | "ZSTD" => Ok(Self::ZSTD),
|
||||
"" => Ok(Self::UNCOMPRESSED),
|
||||
_ => error::UnsupportedCompressionTypeSnafu {
|
||||
compression_type: s,
|
||||
}
|
||||
@@ -60,121 +55,30 @@ impl FromStr for CompressionType {
|
||||
impl Display for CompressionType {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(match self {
|
||||
Self::Gzip => "GZIP",
|
||||
Self::Bzip2 => "BZIP2",
|
||||
Self::Xz => "XZ",
|
||||
Self::Zstd => "ZSTD",
|
||||
Self::Uncompressed => "",
|
||||
Self::GZIP => "GZIP",
|
||||
Self::BZIP2 => "BZIP2",
|
||||
Self::XZ => "XZ",
|
||||
Self::ZSTD => "ZSTD",
|
||||
Self::UNCOMPRESSED => "",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl CompressionType {
|
||||
pub const fn is_compressed(&self) -> bool {
|
||||
!matches!(self, &Self::Uncompressed)
|
||||
!matches!(self, &Self::UNCOMPRESSED)
|
||||
}
|
||||
|
||||
pub const fn file_extension(&self) -> &'static str {
|
||||
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn AsyncRead + Unpin + Send> {
|
||||
match self {
|
||||
Self::Gzip => "gz",
|
||||
Self::Bzip2 => "bz2",
|
||||
Self::Xz => "xz",
|
||||
Self::Zstd => "zst",
|
||||
Self::Uncompressed => "",
|
||||
CompressionType::GZIP => Box::new(GzipDecoder::new(BufReader::new(s))),
|
||||
CompressionType::BZIP2 => Box::new(BzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::XZ => Box::new(XzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::ZSTD => Box::new(ZstdDecoder::new(BufReader::new(s))),
|
||||
CompressionType::UNCOMPRESSED => Box::new(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_compression_type {
|
||||
($(($enum_item:ident, $prefix:ident)),*) => {
|
||||
paste::item! {
|
||||
impl CompressionType {
|
||||
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
||||
match self {
|
||||
$(
|
||||
CompressionType::$enum_item => {
|
||||
let mut buffer = Vec::with_capacity(content.as_ref().len());
|
||||
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
|
||||
encoder.write_all(content.as_ref()).await?;
|
||||
encoder.shutdown().await?;
|
||||
Ok(buffer)
|
||||
}
|
||||
)*
|
||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
||||
match self {
|
||||
$(
|
||||
CompressionType::$enum_item => {
|
||||
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
|
||||
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
|
||||
encoder.write_all(content.as_ref()).await?;
|
||||
encoder.shutdown().await?;
|
||||
Ok(buffer)
|
||||
}
|
||||
)*
|
||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn AsyncRead + Unpin + Send> {
|
||||
match self {
|
||||
$(CompressionType::$enum_item => Box::new([<$prefix Decoder>]::new(BufReader::new(s))),)*
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
|
||||
match self {
|
||||
$(CompressionType::$enum_item => Box::new(ReaderStream::new([<$prefix Decoder>]::new(StreamReader::new(s)))),)*
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::CompressionType;
|
||||
|
||||
$(
|
||||
#[tokio::test]
|
||||
async fn [<test_ $enum_item:lower _compression>]() {
|
||||
let string = "foo_bar".as_bytes().to_vec();
|
||||
let compress = CompressionType::$enum_item
|
||||
.encode(&string)
|
||||
.await
|
||||
.unwrap();
|
||||
let decompress = CompressionType::$enum_item
|
||||
.decode(&compress)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(decompress, string);
|
||||
})*
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_uncompression() {
|
||||
let string = "foo_bar".as_bytes().to_vec();
|
||||
let compress = CompressionType::Uncompressed
|
||||
.encode(&string)
|
||||
.await
|
||||
.unwrap();
|
||||
let decompress = CompressionType::Uncompressed
|
||||
.decode(&compress)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(decompress, string);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_compression_type!((Gzip, Gzip), (Bzip2, Bz), (Xz, Xz), (Zstd, Zstd));
|
||||
|
||||
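A hedged round-trip sketch of the encode/decode methods the macro above generates, assuming the camel-case variant names used in the impl_compression_type! invocation.

async fn zstd_roundtrip() -> std::io::Result<()> {
    let payload = b"foo_bar".to_vec();
    let compressed = CompressionType::Zstd.encode(&payload).await?;
    let decompressed = CompressionType::Zstd.decode(&compressed).await?;
    assert_eq!(decompressed, payload);

    // Parsing is case-insensitive and "" maps to the uncompressed variant.
    assert_eq!("zst".parse::<CompressionType>().ok(), Some(CompressionType::Zstd));
    Ok(())
}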
@@ -14,9 +14,7 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use arrow_schema::ArrowError;
|
||||
use common_error::prelude::*;
|
||||
use datafusion::parquet::errors::ParquetError;
|
||||
use snafu::Location;
|
||||
use url::ParseError;
|
||||
|
||||
@@ -24,27 +22,23 @@ use url::ParseError;
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported compression type: {}", compression_type))]
|
||||
UnsupportedCompressionType {
|
||||
compression_type: String,
|
||||
location: Location,
|
||||
},
|
||||
UnsupportedCompressionType { compression_type: String },
|
||||
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
UnsupportedBackendProtocol {
|
||||
protocol: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported format protocol: {}", format))]
|
||||
UnsupportedFormat { format: String, location: Location },
|
||||
UnsupportedBackendProtocol { protocol: String },
|
||||
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String, location: Location },
|
||||
EmptyHostPath { url: String },
|
||||
|
||||
#[snafu(display("Invalid path: {}", path))]
|
||||
InvalidPath { path: String },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
InvalidUrl {
|
||||
url: String,
|
||||
source: ParseError,
|
||||
InvalidUrl { url: String, source: ParseError },
|
||||
|
||||
#[snafu(display("Failed to decompression, source: {}", source))]
|
||||
Decompression {
|
||||
source: object_store::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -54,12 +48,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build orc reader, source: {}", source))]
|
||||
OrcReader {
|
||||
location: Location,
|
||||
source: orc_rust::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
|
||||
ReadObject {
|
||||
path: String,
|
||||
@@ -67,37 +55,6 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write object to path: {}, source: {}", path, source))]
|
||||
WriteObject {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write: {}", source))]
|
||||
AsyncWrite {
|
||||
source: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write record batch: {}", source))]
|
||||
WriteRecordBatch {
|
||||
location: Location,
|
||||
source: ArrowError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode record batch: {}", source))]
|
||||
EncodeRecordBatch {
|
||||
location: Location,
|
||||
source: ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read record batch: {}", source))]
|
||||
ReadRecordBatch {
|
||||
location: Location,
|
||||
source: datafusion::error::DataFusionError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read parquet source: {}", source))]
|
||||
ReadParquetSnafu {
|
||||
location: Location,
|
||||
@@ -110,8 +67,9 @@ pub enum Error {
|
||||
source: datafusion::parquet::errors::ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to infer schema from file, source: {}", source))]
|
||||
#[snafu(display("Failed to infer schema from file: {}, source: {}", path, source))]
|
||||
InferSchema {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: arrow_schema::ArrowError,
|
||||
},
|
||||
@@ -124,29 +82,13 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String, location: Location },
|
||||
InvalidConnection { msg: String },
|
||||
|
||||
#[snafu(display("Failed to join handle: {}", source))]
|
||||
JoinHandle {
|
||||
location: Location,
|
||||
source: tokio::task::JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
|
||||
ParseFormat {
|
||||
key: &'static str,
|
||||
value: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to merge schema: {}", source))]
|
||||
MergeSchema {
|
||||
source: arrow_schema::ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Buffered writer closed"))]
|
||||
BufferedWriterClosed { location: Location },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -155,30 +97,21 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
BuildBackend { .. }
|
||||
| ListObjects { .. }
|
||||
| ReadObject { .. }
|
||||
| WriteObject { .. }
|
||||
| AsyncWrite { .. } => StatusCode::StorageUnavailable,
|
||||
BuildBackend { .. } | ListObjects { .. } | ReadObject { .. } => {
|
||||
StatusCode::StorageUnavailable
|
||||
}
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| UnsupportedCompressionType { .. }
|
||||
| UnsupportedFormat { .. }
|
||||
| InvalidConnection { .. }
|
||||
| InvalidUrl { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. }
|
||||
| InferSchema { .. }
|
||||
| ReadParquetSnafu { .. }
|
||||
| ParquetToSchema { .. }
|
||||
| ParseFormat { .. }
|
||||
| MergeSchema { .. } => StatusCode::InvalidArguments,
|
||||
| ParquetToSchema { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
JoinHandle { .. }
|
||||
| ReadRecordBatch { .. }
|
||||
| WriteRecordBatch { .. }
|
||||
| EncodeRecordBatch { .. }
|
||||
| BufferedWriterClosed { .. }
|
||||
| OrcReader { .. } => StatusCode::Unexpected,
|
||||
Decompression { .. } | JoinHandle { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,29 +122,21 @@ impl ErrorExt for Error {
|
||||
fn location_opt(&self) -> Option<common_error::snafu::Location> {
|
||||
use Error::*;
|
||||
match self {
|
||||
OrcReader { location, .. } => Some(*location),
|
||||
BuildBackend { location, .. } => Some(*location),
|
||||
ReadObject { location, .. } => Some(*location),
|
||||
ListObjects { location, .. } => Some(*location),
|
||||
InferSchema { location, .. } => Some(*location),
|
||||
ReadParquetSnafu { location, .. } => Some(*location),
|
||||
ParquetToSchema { location, .. } => Some(*location),
|
||||
Decompression { location, .. } => Some(*location),
|
||||
JoinHandle { location, .. } => Some(*location),
|
||||
ParseFormat { location, .. } => Some(*location),
|
||||
MergeSchema { location, .. } => Some(*location),
|
||||
WriteObject { location, .. } => Some(*location),
|
||||
ReadRecordBatch { location, .. } => Some(*location),
|
||||
WriteRecordBatch { location, .. } => Some(*location),
|
||||
AsyncWrite { location, .. } => Some(*location),
|
||||
EncodeRecordBatch { location, .. } => Some(*location),
|
||||
BufferedWriterClosed { location, .. } => Some(*location),
|
||||
|
||||
UnsupportedBackendProtocol { location, .. } => Some(*location),
|
||||
EmptyHostPath { location, .. } => Some(*location),
|
||||
InvalidUrl { location, .. } => Some(*location),
|
||||
InvalidConnection { location, .. } => Some(*location),
|
||||
UnsupportedCompressionType { location, .. } => Some(*location),
|
||||
UnsupportedFormat { location, .. } => Some(*location),
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. }
|
||||
| InvalidUrl { .. }
|
||||
| InvalidConnection { .. }
|
||||
| UnsupportedCompressionType { .. } => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
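A hedged sketch of how callers might branch on the status codes assigned above; is_user_error is an illustrative helper and assumes the ErrorExt trait is in scope.

fn is_user_error(err: &Error) -> bool {
    // InvalidArguments covers the unsupported-format / bad-url style failures above.
    matches!(err.status_code(), StatusCode::InvalidArguments)
}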
@@ -14,207 +14,17 @@
|
||||
|
||||
pub mod csv;
|
||||
pub mod json;
|
||||
pub mod orc;
|
||||
pub mod parquet;
|
||||
#[cfg(test)]
|
||||
pub mod tests;
|
||||
|
||||
pub const DEFAULT_SCHEMA_INFER_MAX_RECORD: usize = 1000;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
use std::task::Poll;
|
||||
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::{ArrowError, Schema as ArrowSchema};
|
||||
use arrow::datatypes::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes};
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::physical_plan::file_format::FileOpenFuture;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use futures::StreamExt;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use self::csv::CsvFormat;
|
||||
use self::json::JsonFormat;
|
||||
use self::orc::OrcFormat;
|
||||
use self::parquet::ParquetFormat;
|
||||
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
||||
pub const FORMAT_DELIMITER: &str = "delimiter";
|
||||
pub const FORMAT_SCHEMA_INFER_MAX_RECORD: &str = "schema_infer_max_record";
|
||||
pub const FORMAT_HAS_HEADER: &str = "has_header";
|
||||
pub const FORMAT_TYPE: &str = "format";
|
||||
pub const FILE_PATTERN: &str = "pattern";
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Format {
|
||||
Csv(CsvFormat),
|
||||
Json(JsonFormat),
|
||||
Parquet(ParquetFormat),
|
||||
Orc(OrcFormat),
|
||||
}
|
||||
|
||||
impl Format {
|
||||
pub fn suffix(&self) -> &'static str {
|
||||
match self {
|
||||
Format::Csv(_) => ".csv",
|
||||
Format::Json(_) => ".json",
|
||||
Format::Parquet(_) => ".parquet",
|
||||
&Format::Orc(_) => ".orc",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for Format {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(options: &HashMap<String, String>) -> Result<Self> {
|
||||
let format = options
|
||||
.get(FORMAT_TYPE)
|
||||
.map(|format| format.to_ascii_uppercase())
|
||||
.unwrap_or_else(|| "PARQUET".to_string());
|
||||
|
||||
match format.as_str() {
|
||||
"CSV" => Ok(Self::Csv(CsvFormat::try_from(options)?)),
|
||||
"JSON" => Ok(Self::Json(JsonFormat::try_from(options)?)),
|
||||
"PARQUET" => Ok(Self::Parquet(ParquetFormat::default())),
|
||||
"ORC" => Ok(Self::Orc(OrcFormat)),
|
||||
_ => error::UnsupportedFormatSnafu { format: &format }.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
use crate::error::Result;
|
||||
|
||||
#[async_trait]
|
||||
pub trait FileFormat: Send + Sync + std::fmt::Debug {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<ArrowSchema>;
|
||||
}
|
||||
|
||||
pub trait ArrowDecoder: Send + 'static {
|
||||
/// Decode records from `buf` returning the number of bytes read.
|
||||
///
|
||||
/// This method returns `Ok(0)` once `batch_size` objects have been parsed since the
|
||||
/// last call to [`Self::flush`], or `buf` is exhausted.
|
||||
///
|
||||
/// Any remaining bytes should be included in the next call to [`Self::decode`].
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError>;
|
||||
|
||||
/// Flushes the currently buffered data to a [`RecordBatch`].
|
||||
///
|
||||
/// This should only be called after [`Self::decode`] has returned `Ok(0)`,
|
||||
/// otherwise may return an error if part way through decoding a record
|
||||
///
|
||||
/// Returns `Ok(None)` if no buffered data.
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError>;
|
||||
}
|
||||
|
||||
impl ArrowDecoder for arrow::csv::reader::Decoder {
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
|
||||
self.decode(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
impl ArrowDecoder for arrow::json::RawDecoder {
|
||||
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
|
||||
self.decode(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> result::Result<Option<RecordBatch>, ArrowError> {
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
|
||||
object_store: Arc<ObjectStore>,
|
||||
path: String,
|
||||
compression_type: CompressionType,
|
||||
decoder_factory: F,
|
||||
) -> DataFusionResult<FileOpenFuture> {
|
||||
let mut decoder = decoder_factory()?;
|
||||
Ok(Box::pin(async move {
|
||||
let reader = object_store
|
||||
.reader(&path)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
let mut upstream = compression_type.convert_stream(reader).fuse();
|
||||
|
||||
let mut buffered = Bytes::new();
|
||||
|
||||
let stream = futures::stream::poll_fn(move |cx| {
|
||||
loop {
|
||||
if buffered.is_empty() {
|
||||
if let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) {
|
||||
buffered = result?;
|
||||
};
|
||||
}
|
||||
|
||||
let decoded = decoder.decode(buffered.as_ref())?;
|
||||
|
||||
if decoded == 0 {
|
||||
break;
|
||||
} else {
|
||||
buffered.advance(decoded);
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(decoder.flush().transpose())
|
||||
});
|
||||
|
||||
Ok(stream.boxed())
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn infer_schemas(
|
||||
store: &ObjectStore,
|
||||
files: &[String],
|
||||
file_format: &dyn FileFormat,
|
||||
) -> Result<ArrowSchema> {
|
||||
let mut schemas = Vec::with_capacity(files.len());
|
||||
for file in files {
|
||||
schemas.push(file_format.infer_schema(store, file).await?)
|
||||
}
|
||||
ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
|
||||
}
|
||||
|
||||
pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
|
||||
mut stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
encoder_factory: U,
|
||||
) -> Result<usize> {
|
||||
let buffer = SharedBuffer::with_capacity(threshold);
|
||||
let encoder = encoder_factory(buffer.clone());
|
||||
let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
|
||||
store
|
||||
.writer(&path)
|
||||
.await
|
||||
.context(error::WriteObjectSnafu { path })
|
||||
});
|
||||
|
||||
let mut rows = 0;
|
||||
|
||||
while let Some(batch) = stream.next().await {
|
||||
let batch = batch.context(error::ReadRecordBatchSnafu)?;
|
||||
writer.write(&batch).await?;
|
||||
rows += batch.num_rows();
|
||||
}
|
||||
|
||||
// Flushes all pending writes
|
||||
let _ = writer.try_flush(true).await?;
|
||||
writer.close_inner_writer().await?;
|
||||
|
||||
Ok(rows)
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef>;
|
||||
}
|
||||
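A hedged sketch of building a Format from the option keys defined above; the option values are examples only.

fn format_from_options() -> Result<Format> {
    let options = std::collections::HashMap::from([
        (FORMAT_TYPE.to_string(), "csv".to_string()),
        (FORMAT_HAS_HEADER.to_string(), "true".to_string()),
        (FORMAT_COMPRESSION_TYPE.to_string(), "gzip".to_string()),
    ]);
    // Unknown formats fail with UnsupportedFormat; when FORMAT_TYPE is absent
    // the default is PARQUET.
    Format::try_from(&options)
}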
|
||||
@@ -12,33 +12,21 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::csv;
|
||||
#[allow(deprecated)]
|
||||
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::{Schema, SchemaRef};
|
||||
use arrow_schema::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use common_runtime;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use derive_builder::Builder;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use tokio_util::io::SyncIoBridge;
|
||||
|
||||
use super::stream_to_file;
|
||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::{self, open_with_decoder, FileFormat};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
use crate::file_format::{self, FileFormat};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug)]
|
||||
pub struct CsvFormat {
|
||||
pub has_header: bool,
|
||||
pub delimiter: u8,
|
||||
@@ -46,128 +34,24 @@ pub struct CsvFormat {
|
||||
pub compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for CsvFormat {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
|
||||
let mut format = CsvFormat::default();
|
||||
if let Some(delimiter) = value.get(file_format::FORMAT_DELIMITER) {
|
||||
// TODO(weny): considers to support parse like "\t" (not only b'\t')
|
||||
format.delimiter = u8::from_str(delimiter).map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_DELIMITER,
|
||||
value: delimiter,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
};
|
||||
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
|
||||
format.compression_type = CompressionType::from_str(compression_type)?;
|
||||
};
|
||||
if let Some(schema_infer_max_record) =
|
||||
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
|
||||
{
|
||||
format.schema_infer_max_record =
|
||||
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
value: schema_infer_max_record,
|
||||
}
|
||||
.build()
|
||||
})?);
|
||||
};
|
||||
if let Some(has_header) = value.get(file_format::FORMAT_HAS_HEADER) {
|
||||
format.has_header = has_header.parse().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_HAS_HEADER,
|
||||
value: has_header,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
}
|
||||
Ok(format)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CsvFormat {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
has_header: true,
|
||||
delimiter: b',',
|
||||
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
|
||||
compression_type: CompressionType::Uncompressed,
|
||||
compression_type: CompressionType::UNCOMPRESSED,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Builder)]
|
||||
pub struct CsvConfig {
|
||||
batch_size: usize,
|
||||
file_schema: SchemaRef,
|
||||
#[builder(default = "None")]
|
||||
file_projection: Option<Vec<usize>>,
|
||||
#[builder(default = "true")]
|
||||
has_header: bool,
|
||||
#[builder(default = "b','")]
|
||||
delimiter: u8,
|
||||
}
|
||||
|
||||
impl CsvConfig {
|
||||
fn builder(&self) -> csv::ReaderBuilder {
|
||||
let mut builder = csv::ReaderBuilder::new(self.file_schema.clone())
|
||||
.with_delimiter(self.delimiter)
|
||||
.with_batch_size(self.batch_size)
|
||||
.has_header(self.has_header);
|
||||
|
||||
if let Some(proj) = &self.file_projection {
|
||||
builder = builder.with_projection(proj.clone());
|
||||
}
|
||||
|
||||
builder
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CsvOpener {
|
||||
config: Arc<CsvConfig>,
|
||||
object_store: Arc<ObjectStore>,
|
||||
compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl CsvOpener {
|
||||
/// Return a new [`CsvOpener`]. The caller must ensure [`CsvConfig`].file_schema must correspond to the opening file.
|
||||
pub fn new(
|
||||
config: CsvConfig,
|
||||
object_store: ObjectStore,
|
||||
compression_type: CompressionType,
|
||||
) -> Self {
|
||||
CsvOpener {
|
||||
config: Arc::new(config),
|
||||
object_store: Arc::new(object_store),
|
||||
compression_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FileOpener for CsvOpener {
|
||||
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
|
||||
open_with_decoder(
|
||||
self.object_store.clone(),
|
||||
meta.location().to_string(),
|
||||
self.compression_type,
|
||||
|| Ok(self.config.builder().build_decoder()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[async_trait]
|
||||
impl FileFormat for CsvFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
let reader = store
|
||||
.reader(path)
|
||||
.reader(&path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
|
||||
let decoded = self.compression_type.convert_async_read(reader);
|
||||
|
||||
@@ -180,40 +64,20 @@ impl FileFormat for CsvFormat {
|
||||
|
||||
let (schema, _records_read) =
|
||||
infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
|
||||
.context(error::InferSchemaSnafu)?;
|
||||
Ok(schema)
|
||||
.context(error::InferSchemaSnafu { path: &path })?;
|
||||
|
||||
Ok(Arc::new(schema))
|
||||
})
|
||||
.await
|
||||
.context(error::JoinHandleSnafu)?
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn stream_to_csv(
|
||||
stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
) -> Result<usize> {
|
||||
stream_to_file(stream, store, path, threshold, |buffer| {
|
||||
csv::Writer::new(buffer)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
self.write(batch).context(error::WriteRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::file_format::{
|
||||
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
};
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
@@ -224,7 +88,10 @@ mod tests {
|
||||
async fn infer_schema_basic() {
|
||||
let csv = CsvFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = csv.infer_schema(&store, "simple.csv").await.unwrap();
|
||||
let schema = csv
|
||||
.infer_schema(&store, "simple.csv".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(
|
||||
@@ -255,7 +122,7 @@ mod tests {
|
||||
};
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.csv")
|
||||
.infer_schema(&store, "schema_infer_limit.csv".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -273,7 +140,7 @@ mod tests {
|
||||
let json = CsvFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.csv")
|
||||
.infer_schema(&store, "schema_infer_limit.csv".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -288,33 +155,4 @@ mod tests {
|
||||
formatted
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from() {
|
||||
let map = HashMap::new();
|
||||
let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(format, CsvFormat::default());
|
||||
|
||||
let map = HashMap::from([
|
||||
(
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
|
||||
"2000".to_string(),
|
||||
),
|
||||
(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
|
||||
(FORMAT_DELIMITER.to_string(), b'\t'.to_string()),
|
||||
(FORMAT_HAS_HEADER.to_string(), "false".to_string()),
|
||||
]);
|
||||
let format = CsvFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
format,
|
||||
CsvFormat {
|
||||
compression_type: CompressionType::Zstd,
|
||||
schema_infer_max_record: Some(2000),
|
||||
delimiter: b'\t',
|
||||
has_header: false,
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
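A hedged sketch of constructing the CsvOpener above via the derive_builder-generated CsvConfigBuilder (in-crate usage; the batch size is an example value and the camel-case CompressionType variants are assumed).

fn make_csv_opener(file_schema: SchemaRef, store: ObjectStore) -> CsvOpener {
    let config = CsvConfigBuilder::default()
        .batch_size(8192)
        .file_schema(file_schema)
        .build()
        .expect("batch_size and file_schema are set; the rest use builder defaults");
    CsvOpener::new(config, store, CompressionType::Gzip)
}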
@@ -12,80 +12,43 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io::BufReader;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::datatypes::SchemaRef;
|
||||
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
|
||||
use arrow::json::writer::LineDelimited;
|
||||
#[allow(deprecated)]
|
||||
use arrow::json::{self, RawReaderBuilder};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::Schema;
|
||||
use async_trait::async_trait;
|
||||
use common_runtime;
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::physical_plan::file_format::{FileMeta, FileOpenFuture, FileOpener};
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use tokio_util::io::SyncIoBridge;
|
||||
|
||||
use super::stream_to_file;
|
||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||
use crate::compression::CompressionType;
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::{self, open_with_decoder, FileFormat};
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
use crate::file_format::{self, FileFormat};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug)]
|
||||
pub struct JsonFormat {
|
||||
pub schema_infer_max_record: Option<usize>,
|
||||
pub compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl TryFrom<&HashMap<String, String>> for JsonFormat {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(value: &HashMap<String, String>) -> Result<Self> {
|
||||
let mut format = JsonFormat::default();
|
||||
if let Some(compression_type) = value.get(file_format::FORMAT_COMPRESSION_TYPE) {
|
||||
format.compression_type = CompressionType::from_str(compression_type)?
|
||||
};
|
||||
if let Some(schema_infer_max_record) =
|
||||
value.get(file_format::FORMAT_SCHEMA_INFER_MAX_RECORD)
|
||||
{
|
||||
format.schema_infer_max_record =
|
||||
Some(schema_infer_max_record.parse::<usize>().map_err(|_| {
|
||||
error::ParseFormatSnafu {
|
||||
key: file_format::FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||
value: schema_infer_max_record,
|
||||
}
|
||||
.build()
|
||||
})?);
|
||||
};
|
||||
Ok(format)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for JsonFormat {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
|
||||
compression_type: CompressionType::Uncompressed,
|
||||
compression_type: CompressionType::UNCOMPRESSED,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileFormat for JsonFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
let reader = store
|
||||
.reader(path)
|
||||
.reader(&path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
|
||||
let decoded = self.compression_type.convert_async_read(reader);
|
||||
|
||||
@@ -96,79 +59,20 @@ impl FileFormat for JsonFormat {
|
||||
|
||||
let iter = ValueIter::new(&mut reader, schema_infer_max_record);
|
||||
|
||||
let schema = infer_json_schema_from_iterator(iter).context(error::InferSchemaSnafu)?;
|
||||
let schema = infer_json_schema_from_iterator(iter)
|
||||
.context(error::InferSchemaSnafu { path: &path })?;
|
||||
|
||||
Ok(schema)
|
||||
Ok(Arc::new(schema))
|
||||
})
|
||||
.await
|
||||
.context(error::JoinHandleSnafu)?
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct JsonOpener {
|
||||
batch_size: usize,
|
||||
projected_schema: SchemaRef,
|
||||
object_store: Arc<ObjectStore>,
|
||||
compression_type: CompressionType,
|
||||
}
|
||||
|
||||
impl JsonOpener {
|
||||
/// Return a new [`JsonOpener`]. Any fields not present in `projected_schema` will be ignored.
|
||||
pub fn new(
|
||||
batch_size: usize,
|
||||
projected_schema: SchemaRef,
|
||||
object_store: ObjectStore,
|
||||
compression_type: CompressionType,
|
||||
) -> Self {
|
||||
Self {
|
||||
batch_size,
|
||||
projected_schema,
|
||||
object_store: Arc::new(object_store),
|
||||
compression_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
impl FileOpener for JsonOpener {
|
||||
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
|
||||
open_with_decoder(
|
||||
self.object_store.clone(),
|
||||
meta.location().to_string(),
|
||||
self.compression_type,
|
||||
|| {
|
||||
RawReaderBuilder::new(self.projected_schema.clone())
|
||||
.with_batch_size(self.batch_size)
|
||||
.build_decoder()
|
||||
.map_err(DataFusionError::from)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn stream_to_json(
|
||||
stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
) -> Result<usize> {
|
||||
stream_to_file(stream, store, path, threshold, |buffer| {
|
||||
json::LineDelimitedWriter::new(buffer)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
|
||||
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
|
||||
self.write(batch).context(error::WriteRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::test_util::{self, format_schema, test_store};
|
||||
|
||||
fn test_data_root() -> String {
|
||||
@@ -179,7 +83,10 @@ mod tests {
|
||||
async fn infer_schema_basic() {
|
||||
let json = JsonFormat::default();
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json.infer_schema(&store, "simple.json").await.unwrap();
|
||||
let schema = json
|
||||
.infer_schema(&store, "simple.json".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
|
||||
assert_eq!(
|
||||
@@ -201,7 +108,7 @@ mod tests {
|
||||
};
|
||||
let store = test_store(&test_data_root());
|
||||
let schema = json
|
||||
.infer_schema(&store, "schema_infer_limit.json")
|
||||
.infer_schema(&store, "schema_infer_limit.json".to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
let formatted: Vec<_> = format_schema(schema);
|
||||
@@ -211,29 +118,4 @@ mod tests {
|
||||
formatted
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from() {
|
||||
let map = HashMap::new();
|
||||
let format = JsonFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(format, JsonFormat::default());
|
||||
|
||||
let map = HashMap::from([
|
||||
(
|
||||
FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
|
||||
"2000".to_string(),
|
||||
),
|
||||
(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
|
||||
]);
|
||||
let format = JsonFormat::try_from(&map).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
format,
|
||||
JsonFormat {
|
||||
compression_type: CompressionType::Zstd,
|
||||
schema_infer_max_record: Some(2000),
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
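A hedged companion sketch for line-delimited JSON, mirroring the CSV opener; the batch size is an example and the camel-case CompressionType variant spelling is assumed.

fn make_json_opener(projected_schema: SchemaRef, store: ObjectStore) -> JsonOpener {
    // Fields missing from `projected_schema` are ignored by the decoder,
    // per the doc comment on JsonOpener::new.
    JsonOpener::new(8192, projected_schema, store, CompressionType::Uncompressed)
}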
@@ -1,102 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use arrow_schema::{Schema, SchemaRef};
|
||||
use async_trait::async_trait;
|
||||
use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch;
|
||||
use datafusion::error::{DataFusionError, Result as DfResult};
|
||||
use datafusion::physical_plan::RecordBatchStream;
|
||||
use futures::Stream;
|
||||
use object_store::ObjectStore;
|
||||
use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
|
||||
use orc_rust::async_arrow_reader::ArrowStreamReader;
|
||||
pub use orc_rust::error::Error as OrcError;
|
||||
use orc_rust::reader::Reader;
|
||||
use snafu::ResultExt;
|
||||
use tokio::io::{AsyncRead, AsyncSeek};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::FileFormat;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub struct OrcFormat;
|
||||
|
||||
pub async fn new_orc_cursor<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
|
||||
reader: R,
|
||||
) -> Result<Cursor<R>> {
|
||||
let reader = Reader::new_async(reader)
|
||||
.await
|
||||
.context(error::OrcReaderSnafu)?;
|
||||
let cursor = Cursor::root(reader).context(error::OrcReaderSnafu)?;
|
||||
Ok(cursor)
|
||||
}
|
||||
|
||||
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
|
||||
reader: R,
|
||||
) -> Result<ArrowStreamReader<R>> {
|
||||
let cursor = new_orc_cursor(reader).await?;
|
||||
Ok(ArrowStreamReader::new(cursor, None))
|
||||
}
|
||||
|
||||
pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
|
||||
reader: R,
|
||||
) -> Result<Schema> {
|
||||
let cursor = new_orc_cursor(reader).await?;
|
||||
Ok(create_arrow_schema(&cursor))
|
||||
}
|
||||
|
||||
pub struct OrcArrowStreamReaderAdapter<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> {
|
||||
stream: ArrowStreamReader<T>,
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> OrcArrowStreamReaderAdapter<T> {
|
||||
pub fn new(stream: ArrowStreamReader<T>) -> Self {
|
||||
Self { stream }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> RecordBatchStream
|
||||
for OrcArrowStreamReaderAdapter<T>
|
||||
{
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.stream.schema()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> Stream for OrcArrowStreamReaderAdapter<T> {
|
||||
type Item = DfResult<DfRecordBatch>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx))
|
||||
.map(|r| r.map_err(|e| DataFusionError::External(Box::new(e))));
|
||||
Poll::Ready(batch)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileFormat for OrcFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
let reader = store
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let schema = infer_orc_schema(reader).await?;
|
||||
|
||||
Ok(schema)
|
||||
}
|
||||
}
|
||||
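A hedged sketch of adapting an ORC reader into a DataFusion-compatible record batch stream with the helpers above; obtaining the underlying reader is left to the caller.

async fn orc_batch_stream<R>(reader: R) -> Result<OrcArrowStreamReaderAdapter<R>>
where
    R: tokio::io::AsyncRead + tokio::io::AsyncSeek + Unpin + Send + 'static,
{
    // Builds the cursor and stream reader, then wraps it so it can be polled
    // as a stream of DataFusion record batches.
    let stream = new_orc_stream_reader(reader).await?;
    Ok(OrcArrowStreamReaderAdapter::new(stream))
}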
@@ -12,39 +12,28 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::Schema;
|
||||
use arrow_schema::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use datafusion::error::Result as DatafusionResult;
|
||||
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
|
||||
use datafusion::parquet::arrow::{parquet_to_arrow_schema, ArrowWriter};
|
||||
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
|
||||
use datafusion::parquet::file::metadata::ParquetMetaData;
|
||||
use datafusion::parquet::format::FileMetaData;
|
||||
use datafusion::physical_plan::file_format::{FileMeta, ParquetFileReaderFactory};
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use futures::future::BoxFuture;
|
||||
use object_store::{ObjectStore, Reader};
|
||||
use datafusion::parquet::arrow::parquet_to_arrow_schema;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ParquetFormat {}
|
||||
|
||||
#[async_trait]
|
||||
impl FileFormat for ParquetFormat {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
|
||||
let mut reader = store
|
||||
.reader(path)
|
||||
.reader(&path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
.context(error::ReadObjectSnafu { path: &path })?;
|
||||
|
||||
let metadata = reader
|
||||
.get_metadata()
|
||||
@@ -58,107 +47,14 @@ impl FileFormat for ParquetFormat {
|
||||
)
|
||||
.context(error::ParquetToSchemaSnafu)?;
|
||||
|
||||
Ok(schema)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DefaultParquetFileReaderFactory {
|
||||
object_store: ObjectStore,
|
||||
}
|
||||
|
||||
/// Returns a AsyncFileReader factory
|
||||
impl DefaultParquetFileReaderFactory {
|
||||
pub fn new(object_store: ObjectStore) -> Self {
|
||||
Self { object_store }
|
||||
}
|
||||
}
|
||||
|
||||
impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
|
||||
// TODO(weny): Supports [`metadata_size_hint`].
|
||||
// The upstream has a implementation supports [`metadata_size_hint`],
|
||||
// however it coupled with Box<dyn ObjectStore>.
|
||||
    fn create_reader(
        &self,
        _partition_index: usize,
        file_meta: FileMeta,
        _metadata_size_hint: Option<usize>,
        _metrics: &ExecutionPlanMetricsSet,
    ) -> DatafusionResult<Box<dyn AsyncFileReader + Send>> {
        let path = file_meta.location().to_string();
        let object_store = self.object_store.clone();

        Ok(Box::new(LazyParquetFileReader::new(object_store, path)))
    }
}

pub struct LazyParquetFileReader {
    object_store: ObjectStore,
    reader: Option<Reader>,
    path: String,
}

impl LazyParquetFileReader {
    pub fn new(object_store: ObjectStore, path: String) -> Self {
        LazyParquetFileReader {
            object_store,
            path,
            reader: None,
        }
    }

    /// Initializes the reader on first use; any error is returned from the future.
    async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
        if self.reader.is_none() {
            let reader = self.object_store.reader(&self.path).await?;
            self.reader = Some(reader);
        }

        Ok(())
    }
}

impl AsyncFileReader for LazyParquetFileReader {
    fn get_bytes(
        &mut self,
        range: std::ops::Range<usize>,
    ) -> BoxFuture<'_, ParquetResult<bytes::Bytes>> {
        Box::pin(async move {
            self.maybe_initialize()
                .await
                .map_err(|e| ParquetError::External(Box::new(e)))?;
            // Safety: initialized by maybe_initialize above.
            self.reader.as_mut().unwrap().get_bytes(range).await
        })
    }

    fn get_metadata(&mut self) -> BoxFuture<'_, ParquetResult<Arc<ParquetMetaData>>> {
        Box::pin(async move {
            self.maybe_initialize()
                .await
                .map_err(|e| ParquetError::External(Box::new(e)))?;
            // Safety: initialized by maybe_initialize above.
            self.reader.as_mut().unwrap().get_metadata().await
        })
    }
}
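
Taken together, DefaultParquetFileReaderFactory and LazyParquetFileReader defer opening the object-store reader until DataFusion first requests bytes or metadata for a file. A condensed sketch of how the factory is wired into a scan follows; it restates the test_parquet_exec case from the deleted tests module further down, with the path shortened for readability (test_store, test_basic_schema and scan_config are that module's helpers).

let store = test_store("/");
let schema = test_basic_schema();
let base_config = scan_config(schema, None, "tests/parquet/basic.parquet");
// Every partition built by this exec obtains its readers through create_reader,
// which hands back a LazyParquetFileReader that only touches the store on first use.
let exec = ParquetExec::new(base_config, None, None)
    .with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));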

impl DfRecordBatchEncoder for ArrowWriter<SharedBuffer> {
    fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        self.write(batch).context(error::EncodeRecordBatchSnafu)
    }
}

#[async_trait]
impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
    async fn close(self) -> Result<FileMetaData> {
        self.close().context(error::EncodeRecordBatchSnafu)
        Ok(Arc::new(schema))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::file_format::FileFormat;
    use crate::test_util::{self, format_schema, test_store};

    fn test_data_root() -> String {
@@ -171,7 +67,10 @@ mod tests {
    async fn infer_schema_basic() {
        let json = ParquetFormat::default();
        let store = test_store(&test_data_root());
        let schema = json.infer_schema(&store, "basic.parquet").await.unwrap();
        let schema = json
            .infer_schema(&store, "basic.parquet".to_string())
            .await
            .unwrap();
        let formatted: Vec<_> = format_schema(schema);

        assert_eq!(vec!["num: Int64: NULL", "str: Utf8: NULL"], formatted);

@@ -1,228 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
use std::vec;

use datafusion::assert_batches_eq;
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::SessionContext;
use futures::StreamExt;

use super::FORMAT_TYPE;
use crate::compression::CompressionType;
use crate::error;
use crate::file_format::csv::{CsvConfigBuilder, CsvOpener};
use crate::file_format::json::JsonOpener;
use crate::file_format::parquet::DefaultParquetFileReaderFactory;
use crate::file_format::Format;
use crate::test_util::{self, scan_config, test_basic_schema, test_store};

struct Test<'a, T: FileOpener> {
    config: FileScanConfig,
    opener: T,
    expected: Vec<&'a str>,
}

impl<'a, T: FileOpener> Test<'a, T> {
    pub async fn run(self) {
        let result = FileStream::new(
            &self.config,
            0,
            self.opener,
            &ExecutionPlanMetricsSet::new(),
        )
        .unwrap()
        .map(|b| b.unwrap())
        .collect::<Vec<_>>()
        .await;

        assert_batches_eq!(self.expected, &result);
    }
}

#[tokio::test]
async fn test_json_opener() {
    let store = test_store("/");

    let schema = test_basic_schema();

    let json_opener = JsonOpener::new(
        100,
        schema.clone(),
        store.clone(),
        CompressionType::Uncompressed,
    );

    let path = &test_util::get_data_dir("tests/json/basic.json")
        .display()
        .to_string();
    let tests = [
        Test {
            config: scan_config(schema.clone(), None, path),
            opener: json_opener.clone(),
            expected: vec![
                "+-----+-------+",
                "| num | str   |",
                "+-----+-------+",
                "| 5   | test  |",
                "| 2   | hello |",
                "| 4   | foo   |",
                "+-----+-------+",
            ],
        },
        Test {
            config: scan_config(schema.clone(), Some(1), path),
            opener: json_opener.clone(),
            expected: vec![
                "+-----+------+",
                "| num | str  |",
                "+-----+------+",
                "| 5   | test |",
                "+-----+------+",
            ],
        },
    ];

    for test in tests {
        test.run().await;
    }
}

#[tokio::test]
async fn test_csv_opener() {
    let store = test_store("/");

    let schema = test_basic_schema();
    let path = &test_util::get_data_dir("tests/csv/basic.csv")
        .display()
        .to_string();
    let csv_conf = CsvConfigBuilder::default()
        .batch_size(test_util::TEST_BATCH_SIZE)
        .file_schema(schema.clone())
        .build()
        .unwrap();

    let csv_opener = CsvOpener::new(csv_conf, store, CompressionType::Uncompressed);

    let tests = [
        Test {
            config: scan_config(schema.clone(), None, path),
            opener: csv_opener.clone(),
            expected: vec![
                "+-----+-------+",
                "| num | str   |",
                "+-----+-------+",
                "| 5   | test  |",
                "| 2   | hello |",
                "| 4   | foo   |",
                "+-----+-------+",
            ],
        },
        Test {
            config: scan_config(schema.clone(), Some(1), path),
            opener: csv_opener.clone(),
            expected: vec![
                "+-----+------+",
                "| num | str  |",
                "+-----+------+",
                "| 5   | test |",
                "+-----+------+",
            ],
        },
    ];

    for test in tests {
        test.run().await;
    }
}

#[tokio::test(flavor = "multi_thread")]
async fn test_parquet_exec() {
    let store = test_store("/");

    let schema = test_basic_schema();

    let path = &test_util::get_data_dir("tests/parquet/basic.parquet")
        .display()
        .to_string();
    let base_config = scan_config(schema.clone(), None, path);

    let exec = ParquetExec::new(base_config, None, None)
        .with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));

    let ctx = SessionContext::new();

    let context = Arc::new(TaskContext::from(&ctx));

    // The stream batch size can be set by ctx.session_config.batch_size
    let result = exec
        .execute(0, context)
        .unwrap()
        .map(|b| b.unwrap())
        .collect::<Vec<_>>()
        .await;

    assert_batches_eq!(
        vec![
            "+-----+-------+",
            "| num | str   |",
            "+-----+-------+",
            "| 5   | test  |",
            "| 2   | hello |",
            "| 4   | foo   |",
            "+-----+-------+",
        ],
        &result
    );
}

#[test]
fn test_format() {
    let value = [(FORMAT_TYPE.to_string(), "csv".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();

    assert_matches!(Format::try_from(&value).unwrap(), Format::Csv(_));

    let value = [(FORMAT_TYPE.to_string(), "Parquet".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();

    assert_matches!(Format::try_from(&value).unwrap(), Format::Parquet(_));

    let value = [(FORMAT_TYPE.to_string(), "JSON".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();

    assert_matches!(Format::try_from(&value).unwrap(), Format::Json(_));

    let value = [(FORMAT_TYPE.to_string(), "Foobar".to_string())]
        .into_iter()
        .collect::<HashMap<_, _>>();

    assert_matches!(
        Format::try_from(&value).unwrap_err(),
        error::Error::UnsupportedFormat { .. }
    );

    let value = HashMap::new();

    assert_matches!(Format::try_from(&value).unwrap(), Format::Parquet(_));
}
@@ -12,17 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(assert_matches)]

pub mod buffered_writer;
pub mod compression;
pub mod error;
pub mod file_format;
pub mod lister;
pub mod object_store;
pub mod share_buffer;
#[cfg(test)]
pub mod test_util;
#[cfg(test)]
pub mod tests;
pub mod util;

@@ -27,7 +27,7 @@ use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";

/// Returns (schema, Option<host>, path)
/// parse url returns (schema,Option<host>,path)
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
    let parsed_url = Url::parse(url);
    match parsed_url {
@@ -43,7 +43,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
    }
}

pub fn build_backend(url: &str, connection: &HashMap<String, String>) -> Result<ObjectStore> {
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
    let (schema, host, _path) = parse_url(url)?;

    match schema.to_uppercase().as_str() {
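
For reference, the split that parse_url documents above can be sketched with the url crate alone, which the Url::parse call in the hunk suggests is what the function delegates to. The split_url helper below is hypothetical: the real parse_url returns this crate's Result, and its handling of scheme-less paths is not shown in the hunk.

use url::Url;

const FS_SCHEMA: &str = "FS";

fn split_url(url: &str) -> (String, Option<String>, String) {
    match Url::parse(url) {
        Ok(parsed) => (
            parsed.scheme().to_string(),
            parsed.host_str().map(ToString::to_string),
            parsed.path().to_string(),
        ),
        // Assumption for this sketch: anything that is not an absolute URL is a local path.
        Err(_) => (FS_SCHEMA.to_string(), None, url.to_string()),
    }
}

fn main() {
    let (schema, host, path) = split_url("s3://my-bucket/data/basic.parquet");
    // build_backend matches schema.to_uppercase() against S3_SCHEMA or FS_SCHEMA.
    assert_eq!(schema, "s3");
    assert_eq!(host.as_deref(), Some("my-bucket"));
    assert_eq!(path, "/data/basic.parquet");
}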
Some files were not shown because too many files have changed in this diff.