Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-25 07:30:02 +00:00
Compare commits
52 Commits
v0.1.0-alp
| SHA1 |
|---|
| eff07d5986 |
| 40c55e4da7 |
| 8d113550cf |
| 15a0ed0853 |
| 44493e9d8c |
| efd15839d4 |
| 1f62b36537 |
| 7b8e65ce93 |
| 6475339ad0 |
| 0bd802c70d |
| 28d07c7a2e |
| dc33b0c0ce |
| 4b4f8f27e8 |
| c994e0de88 |
| d1ba9ca126 |
| 0877dabce2 |
| 8b9671f376 |
| dcf66d9d52 |
| 65b61e78ad |
| 3638704f95 |
| 8a2f4256bf |
| 83aeadc506 |
| f556052951 |
| 8658d428e0 |
| e8e11072f8 |
| 6f0f72c377 |
| 32030a8194 |
| 0f7cde2411 |
| 1ece402ec8 |
| 7ee54b3e69 |
| 9b4dcba8cf |
| c3bcb1111f |
| a4ebd03a61 |
| e7daf1226f |
| 05c0ea9a59 |
| 604c20a83d |
| c7f114c8fa |
| 8a83de4ea5 |
| 3377930a50 |
| 85dd7e4f24 |
| f790fa05c1 |
| dfd91a1bf8 |
| ded31fb069 |
| 6a574fc52b |
| 58bdf27068 |
| 610a895b66 |
| a9ccc06449 |
| 38fe1a2f01 |
| 3414ac46b0 |
| 757b4a87a0 |
| ba1517fceb |
| 5b5d953d56 |
78
.github/workflows/apidoc.yml
vendored
@@ -1,42 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
# on:
# push:
# branches:
# - develop
# paths-ignore:
# - 'docs/**'
# - 'config/**'
# - '**.md'
# - '.dockerignore'
# - 'docker/**'
# - '.gitignore'

name: Build API docs
# name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2023-02-26
# env:
# RUST_TOOLCHAIN: nightly-2023-02-26

jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
# jobs:
# apidoc:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - run: cargo doc --workspace --no-deps --document-private-items
# - run: |
# cat <<EOF > target/doc/index.html
# <!DOCTYPE html>
# <html>
# <head>
# <meta http-equiv="refresh" content="0; url='greptime/'" />
# </head>
# <body></body></html>
# EOF
# - name: Publish dist directory
# uses: JamesIves/github-pages-deploy-action@v4
# with:
# folder: target/doc

7
.github/workflows/develop.yml
vendored
@@ -213,10 +213,11 @@ jobs:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
# - name: Install cargo-llvm-cov
# uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
run: cargo nextest run -F pyo3_backend
# run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1

252
.github/workflows/release.yml
vendored
@@ -2,9 +2,9 @@ on:
push:
tags:
- "v*.*.*"
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# schedule:
# # At 00:00 on Monday.
# - cron: '0 0 * * 1'
workflow_dispatch:

name: Release
@@ -12,13 +12,11 @@ name: Release
env:
RUST_TOOLCHAIN: nightly-2023-02-26

# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0

# In the future, we can change SCHEDULED_PERIOD to nightly.
SCHEDULED_PERIOD: weekly
SCHEDULED_PERIOD: nightly

CARGO_PROFILE: weekly
CARGO_PROFILE: nightly

jobs:
build:
@@ -30,17 +28,24 @@ jobs:
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64
|
||||
# - arch: aarch64-unknown-linux-gnu
|
||||
# os: ubuntu-2004-16-cores
|
||||
# file: greptime-linux-arm64
|
||||
continue-on-error: false
|
||||
# opts: "-F pyo3_backend"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64
|
||||
continue-on-error: true
|
||||
# opts: "-F pyo3_backend"
|
||||
# - arch: aarch64-apple-darwin
|
||||
# os: macos-latest
|
||||
# file: greptime-darwin-arm64
|
||||
# continue-on-error: true
|
||||
# - arch: x86_64-apple-darwin
|
||||
# os: macos-latest
|
||||
# file: greptime-darwin-amd64
|
||||
# continue-on-error: true
|
||||
runs-on: ${{ matrix.os }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb-edge'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
@@ -93,7 +98,13 @@ jobs:
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
|
||||
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
|
||||
|
||||
- name: Compile Python 3.10.10 from source for Aarch64
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
|
||||
run: |
|
||||
sudo chmod +x ./docker/aarch64/compile-python.sh
|
||||
sudo ./docker/aarch64/compile-python.sh
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
@@ -107,8 +118,18 @@ jobs:
|
||||
- name: Run tests
|
||||
run: make unit-test integration-test sqlness-test
|
||||
|
||||
- name: Run cargo build for aarch64-linux
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
|
||||
run: |
|
||||
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
|
||||
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
|
||||
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
|
||||
alias python=python3
|
||||
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Run cargo build
|
||||
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
@@ -133,7 +154,7 @@
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb-edge'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -141,12 +162,12 @@
- name: Download artifacts
uses: actions/download-artifact@v3

- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV

- name: Create scheduled build git tag
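The step above flips the scheduled version suffix from `${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}` to `${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD`. As a minimal illustration only (the workflow itself does this in bash), the same computation in Rust with the chrono crate might look like:

```rust
// Hedged sketch of the tag produced by the "Configure scheduled build version"
// step above; the real workflow uses `date "+%Y%m%d"` in bash.
use chrono::Utc;

fn scheduled_build_version(prefix: &str, period: &str) -> String {
    let build_time = Utc::now().format("%Y%m%d");
    // New ordering introduced by this diff: prefix-period-date,
    // e.g. "v0.2.0-nightly-20230313" (previously prefix-date-period).
    format!("{prefix}-{period}-{build_time}")
}

fn main() {
    // Values mirror SCHEDULED_BUILD_VERSION_PREFIX and SCHEDULED_PERIOD above.
    println!("{}", scheduled_build_version("v0.2.0", "nightly"));
}
```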
@@ -172,83 +193,142 @@ jobs:
|
||||
files: |
|
||||
**/greptime-*
|
||||
|
||||
docker:
|
||||
name: Build docker image
|
||||
needs: [build]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
# docker:
|
||||
# name: Build docker image
|
||||
# needs: [build]
|
||||
# runs-on: ubuntu-latest
|
||||
# if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
# steps:
|
||||
# - name: Checkout sources
|
||||
# uses: actions/checkout@v3
|
||||
|
||||
- name: Download amd64 binary
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: greptime-linux-amd64
|
||||
path: amd64
|
||||
# - name: Login to UCloud Container Registry
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: uhub.service.ucloud.cn
|
||||
# username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
# password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
|
||||
- name: Unzip the amd64 artifacts
|
||||
run: |
|
||||
cd amd64
|
||||
tar xvf greptime-linux-amd64.tgz
|
||||
rm greptime-linux-amd64.tgz
|
||||
# - name: Login to Dockerhub
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# - name: Download arm64 binary
|
||||
# uses: actions/download-artifact@v3
|
||||
# with:
|
||||
# name: greptime-linux-arm64
|
||||
# path: arm64
|
||||
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
# shell: bash
|
||||
# if: github.event_name == 'schedule'
|
||||
# run: |
|
||||
# buildTime=`date "+%Y%m%d"`
|
||||
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
# - name: Unzip the arm64 artifacts
|
||||
# run: |
|
||||
# cd arm64
|
||||
# tar xvf greptime-linux-arm64.tgz
|
||||
# rm greptime-linux-arm64.tgz
|
||||
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
# shell: bash
|
||||
# if: github.event_name != 'schedule'
|
||||
# run: |
|
||||
# VERSION=${{ github.ref_name }}
|
||||
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Login to UCloud Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: uhub.service.ucloud.cn
|
||||
username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
# - name: Set up QEMU
|
||||
# uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Login to Dockerhub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
# - name: Set up buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
# - name: Download amd64 binary
|
||||
# uses: actions/download-artifact@v3
|
||||
# with:
|
||||
# name: greptime-linux-amd64
|
||||
# path: amd64
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name != 'schedule'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
# - name: Unzip the amd64 artifacts
|
||||
# run: |
|
||||
# cd amd64
|
||||
# tar xvf greptime-linux-amd64.tgz
|
||||
# rm greptime-linux-amd64.tgz
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
# - name: Download arm64 binary
|
||||
# id: download-arm64
|
||||
# uses: actions/download-artifact@v3
|
||||
# with:
|
||||
# name: greptime-linux-arm64
|
||||
# path: arm64
|
||||
|
||||
- name: Set up buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
# - name: Unzip the arm64 artifacts
|
||||
# id: unzip-arm64
|
||||
# if: success() || steps.download-arm64.conclusion == 'success'
|
||||
# run: |
|
||||
# cd arm64
|
||||
# tar xvf greptime-linux-arm64.tgz
|
||||
# rm greptime-linux-arm64.tgz
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
# platforms: linux/amd64,linux/arm64
|
||||
platforms: linux/amd64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:latest
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
# - name: Build and push all
|
||||
# uses: docker/build-push-action@v3
|
||||
# if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
|
||||
# with:
|
||||
# context: .
|
||||
# file: ./docker/ci/Dockerfile
|
||||
# push: true
|
||||
# platforms: linux/amd64,linux/arm64
|
||||
# tags: |
|
||||
# greptime/greptimedb:latest
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
# - name: Build and push amd64 only
|
||||
# uses: docker/build-push-action@v3
|
||||
# if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
|
||||
# with:
|
||||
# context: .
|
||||
# file: ./docker/ci/Dockerfile
|
||||
# push: true
|
||||
# platforms: linux/amd64
|
||||
# tags: |
|
||||
# greptime/greptimedb:latest
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
# docker-push-uhub:
|
||||
# name: Push docker image to UCloud Container Registry
|
||||
# needs: [docker]
|
||||
# runs-on: ubuntu-latest
|
||||
# if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
# # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
|
||||
# continue-on-error: true
|
||||
# steps:
|
||||
# - name: Checkout sources
|
||||
# uses: actions/checkout@v3
|
||||
|
||||
# - name: Set up QEMU
|
||||
# uses: docker/setup-qemu-action@v2
|
||||
|
||||
# - name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
|
||||
# - name: Login to UCloud Container Registry
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: uhub.service.ucloud.cn
|
||||
# username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
# password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
|
||||
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
# shell: bash
|
||||
# if: github.event_name == 'schedule'
|
||||
# run: |
|
||||
# buildTime=`date "+%Y%m%d"`
|
||||
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
# shell: bash
|
||||
# if: github.event_name != 'schedule'
|
||||
# run: |
|
||||
# VERSION=${{ github.ref_name }}
|
||||
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
# - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
|
||||
# run: |
|
||||
# docker buildx imagetools create \
|
||||
# --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
|
||||
# --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
905
Cargo.lock
generated
File diff suppressed because it is too large
27
Cargo.toml
@@ -50,28 +50,29 @@ edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
arrow = { version = "33.0", features = ["pyarrow"] }
|
||||
arrow-array = "33.0"
|
||||
arrow-flight = "33.0"
|
||||
arrow-schema = { version = "33.0", features = ["serde"] }
|
||||
arrow = { version = "34.0" }
|
||||
arrow-array = "34.0"
|
||||
arrow-flight = "34.0"
|
||||
arrow-schema = { version = "34.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
# TODO(LFC): Use official DataFusion, when https://github.com/apache/arrow-datafusion/pull/5542 got merged
|
||||
datafusion = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
datafusion-common = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
datafusion-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
datafusion-optimizer = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
datafusion-physical-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
datafusion-sql = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
parquet = "33.0"
|
||||
parquet = "34.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = "0.30"
|
||||
sqlparser = "0.32"
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.24.2", features = ["full"] }
|
||||
tokio-util = "0.7"
|
||||
@@ -81,7 +82,7 @@ uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
[profile.weekly]
|
||||
[profile.nightly]
|
||||
inherits = "release"
|
||||
strip = true
|
||||
lto = "thin"
|
||||
|
||||
@@ -10,10 +10,6 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# MySQL server address, "127.0.0.1:4406" by default.
mysql_addr = "127.0.0.1:4406"
# The number of MySQL server worker threads, 2 by default.
mysql_runtime_size = 2

# Metasrv client options.
[meta_client_options]

11
config/edge.example.toml
Normal file
@@ -0,0 +1,11 @@
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"

# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data"
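The new config/edge.example.toml above is plain TOML; elsewhere in this changeset the option structs derive serde with `#[serde(default)]`. A small, hypothetical sketch of loading a file shaped like this one (the struct and field names are illustrative, not the actual greptimedb types):

```rust
// Hedged sketch: parsing an edge-style config with serde + toml,
// mirroring the `#[serde(default)]` pattern used by the datanode options.
use serde::Deserialize;

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct WalConfig {
    dir: String,
}

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct StorageConfig {
    r#type: String,
    data_dir: String,
}

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct EdgeOptions {
    wal: WalConfig,
    storage: StorageConfig,
}

fn main() {
    let text = r#"
        [wal]
        dir = "/tmp/greptimedb/wal"

        [storage]
        type = "File"
        data_dir = "/tmp/greptimedb/data"
    "#;
    let opts: EdgeOptions = toml::from_str(text).expect("valid TOML");
    println!("{opts:?}");
}
```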
57
docker/aarch64/Dockerfile
Normal file
@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependency, using separate `RUN` to separate cache
RUN cargo fetch

# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variable for cross compiling and compile it
# cross compiled python is `python3` in path, but pyo3 need `python` in path so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
46
docker/aarch64/compile-python.sh
Normal file
@@ -0,0 +1,46 @@
# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here

# Build local python first, then build cross-compiled python.
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
fi

tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0a7b790ed41364b5599dff806d1080bd59c5c9f6" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

@@ -221,4 +221,8 @@ impl TableEngine for MockTableEngine {
) -> table::Result<bool> {
unimplemented!()
}

async fn close(&self) -> table::Result<()> {
Ok(())
}
}

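The new `close()` hook on the mock table engine above matches the shutdown path added elsewhere in this changeset (services stop first, then internal components are closed). A minimal, hypothetical sketch of that lifecycle, using a stand-in trait with `async-trait` rather than GreptimeDB's real `TableEngine` trait:

```rust
// Illustrative only: a stand-in trait with the async close() hook this diff adds.
use async_trait::async_trait;

#[async_trait]
trait Engine: Send + Sync {
    async fn close(&self) -> Result<(), String>;
}

struct MockEngine;

#[async_trait]
impl Engine for MockEngine {
    async fn close(&self) -> Result<(), String> {
        // A real engine would flush and release resources here.
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let engine = MockEngine;
    // Shutdown ordering from the datanode changes: stop serving first,
    // then close internal components such as table engines.
    engine.close().await
}
```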
@@ -16,6 +16,7 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"

@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_database_client::GreptimeDatabaseClient;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::ChannelManager;
|
||||
use parking_lot::RwLock;
|
||||
@@ -23,6 +24,10 @@ use tonic::transport::Channel;
|
||||
use crate::load_balance::{LoadBalance, Loadbalancer};
|
||||
use crate::{error, Result};
|
||||
|
||||
pub(crate) struct DatabaseClient {
|
||||
pub(crate) inner: GreptimeDatabaseClient<Channel>,
|
||||
}
|
||||
|
||||
pub(crate) struct FlightClient {
|
||||
addr: String,
|
||||
client: FlightServiceClient<Channel>,
|
||||
@@ -118,7 +123,7 @@ impl Client {
|
||||
self.inner.set_peers(urls);
|
||||
}
|
||||
|
||||
pub(crate) fn make_client(&self) -> Result<FlightClient> {
|
||||
fn find_channel(&self) -> Result<(String, Channel)> {
|
||||
let addr = self
|
||||
.inner
|
||||
.get_peer()
|
||||
@@ -131,11 +136,23 @@ impl Client {
|
||||
.channel_manager
|
||||
.get(&addr)
|
||||
.context(error::CreateChannelSnafu { addr: &addr })?;
|
||||
Ok((addr, channel))
|
||||
}
|
||||
|
||||
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
Ok(FlightClient {
|
||||
addr,
|
||||
client: FlightServiceClient::new(channel),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
Ok(DatabaseClient {
|
||||
inner: GreptimeDatabaseClient::new(channel),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -12,25 +12,27 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use api::v1::{
|
||||
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
|
||||
InsertRequest, QueryRequest, RequestHeader,
|
||||
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
|
||||
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
|
||||
RequestHeader,
|
||||
};
|
||||
use arrow_flight::{FlightData, Ticket};
|
||||
use common_error::prelude::*;
|
||||
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
use common_telemetry::logging;
|
||||
use futures_util::{TryFutureExt, TryStreamExt};
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
|
||||
};
|
||||
use crate::{error, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -77,8 +79,26 @@ impl Database {
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
|
||||
self.do_get(Request::Insert(request)).await
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
|
||||
let mut client = self.client.make_database_client()?.inner;
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
}),
|
||||
request: Some(Request::Insert(request)),
|
||||
};
|
||||
let response = client
|
||||
.handle(request)
|
||||
.await?
|
||||
.into_inner()
|
||||
.response
|
||||
.context(IllegalDatabaseResponseSnafu {
|
||||
err_msg: "GreptimeResponse is empty",
|
||||
})?;
|
||||
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
@@ -95,6 +115,24 @@ impl Database {
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn prom_range_query(
|
||||
&self,
|
||||
promql: &str,
|
||||
start: &str,
|
||||
end: &str,
|
||||
step: &str,
|
||||
) -> Result<Output> {
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(PromRangeQuery {
|
||||
query: promql.to_string(),
|
||||
start: start.to_string(),
|
||||
end: end.to_string(),
|
||||
step: step.to_string(),
|
||||
})),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
@@ -116,6 +154,13 @@ impl Database {
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::FlushTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_get(&self, request: Request) -> Result<Output> {
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
@@ -129,7 +174,7 @@ impl Database {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
|
||||
let mut client = self.client.make_client()?;
|
||||
let mut client = self.client.make_flight_client()?;
|
||||
|
||||
// TODO(LFC): Streaming get flight data.
|
||||
let flight_data: Vec<FlightData> = client
|
||||
@@ -138,17 +183,26 @@ impl Database {
|
||||
.and_then(|response| response.into_inner().try_collect())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = get_metadata_value(&e, INNER_ERROR_CODE)
|
||||
.and_then(|s| StatusCode::from_str(&s).ok())
|
||||
.unwrap_or(StatusCode::Unknown);
|
||||
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
|
||||
error::ExternalSnafu { code, msg }
|
||||
let tonic_code = e.code();
|
||||
let e: error::Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
error::ServerSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::FlightGetSnafu {
|
||||
tonic_code: e.code(),
|
||||
tonic_code,
|
||||
addr: client.addr(),
|
||||
})
|
||||
.map_err(|error| {
|
||||
logging::error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {}",
|
||||
client.addr(),
|
||||
tonic_code,
|
||||
error
|
||||
);
|
||||
error
|
||||
})
|
||||
.unwrap_err()
|
||||
})?;
|
||||
|
||||
@@ -175,12 +229,6 @@ impl Database {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
|
||||
e.metadata()
|
||||
.get(key)
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct FlightContext {
|
||||
auth_header: Option<AuthHeader>,
|
||||
|
||||
@@ -13,9 +13,10 @@
// limitations under the License.

use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -26,12 +27,7 @@ pub enum Error {
backtrace: Backtrace,
},

#[snafu(display(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
addr,
tonic_code,
source
))]
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
addr: String,
tonic_code: Code,
@@ -73,6 +69,13 @@ pub enum Error {
/// Error deserialized from gRPC metadata
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },

// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -82,7 +85,10 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
@@ -100,3 +106,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}

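The new `From<Status> for Error` impl above pulls the server-side status code and message out of the gRPC metadata and falls back to the tonic status text. A self-contained sketch of that extraction pattern follows; the metadata key names here are placeholders, since the real `INNER_ERROR_CODE` / `INNER_ERROR_MSG` constants live in the GreptimeDB error crates:

```rust
use tonic::Status;

// Hypothetical stand-ins for the crate-internal metadata keys used above.
const INNER_ERROR_CODE: &str = "err-code"; // assumed name, not the real constant
const INNER_ERROR_MSG: &str = "err-msg";   // assumed name, not the real constant

fn metadata_value(status: &Status, key: &str) -> Option<String> {
    status
        .metadata()
        .get(key)
        .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

fn main() {
    let status = Status::internal("table not found");
    // Mirror of the fallback in the diff: prefer the metadata values when present,
    // otherwise use a default code and the tonic status text itself.
    let code = metadata_value(&status, INNER_ERROR_CODE).unwrap_or_else(|| "Unknown".to_string());
    let msg = metadata_value(&status, INNER_ERROR_MSG).unwrap_or_else(|| status.to_string());
    println!("server error, code: {code}, msg: {msg}");
}
```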
@@ -30,9 +30,39 @@ struct Command {
|
||||
subcmd: SubCommand,
|
||||
}
|
||||
|
||||
pub enum Application {
|
||||
Datanode(datanode::Instance),
|
||||
Frontend(frontend::Instance),
|
||||
Metasrv(metasrv::Instance),
|
||||
Standalone(standalone::Instance),
|
||||
Cli(cli::Instance),
|
||||
}
|
||||
|
||||
impl Application {
|
||||
async fn run(&mut self) -> Result<()> {
|
||||
match self {
|
||||
Application::Datanode(instance) => instance.run().await,
|
||||
Application::Frontend(instance) => instance.run().await,
|
||||
Application::Metasrv(instance) => instance.run().await,
|
||||
Application::Standalone(instance) => instance.run().await,
|
||||
Application::Cli(instance) => instance.run().await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<()> {
|
||||
match self {
|
||||
Application::Datanode(instance) => instance.stop().await,
|
||||
Application::Frontend(instance) => instance.stop().await,
|
||||
Application::Metasrv(instance) => instance.stop().await,
|
||||
Application::Standalone(instance) => instance.stop().await,
|
||||
Application::Cli(instance) => instance.stop().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Command {
|
||||
async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
async fn build(self) -> Result<Application> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,13 +81,28 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Application> {
|
||||
match self {
|
||||
SubCommand::Datanode(cmd) => cmd.run().await,
|
||||
SubCommand::Frontend(cmd) => cmd.run().await,
|
||||
SubCommand::Metasrv(cmd) => cmd.run().await,
|
||||
SubCommand::Standalone(cmd) => cmd.run().await,
|
||||
SubCommand::Cli(cmd) => cmd.run().await,
|
||||
SubCommand::Datanode(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Datanode(app))
|
||||
}
|
||||
SubCommand::Frontend(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Frontend(app))
|
||||
}
|
||||
SubCommand::Metasrv(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Metasrv(app))
|
||||
}
|
||||
SubCommand::Standalone(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Standalone(app))
|
||||
}
|
||||
SubCommand::Cli(cmd) => {
|
||||
let app = cmd.build().await?;
|
||||
Ok(Application::Cli(app))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -104,13 +149,18 @@ async fn main() -> Result<()> {
|
||||
common_telemetry::init_default_metrics_recorder();
|
||||
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);
|
||||
|
||||
let mut app = cmd.build().await?;
|
||||
|
||||
tokio::select! {
|
||||
result = cmd.run() => {
|
||||
result = app.run() => {
|
||||
if let Err(err) = result {
|
||||
error!(err; "Fatal error occurs!");
|
||||
}
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
if let Err(err) = app.stop().await {
|
||||
error!(err; "Fatal error occurs!");
|
||||
}
|
||||
info!("Goodbye!");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,10 +17,25 @@ mod helper;
|
||||
mod repl;
|
||||
|
||||
use clap::Parser;
|
||||
use repl::Repl;
|
||||
pub use repl::Repl;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
pub struct Instance {
|
||||
repl: Repl,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.repl.run().await
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle cli shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -28,8 +43,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.cmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.cmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,9 +54,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Attach(cmd) => cmd.run().await,
|
||||
SubCommand::Attach(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,8 +72,8 @@ pub(crate) struct AttachCommand {
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
let mut repl = Repl::try_new(&self).await?;
|
||||
repl.run().await
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let repl = Repl::try_new(&self).await?;
|
||||
Ok(Instance { repl })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ use query::datafusion::DatafusionQueryEngine;
|
||||
use query::logical_optimizer::LogicalOptimizer;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::query_engine::QueryEngineState;
|
||||
use query::QueryEngine;
|
||||
use rustyline::error::ReadlineError;
|
||||
use rustyline::Editor;
|
||||
@@ -49,7 +50,7 @@ use crate::error::{
|
||||
};
|
||||
|
||||
/// Captures the state of the repl, gathers commands and executes them one by one
|
||||
pub(crate) struct Repl {
|
||||
pub struct Repl {
|
||||
/// Rustyline editor for interacting with user on command line
|
||||
rl: Editor<RustylineHelper>,
|
||||
|
||||
@@ -166,12 +167,16 @@ impl Repl {
|
||||
self.database.catalog(),
|
||||
self.database.schema(),
|
||||
));
|
||||
let LogicalPlan::DfPlan(plan) = query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
|
||||
let plan = query_engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.and_then(|x| query_engine.optimize(&x))
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let LogicalPlan::DfPlan(plan) =
|
||||
query_engine.optimize(&plan).context(PlanStatementSnafu)?;
|
||||
|
||||
let plan = DFLogicalSubstraitConvertor {}
|
||||
.encode(plan)
|
||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||
@@ -262,6 +267,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
partition_manager,
|
||||
datanode_clients,
|
||||
));
|
||||
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));
|
||||
|
||||
Ok(DatafusionQueryEngine::new(catalog_list, Default::default()))
|
||||
Ok(DatafusionQueryEngine::new(state))
|
||||
}
|
||||
|
||||
@@ -24,6 +24,21 @@ use snafu::ResultExt;
|
||||
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.datanode.start().await.context(StartDatanodeSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle datanode shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -31,8 +46,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,9 +57,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -72,19 +87,16 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("Datanode start command: {:#?}", self);
|
||||
|
||||
let opts: DatanodeOptions = self.try_into()?;
|
||||
|
||||
logging::info!("Datanode options: {:#?}", opts);
|
||||
|
||||
Datanode::new(opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?
|
||||
.start()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)
|
||||
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
|
||||
|
||||
Ok(Instance { datanode })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,12 +26,24 @@ pub enum Error {
|
||||
source: datanode::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop datanode, source: {}", source))]
|
||||
StopDatanode {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start frontend, source: {}", source))]
|
||||
StartFrontend {
|
||||
#[snafu(backtrace)]
|
||||
source: frontend::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build meta server, source: {}", source))]
|
||||
BuildMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
source: meta_srv::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start meta server, source: {}", source))]
|
||||
StartMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -138,6 +150,7 @@ impl ErrorExt for Error {
|
||||
Error::StartDatanode { source } => source.status_code(),
|
||||
Error::StartFrontend { source } => source.status_code(),
|
||||
Error::StartMetaServer { source } => source.status_code(),
|
||||
Error::BuildMetaServer { source } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
@@ -156,6 +169,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
|
||||
Error::StopDatanode { source } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,10 +16,10 @@ use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::Instance;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
@@ -34,6 +34,24 @@ use snafu::ResultExt;
|
||||
use crate::error::{self, IllegalAuthConfigSnafu, Result};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
frontend: FeInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.frontend
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle frontend shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -41,8 +59,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,9 +70,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -90,16 +108,20 @@ pub struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let opts: FrontendOptions = self.try_into()?;
|
||||
|
||||
let instance = Instance::try_new_distributed(&opts, plugins.clone())
|
||||
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
let mut frontend = Frontend::new(opts, instance, plugins);
|
||||
frontend.start().await.context(error::StartFrontendSnafu)
|
||||
instance
|
||||
.build_servers(&opts, plugins)
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
Ok(Instance { frontend: instance })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,13 +14,32 @@
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use meta_srv::bootstrap;
|
||||
use meta_srv::bootstrap::MetaSrvInstance;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{error, toml_loader};
|
||||
|
||||
pub struct Instance {
|
||||
instance: MetaSrvInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.instance
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle metasrv shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -28,8 +47,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,9 +58,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -63,16 +82,17 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
let opts: MetaSrvOptions = self.try_into()?;
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
|
||||
bootstrap::bootstrap_meta_srv(opts)
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,15 +16,16 @@ use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_telemetry::info;
|
||||
use datanode::datanode::{
|
||||
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
|
||||
};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::Instance as FeInstance;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
@@ -36,7 +37,9 @@ use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
|
||||
use crate::error::{
|
||||
Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu, StopDatanodeSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::toml_loader;
|
||||
|
||||
@@ -47,8 +50,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,9 +61,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -133,6 +136,40 @@ impl StandaloneOptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
frontend: FeInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
|
||||
self.datanode
|
||||
.start_instance()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
self.datanode
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(StopDatanodeSnafu)?;
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(StopDatanodeSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
@@ -164,7 +201,7 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let enable_memory_catalog = self.enable_memory_catalog;
|
||||
let config_file = self.config_file.clone();
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
@@ -184,33 +221,30 @@ impl StartCommand {
|
||||
fe_opts, dn_opts
|
||||
);
|
||||
|
||||
let mut datanode = Datanode::new(dn_opts.clone())
|
||||
let datanode = Datanode::new(dn_opts.clone())
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;
|
||||
|
||||
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
|
||||
datanode
|
||||
.start_instance()
|
||||
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
|
||||
|
||||
frontend
|
||||
.build_servers(&fe_opts, plugins)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
Ok(Instance { datanode, frontend })
|
||||
}
|
||||
}
|
||||
|
||||
/// Build frontend instance in standalone mode
|
||||
async fn build_frontend(
|
||||
fe_opts: FrontendOptions,
|
||||
plugins: Arc<Plugins>,
|
||||
datanode_instance: InstanceRef,
|
||||
) -> Result<Frontend<FeInstance>> {
|
||||
) -> Result<FeInstance> {
|
||||
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
|
||||
frontend_instance.set_script_handler(datanode_instance);
|
||||
frontend_instance.set_plugins(plugins.clone());
|
||||
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
|
||||
Ok(frontend_instance)
|
||||
}
|
||||
|
||||
impl TryFrom<StartCommand> for FrontendOptions {
|
||||
|
||||
@@ -46,6 +46,9 @@ mod tests {
}
}

// TODO(LFC): Un-ignore this REPL test.
// Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
#[ignore]
#[test]
fn test_repl() {
let data_dir = create_temp_dir("data");

@@ -15,6 +15,3 @@ tokio.workspace = true
|
||||
[dependencies.tikv-jemalloc-sys]
|
||||
version = "0.5"
|
||||
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
@@ -77,6 +77,7 @@ impl Default for ObjectStoreConfig {
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct WalConfig {
    // wal directory
    pub dir: String,
@@ -108,6 +109,7 @@ impl Default for WalConfig {

/// Options for table compaction
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct CompactionConfig {
    /// Max task number that can concurrently run.
    pub max_inflight_tasks: usize,
@@ -240,6 +242,20 @@ impl Datanode {
    pub fn get_instance(&self) -> InstanceRef {
        self.instance.clone()
    }

    async fn shutdown_instance(&self) -> Result<()> {
        self.instance.shutdown().await
    }

    async fn shutdown_services(&self) -> Result<()> {
        self.services.shutdown().await
    }

    pub async fn shutdown(&self) -> Result<()> {
        // We must shutdown services first
        self.shutdown_services().await?;
        self.shutdown_instance().await
    }
}

#[cfg(test)]
@@ -35,6 +35,24 @@ pub enum Error {
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to plan statement, source: {}", source))]
|
||||
PlanStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute statement, source: {}", source))]
|
||||
ExecuteStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute logical plan, source: {}", source))]
|
||||
ExecuteLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan, source: {}", source))]
|
||||
DecodeLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
@@ -151,6 +169,13 @@ pub enum Error {
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
|
||||
FlushTable {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start server, source: {}", source))]
|
||||
StartServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -482,6 +507,24 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to close table engine, source: {}", source))]
|
||||
CloseTableEngine {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown server, source: {}", source))]
|
||||
ShutdownServer {
|
||||
#[snafu(backtrace)]
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown instance, source: {}", source))]
|
||||
ShutdownInstance {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -490,7 +533,12 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
ExecuteSql { source } | DescribeStatement { source } => source.status_code(),
|
||||
ExecuteSql { source }
|
||||
| PlanStatement { source }
|
||||
| ExecuteStatement { source }
|
||||
| ExecuteLogicalPlan { source }
|
||||
| DescribeStatement { source } => source.status_code(),
|
||||
|
||||
DecodeLogicalPlan { source } => source.status_code(),
|
||||
NewCatalog { source } | RegisterSchema { source } => source.status_code(),
|
||||
FindTable { source, .. } => source.status_code(),
|
||||
@@ -498,6 +546,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
DropTable { source, .. } => source.status_code(),
|
||||
FlushTable { source, .. } => source.status_code(),
|
||||
|
||||
Insert { source, .. } => source.status_code(),
|
||||
Delete { source, .. } => source.status_code(),
|
||||
@@ -550,7 +599,10 @@ impl ErrorExt for Error {
|
||||
| BuildParquetRecordBatchStream { .. }
|
||||
| InvalidSchema { .. }
|
||||
| ParseDataTypes { .. }
|
||||
| IncorrectInternalState { .. } => StatusCode::Internal,
|
||||
| IncorrectInternalState { .. }
|
||||
| ShutdownServer { .. }
|
||||
| ShutdownInstance { .. }
|
||||
| CloseTableEngine { .. } => StatusCode::Internal,
|
||||
|
||||
BuildBackend { .. }
|
||||
| InitBackend { .. }
|
||||
|
||||
@@ -144,6 +144,18 @@ impl HeartbeatTask {

        Ok(())
    }

    pub async fn close(&self) -> Result<()> {
        let running = self.running.clone();
        if running
            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
            .is_err()
        {
            warn!("Call close heartbeat task multiple times");
        }

        Ok(())
    }
}

/// Resolves hostname:port address for meta registration
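The `close` above relies on a single atomic flag so that repeated shutdowns stay harmless. A self-contained sketch of that pattern follows; the struct and field names are invented for the example.

// Standalone illustration of the idempotent-close pattern; not project code.
use std::sync::atomic::{AtomicBool, Ordering};

struct Task {
    running: AtomicBool,
}

impl Task {
    fn close(&self) {
        // Only the first caller flips `running` from true to false;
        // later callers get Err from compare_exchange and just log.
        if self
            .running
            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
            .is_err()
        {
            eprintln!("close called more than once");
        }
    }
}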
@@ -20,6 +20,7 @@ use catalog::remote::MetaKvBackend;
|
||||
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_procedure::local::{LocalManager, ManagerConfig};
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
@@ -36,12 +37,14 @@ use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder
|
||||
use object_store::{util, ObjectStore, ObjectStoreBuilder};
|
||||
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use storage::compaction::{CompactionHandler, CompactionSchedulerRef, SimplePicker};
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::{LocalScheduler, SchedulerConfig};
|
||||
use storage::EngineImpl;
|
||||
use store_api::logstore::LogStore;
|
||||
use table::requests::FlushTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProviderRef;
|
||||
use table::Table;
|
||||
@@ -51,11 +54,11 @@ use crate::datanode::{
|
||||
};
|
||||
use crate::error::{
|
||||
self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result,
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
mod grpc;
|
||||
mod script;
|
||||
@@ -77,9 +80,6 @@ pub type InstanceRef = Arc<Instance>;
|
||||
|
||||
impl Instance {
|
||||
pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let logstore = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let meta_client = match opts.mode {
|
||||
Mode::Standalone => None,
|
||||
Mode::Distributed => {
|
||||
@@ -96,11 +96,22 @@ impl Instance {
|
||||
|
||||
let compaction_scheduler = create_compaction_scheduler(opts);
|
||||
|
||||
Self::new_with(opts, meta_client, compaction_scheduler).await
|
||||
}
|
||||
|
||||
pub(crate) async fn new_with(
|
||||
opts: &DatanodeOptions,
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
compaction_scheduler: CompactionSchedulerRef<RaftEngineLogStore>,
|
||||
) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let log_store = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let table_engine = Arc::new(DefaultEngine::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::from(opts),
|
||||
logstore.clone(),
|
||||
log_store.clone(),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
@@ -108,7 +119,7 @@ impl Instance {
|
||||
));
|
||||
|
||||
// create remote catalog manager
|
||||
let (catalog_manager, factory, table_id_provider) = match opts.mode {
|
||||
let (catalog_manager, table_id_provider) = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
if opts.enable_memory_catalog {
|
||||
let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
|
||||
@@ -125,11 +136,8 @@ impl Instance {
|
||||
.await
|
||||
.expect("Failed to register numbers");
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
|
||||
(
|
||||
catalog.clone() as CatalogManagerRef,
|
||||
factory,
|
||||
Some(catalog as TableIdProviderRef),
|
||||
)
|
||||
} else {
|
||||
@@ -138,11 +146,9 @@ impl Instance {
|
||||
.await
|
||||
.context(CatalogSnafu)?,
|
||||
);
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
|
||||
(
|
||||
catalog.clone() as CatalogManagerRef,
|
||||
factory,
|
||||
Some(catalog as TableIdProviderRef),
|
||||
)
|
||||
}
|
||||
@@ -156,11 +162,11 @@ impl Instance {
|
||||
client: meta_client.as_ref().unwrap().clone(),
|
||||
}),
|
||||
));
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
(catalog as CatalogManagerRef, factory, None)
|
||||
(catalog as CatalogManagerRef, None)
|
||||
}
|
||||
};
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_manager.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let script_executor =
|
||||
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
|
||||
@@ -220,6 +226,60 @@ impl Instance {
        Ok(())
    }

    pub async fn shutdown(&self) -> Result<()> {
        if let Some(heartbeat_task) = &self.heartbeat_task {
            heartbeat_task
                .close()
                .await
                .map_err(BoxedError::new)
                .context(ShutdownInstanceSnafu)?;
        }

        self.flush_tables().await?;

        self.sql_handler
            .close()
            .await
            .map_err(BoxedError::new)
            .context(ShutdownInstanceSnafu)
    }

    pub async fn flush_tables(&self) -> Result<()> {
        info!("going to flush all schemas");
        let schema_list = self
            .catalog_manager
            .catalog(DEFAULT_CATALOG_NAME)
            .map_err(BoxedError::new)
            .context(ShutdownInstanceSnafu)?
            .expect("Default schema not found")
            .schema_names()
            .map_err(BoxedError::new)
            .context(ShutdownInstanceSnafu)?;
        let flush_requests = schema_list
            .into_iter()
            .map(|schema_name| {
                SqlRequest::FlushTable(FlushTableRequest {
                    catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                    schema_name,
                    table_name: None,
                    region_number: None,
                })
            })
            .collect::<Vec<_>>();
        let flush_result = futures::future::try_join_all(
            flush_requests
                .into_iter()
                .map(|request| self.sql_handler.execute(request, QueryContext::arc())),
        )
        .await
        .map_err(BoxedError::new)
        .context(ShutdownInstanceSnafu);
        info!("flush success: {}", flush_result.is_ok());
        flush_result?;

        Ok(())
    }

    pub fn sql_handler(&self) -> &SqlHandler {
        &self.sql_handler
    }
@@ -227,6 +287,10 @@ impl Instance {
    pub fn catalog_manager(&self) -> &CatalogManagerRef {
        &self.catalog_manager
    }

    pub fn query_engine(&self) -> QueryEngineRef {
        self.query_engine.clone()
    }
}

fn create_compaction_scheduler<S: LogStore>(opts: &DatanodeOptions) -> CompactionSchedulerRef<S> {
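`flush_tables` above fans out one flush request per schema and joins them with `try_join_all`, so the shutdown fails if any flush fails. A minimal, self-contained sketch of that fan-out/fan-in shape; the futures below are trivial stand-ins, not the real flush requests.

// Sketch only: fan out one future per item, fail fast if any of them errors.
async fn flush_all(schemas: Vec<String>) -> Result<(), String> {
    let tasks = schemas
        .into_iter()
        .map(|schema| async move {
            // Stand-in for `sql_handler.execute(SqlRequest::FlushTable(..))`.
            println!("flushing schema {schema}");
            Ok::<_, String>(())
        })
        .collect::<Vec<_>>();

    // try_join_all resolves to Err as soon as any task errors.
    futures::future::try_join_all(tasks).await?;
    Ok(())
}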
@@ -18,15 +18,19 @@ use api::v1::query_request::Query;
use api::v1::{CreateDatabaseExpr, DdlRequest, InsertRequest};
use async_trait::async_trait;
use common_query::Output;
use query::parser::QueryLanguageParser;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::plan::LogicalPlan;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::QueryContextRef;
use snafu::prelude::*;
use sql::statements::statement::Statement;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::requests::CreateDatabaseRequest;

use crate::error::{self, DecodeLogicalPlanSnafu, ExecuteSqlSnafu, Result};
use crate::error::{
    self, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, ExecuteSqlSnafu, PlanStatementSnafu,
    Result,
};
use crate::instance::Instance;

impl Instance {
@@ -51,17 +55,42 @@ impl Instance {
        self.query_engine
            .execute(&LogicalPlan::DfPlan(logical_plan))
            .await
            .context(ExecuteSqlSnafu)
            .context(ExecuteLogicalPlanSnafu)
    }

    async fn handle_query(&self, query: Query, ctx: QueryContextRef) -> Result<Output> {
        Ok(match query {
        match query {
            Query::Sql(sql) => {
                let stmt = QueryLanguageParser::parse_sql(&sql).context(ExecuteSqlSnafu)?;
                self.execute_stmt(stmt, ctx).await?
                match stmt {
                    // TODO(LFC): Remove SQL execution branch here.
                    // Keep this because substrait can't handle much of SQLs now.
                    QueryStatement::Sql(Statement::Query(_)) => {
                        let plan = self
                            .query_engine
                            .planner()
                            .plan(stmt, ctx)
                            .await
                            .context(PlanStatementSnafu)?;
                        self.query_engine
                            .execute(&plan)
                            .await
                            .context(ExecuteLogicalPlanSnafu)
                    }
                    _ => self.execute_stmt(stmt, ctx).await,
                }
            }
            Query::LogicalPlan(plan) => self.execute_logical(plan).await?,
        })
            Query::LogicalPlan(plan) => self.execute_logical(plan).await,
            Query::PromRangeQuery(promql) => {
                let prom_query = PromQuery {
                    query: promql.query,
                    start: promql.start,
                    end: promql.end,
                    step: promql.step,
                };
                self.execute_promql(&prom_query, ctx).await
            }
        }
    }

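The SQL branch above follows the parse, plan, execute sequence this change threads through the datanode; it is essentially what the `exec_selection` test helper further down in the diff does. A hedged sketch of that flow, with error handling collapsed to `unwrap` for brevity:

// Sketch of the parse -> plan -> execute path; the instance handle is assumed.
async fn run_query(instance: &Instance, sql: &str) -> Output {
    let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
    let engine = instance.query_engine();
    let plan = engine
        .planner()
        .plan(stmt, QueryContext::arc())
        .await
        .unwrap();
    engine.execute(&plan).await.unwrap()
}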
pub async fn handle_insert(
|
||||
@@ -98,7 +127,7 @@ impl Instance {
|
||||
DdlExpr::Alter(expr) => self.handle_alter(expr).await,
|
||||
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
|
||||
DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
|
||||
DdlExpr::FlushTable(_) => todo!(),
|
||||
DdlExpr::FlushTable(expr) => self.handle_flush_table(expr).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -132,11 +161,23 @@ mod test {
|
||||
};
|
||||
use common_recordbatch::RecordBatches;
|
||||
use datatypes::prelude::*;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::tests::test_util::{self, MockInstance};
|
||||
|
||||
async fn exec_selection(instance: &Instance, sql: &str) -> Output {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let engine = instance.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_handle_ddl() {
|
||||
let instance = MockInstance::new("test_handle_ddl").await;
|
||||
@@ -199,22 +240,17 @@ mod test {
|
||||
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(
|
||||
"INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
|
||||
)
|
||||
.unwrap();
|
||||
let output = instance
|
||||
.execute_sql(
|
||||
"INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(1)));
|
||||
|
||||
let output = instance
|
||||
.execute_sql(
|
||||
"SELECT ts, a, b FROM my_database.my_table",
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let output = exec_selection(instance, "SELECT ts, a, b FROM my_database.my_table").await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
@@ -280,10 +316,7 @@ mod test {
|
||||
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(3)));
|
||||
|
||||
let output = instance
|
||||
.execute_sql("SELECT ts, host, cpu FROM demo", QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = exec_selection(instance, "SELECT ts, host, cpu FROM demo").await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
|
||||
@@ -17,27 +17,28 @@ use std::time::{Duration, SystemTime};
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging::info;
|
||||
use common_telemetry::timer;
|
||||
use datatypes::schema::Schema;
|
||||
use futures::StreamExt;
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::StatementHandler;
|
||||
use servers::error as server_error;
|
||||
use servers::prom::PromHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::ast::ObjectName;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{
|
||||
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
|
||||
};
|
||||
|
||||
use crate::error::{self, BumpTableIdSnafu, ExecuteSqlSnafu, Result, TableIdProviderNotFoundSnafu};
|
||||
use crate::error::{
|
||||
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
|
||||
TableIdProviderNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::Instance;
|
||||
use crate::metric;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
@@ -50,18 +51,6 @@ impl Instance {
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Promql(_) => {
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
QueryStatement::Sql(Statement::Insert(insert)) => {
|
||||
let requests = self
|
||||
.sql_handler
|
||||
@@ -163,11 +152,6 @@ impl Instance {
|
||||
.execute(SqlRequest::ShowTables(show_tables), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Explain(explain)) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::Explain(Box::new(explain)), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::DescribeTable(describe_table)) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::DescribeTable(describe_table), query_ctx)
|
||||
@@ -176,17 +160,6 @@ impl Instance {
|
||||
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
|
||||
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
|
||||
}
|
||||
QueryStatement::Sql(Statement::Use(ref schema)) => {
|
||||
let catalog = &query_ctx.current_catalog();
|
||||
ensure!(
|
||||
self.is_valid_schema(catalog, schema)?,
|
||||
error::DatabaseNotFoundSnafu { catalog, schema }
|
||||
);
|
||||
|
||||
query_ctx.set_current_schema(schema);
|
||||
|
||||
Ok(Output::RecordBatches(RecordBatches::empty()))
|
||||
}
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
@@ -220,49 +193,30 @@ impl Instance {
|
||||
.await
|
||||
}
|
||||
},
|
||||
QueryStatement::Sql(Statement::Tql(tql)) => self.execute_tql(tql, query_ctx).await,
|
||||
QueryStatement::Sql(Statement::Query(_))
|
||||
| QueryStatement::Sql(Statement::Explain(_))
|
||||
| QueryStatement::Sql(Statement::Use(_))
|
||||
| QueryStatement::Sql(Statement::Tql(_))
|
||||
| QueryStatement::Promql(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
};
|
||||
let stmt = QueryLanguageParser::parse_promql(&promql).context(ExecuteSqlSnafu)?;
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
Tql::Explain(_explain) => {
|
||||
todo!("waiting for promql-parser ast adding a explain node")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn execute_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).context(ExecuteSqlSnafu)?;
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
}
|
||||
|
||||
pub async fn execute_promql(
|
||||
&self,
|
||||
promql: &PromQuery,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
|
||||
let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?;
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
}
|
||||
|
||||
// TODO(ruihang): merge this and `execute_promql` after #951 landed
|
||||
@@ -291,7 +245,14 @@ impl Instance {
|
||||
eval_stmt.lookback_delta = lookback
|
||||
}
|
||||
}
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -327,57 +288,16 @@ pub fn table_idents_to_full_name(
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SqlQueryHandler for Instance {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
// we assume sql string has only 1 statement in datanode
|
||||
let result = self.execute_sql(query, query_ctx).await;
|
||||
vec![result]
|
||||
}
|
||||
|
||||
async fn do_promql_query(
|
||||
impl StatementHandler for Instance {
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
query: &PromQuery,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Vec<Result<Output>> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
let result = self.execute_promql(query, query_ctx).await;
|
||||
vec![result]
|
||||
}
|
||||
|
||||
async fn do_statement_query(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
self.execute_stmt(QueryStatement::Sql(stmt), query_ctx)
|
||||
) -> query::error::Result<Output> {
|
||||
self.execute_stmt(stmt, query_ctx)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_describe(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Option<Schema>> {
|
||||
if let Statement::Query(_) = stmt {
|
||||
self.query_engine
|
||||
.describe(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.map(Some)
|
||||
.context(error::DescribeStatementSnafu)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.catalog_manager
|
||||
.schema(catalog, schema)
|
||||
.map(|s| s.is_some())
|
||||
.context(error::CatalogSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(QueryExecutionSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,12 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(trait_upcasting)]
|
||||
|
||||
pub mod datanode;
|
||||
pub mod error;
|
||||
mod heartbeat;
|
||||
pub mod instance;
|
||||
mod metric;
|
||||
pub mod metric;
|
||||
mod mock;
|
||||
mod script;
|
||||
pub mod server;
|
||||
|
||||
@@ -12,32 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::remote::MetaKvBackend;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
use meta_srv::mocks::MockInfo;
|
||||
use mito::config::EngineConfig as TableEngineConfig;
|
||||
use query::QueryEngineFactory;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
|
||||
use crate::datanode::DatanodeOptions;
|
||||
use crate::error::{CatalogSnafu, RecoverProcedureSnafu, Result};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::instance::{
|
||||
create_log_store, create_procedure_manager, new_object_store, DefaultEngine, Instance,
|
||||
};
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::error::Result;
|
||||
use crate::instance::Instance;
|
||||
|
||||
impl Instance {
|
||||
pub async fn with_mock_meta_client(opts: &DatanodeOptions) -> Result<Self> {
|
||||
@@ -46,98 +29,9 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn with_mock_meta_server(opts: &DatanodeOptions, meta_srv: MockInfo) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let logstore = Arc::new(create_log_store(&opts.wal).await?);
|
||||
let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
|
||||
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let table_engine = Arc::new(DefaultEngine::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
logstore.clone(),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
|
||||
// By default, catalog manager and factory are created in standalone mode
|
||||
let (catalog_manager, factory, heartbeat_task) = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
let catalog = Arc::new(
|
||||
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
|
||||
.await
|
||||
.context(CatalogSnafu)?,
|
||||
);
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
(catalog as CatalogManagerRef, factory, None)
|
||||
}
|
||||
Mode::Distributed => {
|
||||
let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
|
||||
table_engine.clone(),
|
||||
opts.node_id.unwrap_or(42),
|
||||
Arc::new(MetaKvBackend {
|
||||
client: meta_client.clone(),
|
||||
}),
|
||||
));
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
let heartbeat_task = HeartbeatTask::new(
|
||||
opts.node_id.unwrap_or(42),
|
||||
opts.rpc_addr.clone(),
|
||||
None,
|
||||
meta_client.clone(),
|
||||
catalog.clone(),
|
||||
);
|
||||
(catalog as CatalogManagerRef, factory, Some(heartbeat_task))
|
||||
}
|
||||
};
|
||||
let query_engine = factory.query_engine();
|
||||
let script_executor =
|
||||
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
|
||||
|
||||
let procedure_manager = create_procedure_manager(&opts.procedure).await?;
|
||||
if let Some(procedure_manager) = &procedure_manager {
|
||||
table_engine.register_procedure_loaders(&**procedure_manager);
|
||||
// Recover procedures.
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
query_engine: query_engine.clone(),
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
procedure_manager,
|
||||
),
|
||||
catalog_manager,
|
||||
script_executor,
|
||||
table_id_provider: Some(Arc::new(LocalTableIdProvider::default())),
|
||||
heartbeat_task,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct LocalTableIdProvider {
|
||||
inner: Arc<AtomicU32>,
|
||||
}
|
||||
|
||||
impl Default for LocalTableIdProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(AtomicU32::new(MIN_USER_TABLE_ID)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableIdProvider for LocalTableIdProvider {
|
||||
async fn next_table_id(&self) -> table::Result<TableId> {
|
||||
Ok(self.inner.fetch_add(1, Ordering::Relaxed))
|
||||
Instance::new_with(opts, Some(meta_client), compaction_scheduler).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,20 +17,15 @@ use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_runtime::Builder as RuntimeBuilder;
|
||||
use common_telemetry::tracing::log::info;
|
||||
use servers::error::Error::InternalIo;
|
||||
use servers::grpc::GrpcServer;
|
||||
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
|
||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
||||
use servers::server::Server;
|
||||
use servers::tls::TlsOption;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::datanode::DatanodeOptions;
|
||||
use crate::error::Error::StartServer;
|
||||
use crate::error::{ParseAddrSnafu, Result, RuntimeResourceSnafu, StartServerSnafu};
|
||||
use crate::error::{
|
||||
ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownServerSnafu, StartServerSnafu,
|
||||
};
|
||||
use crate::instance::InstanceRef;
|
||||
|
||||
pub mod grpc;
|
||||
@@ -38,7 +33,6 @@ pub mod grpc;
|
||||
/// All rpc services.
|
||||
pub struct Services {
|
||||
grpc_server: GrpcServer,
|
||||
mysql_server: Option<Box<dyn Server>>,
|
||||
}
|
||||
|
||||
impl Services {
|
||||
@@ -51,48 +45,12 @@ impl Services {
|
||||
.context(RuntimeResourceSnafu)?,
|
||||
);
|
||||
|
||||
let mysql_server = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
info!("Disable MySQL server on datanode when running in standalone mode");
|
||||
None
|
||||
}
|
||||
Mode::Distributed => {
|
||||
let mysql_io_runtime = Arc::new(
|
||||
RuntimeBuilder::default()
|
||||
.worker_threads(opts.mysql_runtime_size)
|
||||
.thread_name("mysql-io-handlers")
|
||||
.build()
|
||||
.context(RuntimeResourceSnafu)?,
|
||||
);
|
||||
let tls = TlsOption::default();
|
||||
// default tls config returns None
|
||||
// but try to think a better way to do this
|
||||
Some(MysqlServer::create_server(
|
||||
mysql_io_runtime,
|
||||
Arc::new(MysqlSpawnRef::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
||||
None,
|
||||
)),
|
||||
Arc::new(MysqlSpawnConfig::new(
|
||||
tls.should_force_tls(),
|
||||
tls.setup()
|
||||
.map_err(|e| StartServer {
|
||||
source: InternalIo { source: e },
|
||||
})?
|
||||
.map(Arc::new),
|
||||
false,
|
||||
)),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
grpc_server: GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(instance),
|
||||
None,
|
||||
grpc_runtime,
|
||||
),
|
||||
mysql_server,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -100,19 +58,17 @@ impl Services {
        let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
            addr: &opts.rpc_addr,
        })?;

        let mut res = vec![self.grpc_server.start(grpc_addr)];
        if let Some(mysql_server) = &self.mysql_server {
            let mysql_addr = &opts.mysql_addr;
            let mysql_addr: SocketAddr = mysql_addr
                .parse()
                .context(ParseAddrSnafu { addr: mysql_addr })?;
            res.push(mysql_server.start(mysql_addr));
        };

        futures::future::try_join_all(res)
        self.grpc_server
            .start(grpc_addr)
            .await
            .context(StartServerSnafu)?;
        Ok(())
    }

    pub async fn shutdown(&self) -> Result<()> {
        self.grpc_server
            .shutdown()
            .await
            .context(ShutdownServerSnafu)
    }
}
@@ -12,13 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr};
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, FlushTableExpr};
|
||||
use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
|
||||
use common_query::Output;
|
||||
use common_telemetry::info;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use table::requests::DropTableRequest;
|
||||
use table::requests::{DropTableRequest, FlushTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu,
|
||||
@@ -82,6 +82,24 @@ impl Instance {
|
||||
.execute(SqlRequest::DropTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
|
||||
let table_name = if expr.table_name.trim().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(expr.table_name)
|
||||
};
|
||||
|
||||
let req = FlushTableRequest {
|
||||
catalog_name: expr.catalog_name,
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -136,7 +154,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
fn test_create_column_schema() {
|
||||
let column_def = ColumnDef {
|
||||
name: "a".to_string(),
|
||||
|
||||
@@ -13,22 +13,24 @@
|
||||
// limitations under the License.
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_query::Output;
|
||||
use common_telemetry::error;
|
||||
use query::query_engine::QueryEngineRef;
|
||||
use query::sql::{describe_table, explain, show_databases, show_tables};
|
||||
use query::sql::{describe_table, show_databases, show_tables};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::describe::DescribeTable;
|
||||
use sql::statements::explain::Explain;
|
||||
use sql::statements::show::{ShowDatabases, ShowTables};
|
||||
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
|
||||
use table::requests::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu};
|
||||
use crate::error::{
|
||||
self, CloseTableEngineSnafu, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
|
||||
mod alter;
|
||||
@@ -37,6 +39,7 @@ mod copy_table_from;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
mod flush_table;
|
||||
pub(crate) mod insert;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -46,10 +49,10 @@ pub enum SqlRequest {
|
||||
CreateDatabase(CreateDatabaseRequest),
|
||||
Alter(AlterTableRequest),
|
||||
DropTable(DropTableRequest),
|
||||
FlushTable(FlushTableRequest),
|
||||
ShowDatabases(ShowDatabases),
|
||||
ShowTables(ShowTables),
|
||||
DescribeTable(DescribeTable),
|
||||
Explain(Box<Explain>),
|
||||
Delete(Delete),
|
||||
CopyTable(CopyTableRequest),
|
||||
CopyTableFrom(CopyTableFromRequest),
|
||||
@@ -115,9 +118,7 @@ impl SqlHandler {
|
||||
})?;
|
||||
describe_table(table).context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::Explain(req) => explain(req, self.query_engine.clone(), query_ctx.clone())
|
||||
.await
|
||||
.context(ExecuteSqlSnafu),
|
||||
SqlRequest::FlushTable(req) => self.flush_table(req).await,
|
||||
};
|
||||
if let Err(e) = &result {
|
||||
error!(e; "{query_ctx}");
|
||||
@@ -139,6 +140,14 @@ impl SqlHandler {
    pub fn table_engine(&self) -> TableEngineRef {
        self.table_engine.clone()
    }

    pub async fn close(&self) -> Result<()> {
        self.table_engine
            .close()
            .await
            .map_err(BoxedError::new)
            .context(CloseTableEngineSnafu)
    }
}

#[cfg(test)]
83
src/datanode/src/sql/flush_table.rs
Normal file
@@ -0,0 +1,83 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_query::Output;
use snafu::{OptionExt, ResultExt};
use table::engine::TableReference;
use table::requests::FlushTableRequest;

use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
use crate::sql::SqlHandler;

impl SqlHandler {
    pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
        if let Some(table) = &req.table_name {
            self.flush_table_inner(
                &req.catalog_name,
                &req.schema_name,
                table,
                req.region_number,
            )
            .await?;
        } else {
            let schema = self
                .catalog_manager
                .schema(&req.catalog_name, &req.schema_name)
                .context(CatalogSnafu)?
                .context(DatabaseNotFoundSnafu {
                    catalog: &req.catalog_name,
                    schema: &req.schema_name,
                })?;

            let all_table_names = schema.table_names().context(CatalogSnafu)?;
            futures::future::join_all(all_table_names.iter().map(|table| {
                self.flush_table_inner(
                    &req.catalog_name,
                    &req.schema_name,
                    table,
                    req.region_number,
                )
            }))
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;
        }
        Ok(Output::AffectedRows(0))
    }

    async fn flush_table_inner(
        &self,
        catalog: &str,
        schema: &str,
        table: &str,
        region: Option<u32>,
    ) -> Result<()> {
        if schema == DEFAULT_SCHEMA_NAME && table == "numbers" {
            return Ok(());
        }

        let table_ref = TableReference {
            catalog,
            schema,
            table,
        };

        let full_table_name = table_ref.to_string();
        let table = self.get_table(&table_ref)?;
        table.flush(region).await.context(error::FlushTableSnafu {
            table_name: full_table_name,
        })
    }
}
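The new file above gives `SqlHandler` a flush path keyed off `FlushTableRequest`. A sketch of how a caller might drive it, modeled on `handle_flush_table` earlier in this diff; the concrete catalog, schema, and table names are assumptions made for the example.

// Illustration only: build a flush request and route it through the SQL handler.
async fn flush_one_table(instance: &Instance) -> Result<Output> {
    let req = FlushTableRequest {
        catalog_name: "greptime".to_string(),   // assumed default catalog name
        schema_name: "public".to_string(),      // assumed default schema name
        table_name: Some("demo".to_string()),   // None would flush every table in the schema
        region_number: None,
    };
    instance
        .sql_handler()
        .execute(SqlRequest::FlushTable(req), QueryContext::arc())
        .await
}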
@@ -39,8 +39,8 @@ use table::TableRef;
|
||||
use crate::error::{
|
||||
CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
|
||||
ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
|
||||
ExecuteSqlSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu, ParseSqlValueSnafu,
|
||||
Result, TableNotFoundSnafu,
|
||||
ExecuteLogicalPlanSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
|
||||
ParseSqlValueSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
|
||||
|
||||
@@ -236,18 +236,19 @@ impl SqlHandler {
|
||||
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(
|
||||
.planner()
|
||||
.plan(
|
||||
QueryStatement::Sql(Statement::Query(Box::new(query))),
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let output = self
|
||||
.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
.context(ExecuteLogicalPlanSnafu)?;
|
||||
|
||||
let stream: InsertRequestStream = match output {
|
||||
Output::RecordBatches(batches) => {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// TODO(LFC): These tests should be moved to frontend crate. They are actually standalone instance tests.
|
||||
mod instance_test;
|
||||
mod promql_test;
|
||||
pub(crate) mod test_util;
|
||||
|
||||
@@ -19,9 +19,12 @@ use common_query::Output;
|
||||
use common_recordbatch::util;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
|
||||
use query::parser::{QueryLanguageParser, QueryStatement};
|
||||
use session::context::QueryContext;
|
||||
use snafu::ResultExt;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::error::{Error, ExecuteLogicalPlanSnafu, PlanStatementSnafu};
|
||||
use crate::tests::test_util::{self, check_output_stream, setup_test_instance, MockInstance};
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
@@ -414,7 +417,6 @@ pub async fn test_execute_create() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rename_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let instance = MockInstance::new("test_rename_table_local").await;
|
||||
|
||||
let output = execute_sql(&instance, "create database db").await;
|
||||
@@ -933,7 +935,20 @@ async fn try_execute_sql_in_db(
|
||||
db: &str,
|
||||
) -> Result<Output, crate::error::Error> {
|
||||
let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
|
||||
instance.inner().execute_sql(sql, query_ctx).await
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let engine = instance.inner().query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
_ => instance.inner().execute_stmt(stmt, query_ctx).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_sql_in_db(instance: &MockInstance, sql: &str, db: &str) -> Output {
|
||||
|
||||
@@ -31,22 +31,14 @@ async fn create_insert_query_assert(
|
||||
expected: &str,
|
||||
) {
|
||||
let instance = setup_test_instance("test_execute_insert").await;
|
||||
let query_ctx = QueryContext::arc();
|
||||
instance
|
||||
.inner()
|
||||
.execute_sql(create, query_ctx.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
instance
|
||||
.inner()
|
||||
.execute_sql(insert, query_ctx.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
instance.execute_sql(create).await;
|
||||
|
||||
instance.execute_sql(insert).await;
|
||||
|
||||
let query_output = instance
|
||||
.inner()
|
||||
.execute_promql_statement(promql, start, end, interval, lookback, query_ctx)
|
||||
.execute_promql_statement(promql, start, end, interval, lookback, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let expected = String::from(expected);
|
||||
@@ -56,24 +48,12 @@ async fn create_insert_query_assert(
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn create_insert_tql_assert(create: &str, insert: &str, tql: &str, expected: &str) {
|
||||
let instance = setup_test_instance("test_execute_insert").await;
|
||||
let query_ctx = QueryContext::arc();
|
||||
instance
|
||||
.inner()
|
||||
.execute_sql(create, query_ctx.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
instance
|
||||
.inner()
|
||||
.execute_sql(insert, query_ctx.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
instance.execute_sql(create).await;
|
||||
|
||||
let query_output = instance
|
||||
.inner()
|
||||
.execute_sql(tql, query_ctx.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
instance.execute_sql(insert).await;
|
||||
|
||||
let query_output = instance.execute_sql(tql).await;
|
||||
let expected = String::from(expected);
|
||||
check_unordered_output_stream(query_output, expected).await;
|
||||
}
|
||||
|
||||
@@ -22,9 +22,13 @@ use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use mito::config::EngineConfig;
|
||||
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::ResultExt;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::requests::{CreateTableRequest, TableOptions};
|
||||
|
||||
@@ -72,6 +76,40 @@ impl MockInstance {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn execute_sql(&self, sql: &str) -> Output {
|
||||
let engine = self.inner().query_engine();
|
||||
let planner = engine.planner();
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
}
|
||||
QueryStatement::Sql(Statement::Tql(tql)) => {
|
||||
let plan = match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
};
|
||||
let stmt = QueryLanguageParser::parse_promql(&promql).unwrap();
|
||||
planner.plan(stmt, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
engine.execute(&plan).await.unwrap()
|
||||
}
|
||||
_ => self
|
||||
.inner()
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn inner(&self) -> &Instance {
|
||||
&self.instance
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ client = { path = "../client" }
|
||||
common-base = { path = "../common/base" }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-function = { path = "../common/function" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-query = { path = "../common/query" }
|
||||
|
||||
@@ -44,6 +44,12 @@ pub enum Error {
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown server, source: {}", source))]
|
||||
ShutdownServer {
|
||||
#[snafu(backtrace)]
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse address {}, source: {}", addr, source))]
|
||||
ParseAddr {
|
||||
addr: String,
|
||||
@@ -241,6 +247,24 @@ pub enum Error {
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to plan statement, source: {}", source))]
|
||||
PlanStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse query, source: {}", source))]
|
||||
ParseQuery {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute logical plan, source: {}", source))]
|
||||
ExecLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build DataFusion logical plan, source: {}", source))]
|
||||
BuildDfLogicalPlan {
|
||||
source: datafusion_common::DataFusionError,
|
||||
@@ -381,6 +405,7 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::SqlExecIntercepted { source, .. } => source.status_code(),
|
||||
Error::StartServer { source, .. } => source.status_code(),
|
||||
Error::ShutdownServer { source, .. } => source.status_code(),
|
||||
|
||||
Error::ParseSql { source } => source.status_code(),
|
||||
|
||||
@@ -419,9 +444,12 @@ impl ErrorExt for Error {
|
||||
| Error::ToTableInsertRequest { source }
|
||||
| Error::FindNewColumnsOnInsertion { source } => source.status_code(),
|
||||
|
||||
Error::ExecuteStatement { source, .. } | Error::DescribeStatement { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::ExecuteStatement { source, .. }
|
||||
| Error::PlanStatement { source }
|
||||
| Error::ParseQuery { source }
|
||||
| Error::ExecLogicalPlan { source }
|
||||
| Error::DescribeStatement { source } => source.status_code(),
|
||||
|
||||
Error::AlterExprToRequest { source, .. } => source.status_code(),
|
||||
Error::LeaderNotFound { .. } => StatusCode::StorageUnavailable,
|
||||
Error::TableAlreadyExist { .. } => StatusCode::TableAlreadyExists,
|
||||
|
||||
@@ -12,25 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::Plugins;
|
||||
use meta_client::MetaClientOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::Mode;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::grpc::GrpcOptions;
|
||||
use crate::influxdb::InfluxdbOptions;
|
||||
use crate::instance::FrontendInstance;
|
||||
use crate::mysql::MysqlOptions;
|
||||
use crate::opentsdb::OpentsdbOptions;
|
||||
use crate::postgres::PostgresOptions;
|
||||
use crate::prom::PromOptions;
|
||||
use crate::prometheus::PrometheusOptions;
|
||||
use crate::server::Services;
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
@@ -64,40 +57,6 @@ impl Default for FrontendOptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Frontend<T>
|
||||
where
|
||||
T: FrontendInstance,
|
||||
{
|
||||
opts: FrontendOptions,
|
||||
instance: Option<T>,
|
||||
plugins: Arc<Plugins>,
|
||||
}
|
||||
|
||||
impl<T: FrontendInstance> Frontend<T> {
|
||||
pub fn new(opts: FrontendOptions, instance: T, plugins: Arc<Plugins>) -> Self {
|
||||
Self {
|
||||
opts,
|
||||
instance: Some(instance),
|
||||
plugins,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
let mut instance = self
|
||||
.instance
|
||||
.take()
|
||||
.context(error::IllegalFrontendStateSnafu {
|
||||
err_msg: "Frontend instance not initialized",
|
||||
})?;
|
||||
instance.start().await?;
|
||||
|
||||
let instance = Arc::new(instance);
|
||||
|
||||
// TODO(sunng87): merge this into instance
|
||||
Services::start(&self.opts, instance, self.plugins.clone()).await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -36,22 +36,26 @@ use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging::{debug, info};
|
||||
use common_telemetry::timer;
|
||||
use datafusion::sql::sqlparser::ast::ObjectName;
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use datanode::instance::InstanceRef as DnInstanceRef;
|
||||
use datanode::metric;
|
||||
use datatypes::schema::Schema;
|
||||
use distributed::DistInstance;
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
use meta_client::MetaClientOptions;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
use partition::route::TableRoutes;
|
||||
use query::parser::PromQuery;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
|
||||
use query::query_engine::StatementHandlerRef;
|
||||
use query::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::error as server_error;
|
||||
use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
|
||||
use servers::prom::{PromHandler, PromHandlerRef};
|
||||
use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
|
||||
use servers::query_handler::sql::{SqlQueryHandler, SqlQueryHandlerRef};
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::{
|
||||
InfluxdbLineProtocolHandler, OpentsdbProtocolHandler, PrometheusProtocolHandler, ScriptHandler,
|
||||
ScriptHandlerRef,
|
||||
@@ -62,16 +66,19 @@ use sql::dialect::GenericDialect;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
|
||||
use crate::catalog::FrontendCatalogManager;
|
||||
use crate::datanode::DatanodeClients;
|
||||
use crate::error::{
|
||||
self, Error, ExecutePromqlSnafu, ExternalSnafu, MissingMetasrvOptsSnafu, NotSupportedSnafu,
|
||||
ParseSqlSnafu, Result, SqlExecInterceptedSnafu,
|
||||
self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExecuteStatementSnafu, ExternalSnafu,
|
||||
InvalidInsertRequestSnafu, MissingMetasrvOptsSnafu, NotSupportedSnafu, ParseQuerySnafu,
|
||||
ParseSqlSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
|
||||
};
|
||||
use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
|
||||
use crate::frontend::FrontendOptions;
|
||||
use crate::instance::standalone::{StandaloneGrpcQueryHandler, StandaloneSqlQueryHandler};
|
||||
use crate::instance::standalone::StandaloneGrpcQueryHandler;
|
||||
use crate::server::{start_server, ServerHandlers, Services};
|
||||
|
||||
#[async_trait]
|
||||
pub trait FrontendInstance:
|
||||
@@ -97,7 +104,8 @@ pub struct Instance {
|
||||
|
||||
/// Script handler is None in distributed mode, only works on standalone mode.
|
||||
script_handler: Option<ScriptHandlerRef>,
|
||||
sql_handler: SqlQueryHandlerRef<Error>,
|
||||
statement_handler: StatementHandlerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
grpc_query_handler: GrpcQueryHandlerRef<Error>,
|
||||
promql_handler: Option<PromHandlerRef>,
|
||||
|
||||
@@ -106,6 +114,8 @@ pub struct Instance {
|
||||
/// plugins: this map holds extensions to customize query or auth
|
||||
/// behaviours.
|
||||
plugins: Arc<Plugins>,
|
||||
|
||||
servers: Arc<ServerHandlers>,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
@@ -128,22 +138,24 @@ impl Instance {
|
||||
datanode_clients.clone(),
|
||||
));
|
||||
|
||||
let dist_instance = DistInstance::new(
|
||||
meta_client,
|
||||
catalog_manager.clone(),
|
||||
datanode_clients,
|
||||
plugins.clone(),
|
||||
);
|
||||
let dist_instance =
|
||||
DistInstance::new(meta_client, catalog_manager.clone(), datanode_clients);
|
||||
let dist_instance = Arc::new(dist_instance);
|
||||
|
||||
let query_engine =
|
||||
QueryEngineFactory::new_with_plugins(catalog_manager.clone(), plugins.clone())
|
||||
.query_engine();
|
||||
|
||||
Ok(Instance {
|
||||
catalog_manager,
|
||||
script_handler: None,
|
||||
create_expr_factory: Arc::new(DefaultCreateExprFactory),
|
||||
sql_handler: dist_instance.clone(),
|
||||
statement_handler: dist_instance.clone(),
|
||||
query_engine,
|
||||
grpc_query_handler: dist_instance,
|
||||
promql_handler: None,
|
||||
plugins,
|
||||
plugins: plugins.clone(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -182,23 +194,40 @@ impl Instance {
|
||||
catalog_manager: dn_instance.catalog_manager().clone(),
|
||||
script_handler: None,
|
||||
create_expr_factory: Arc::new(DefaultCreateExprFactory),
|
||||
sql_handler: StandaloneSqlQueryHandler::arc(dn_instance.clone()),
|
||||
statement_handler: dn_instance.clone(),
|
||||
query_engine: dn_instance.query_engine(),
|
||||
grpc_query_handler: StandaloneGrpcQueryHandler::arc(dn_instance.clone()),
|
||||
promql_handler: Some(dn_instance.clone()),
|
||||
plugins: Default::default(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn build_servers(
|
||||
&mut self,
|
||||
opts: &FrontendOptions,
|
||||
plugins: Arc<Plugins>,
|
||||
) -> Result<()> {
|
||||
let servers = Services::build(opts, Arc::new(self.clone()), plugins).await?;
|
||||
self.servers = Arc::new(servers);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn new_distributed(dist_instance: Arc<DistInstance>) -> Self {
|
||||
let catalog_manager = dist_instance.catalog_manager();
|
||||
let query_engine = QueryEngineFactory::new(catalog_manager.clone()).query_engine();
|
||||
Instance {
|
||||
catalog_manager: dist_instance.catalog_manager(),
|
||||
catalog_manager,
|
||||
script_handler: None,
|
||||
statement_handler: dist_instance.clone(),
|
||||
query_engine,
|
||||
create_expr_factory: Arc::new(DefaultCreateExprFactory),
|
||||
sql_handler: dist_instance.clone(),
|
||||
grpc_query_handler: dist_instance,
|
||||
promql_handler: None,
|
||||
plugins: Default::default(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -231,7 +260,7 @@ impl Instance {
|
||||
}
|
||||
|
||||
async fn handle_insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<Output> {
|
||||
self.create_or_alter_table_on_demand(ctx.clone(), &request.table_name, &request.columns)
|
||||
self.create_or_alter_table_on_demand(ctx.clone(), &request)
|
||||
.await?;
|
||||
|
||||
let query = Request::Insert(request);
|
||||
@@ -244,11 +273,12 @@ impl Instance {
|
||||
async fn create_or_alter_table_on_demand(
|
||||
&self,
|
||||
ctx: QueryContextRef,
|
||||
table_name: &str,
|
||||
columns: &[Column],
|
||||
request: &InsertRequest,
|
||||
) -> Result<()> {
|
||||
let catalog_name = &ctx.current_catalog();
|
||||
let schema_name = &ctx.current_schema();
|
||||
let table_name = &request.table_name;
|
||||
let columns = &request.columns;
|
||||
|
||||
let table = self
|
||||
.catalog_manager
|
||||
@@ -271,6 +301,8 @@ impl Instance {
|
||||
Some(table) => {
|
||||
let schema = table.schema();
|
||||
|
||||
validate_insert_request(schema.as_ref(), request)?;
|
||||
|
||||
if let Some(add_columns) = common_grpc_expr::find_new_columns(&schema, columns)
|
||||
.context(error::FindNewColumnsOnInsertionSnafu)?
|
||||
{
|
||||
@@ -370,13 +402,24 @@ impl Instance {
|
||||
pub fn plugins(&self) -> Arc<Plugins> {
|
||||
self.plugins.clone()
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
futures::future::try_join_all(self.servers.values().map(|server| server.0.shutdown()))
|
||||
.await
|
||||
.context(error::ShutdownServerSnafu)
|
||||
.map(|_| ())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FrontendInstance for Instance {
|
||||
async fn start(&mut self) -> Result<()> {
|
||||
// TODO(hl): Frontend init should move to here
|
||||
Ok(())
|
||||
|
||||
futures::future::try_join_all(self.servers.values().map(start_server))
|
||||
.await
|
||||
.context(error::StartServerSnafu)
|
||||
.map(|_| ())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -387,20 +430,57 @@ fn parse_stmt(sql: &str) -> Result<Vec<Statement>> {
|
||||
impl Instance {
|
||||
async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
check_permission(self.plugins.clone(), &stmt, &query_ctx)?;
|
||||
|
||||
let planner = self.query_engine.planner();
|
||||
|
||||
match stmt {
|
||||
Statement::Query(_) | Statement::Explain(_) => {
|
||||
let plan = planner
|
||||
.plan(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
}
|
||||
Statement::Tql(tql) => {
|
||||
let plan = match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
};
|
||||
let stmt =
|
||||
QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
|
||||
planner
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
}
|
||||
Statement::CreateDatabase(_)
|
||||
| Statement::ShowDatabases(_)
|
||||
| Statement::CreateTable(_)
|
||||
| Statement::ShowTables(_)
|
||||
| Statement::DescribeTable(_)
|
||||
| Statement::Explain(_)
|
||||
| Statement::Query(_)
|
||||
| Statement::Insert(_)
|
||||
| Statement::Delete(_)
|
||||
| Statement::Alter(_)
|
||||
| Statement::DropTable(_)
|
||||
| Statement::Tql(_)
|
||||
| Statement::Copy(_) => self.sql_handler.do_statement_query(stmt, query_ctx).await,
|
||||
| Statement::Copy(_) => self
|
||||
.statement_handler
|
||||
.handle_statement(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu),
|
||||
Statement::Use(db) => self.handle_use(db, query_ctx),
|
||||
Statement::ShowCreateTable(_) => NotSupportedSnafu {
|
||||
feat: format!("{stmt:?}"),
|
||||
@@ -415,6 +495,8 @@ impl SqlQueryHandler for Instance {
|
||||
type Error = Error;
|
||||
|
||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
|
||||
let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
|
||||
let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
|
||||
Ok(q) => q,
|
||||
@@ -471,28 +553,26 @@ impl SqlQueryHandler for Instance {
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_statement_query(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
|
||||
|
||||
// TODO(sunng87): figure out at which stage we can call
|
||||
// this hook after ArrowFlight adoption. We need to provide
|
||||
// LogicalPlan as to this hook.
|
||||
query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
|
||||
self.query_statement(stmt, query_ctx.clone())
|
||||
.await
|
||||
.and_then(|output| query_interceptor.post_execute(output, query_ctx.clone()))
|
||||
}
|
||||
|
||||
async fn do_describe(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Option<Schema>> {
|
||||
self.sql_handler.do_describe(stmt, query_ctx).await
|
||||
if let Statement::Query(_) = stmt {
|
||||
let plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.describe(plan)
|
||||
.await
|
||||
.map(Some)
|
||||
.context(error::DescribeStatementSnafu)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
@@ -616,13 +696,39 @@ fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()>
|
||||
.context(SqlExecInterceptedSnafu)
|
||||
}
|
||||
|
||||
fn validate_insert_request(schema: &Schema, request: &InsertRequest) -> Result<()> {
    for column_schema in schema.column_schemas() {
        if column_schema.is_nullable() || column_schema.default_constraint().is_some() {
            continue;
        }
        let not_null = request
            .columns
            .iter()
            .find(|x| x.column_name == column_schema.name)
            .map(|column| column.null_mask.is_empty() || column.null_mask.iter().all(|x| *x == 0));
        ensure!(
            not_null == Some(true),
            InvalidInsertRequestSnafu {
                reason: format!(
                    "Expecting insert data to be present for the not-null column '{}' that has no default value.",
                    &column_schema.name
                )
            }
        );
    }
    Ok(())
}
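As a quick aside on the validation above: a column with no nulls is recognized either by an absent null bitmap or by a bitmap whose bytes are all zero. Below is a minimal standalone sketch of just that predicate, independent of the GreptimeDB types (names are illustrative):

```rust
/// Returns true when a packed null bitmap marks no value as null.
/// An empty bitmap is treated the same as an all-zero bitmap,
/// mirroring the check in `validate_insert_request` above.
fn column_has_no_nulls(null_mask: &[u8]) -> bool {
    null_mask.is_empty() || null_mask.iter().all(|byte| *byte == 0)
}

fn main() {
    assert!(column_has_no_nulls(&[]));             // no bitmap at all
    assert!(column_has_no_nulls(&[0, 0]));         // bitmap present, no bits set
    assert!(!column_has_no_nulls(&[0b0000_0100])); // the third value is null
}
```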
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
|
||||
use api::v1::column::Values;
|
||||
use catalog::helper::{TableGlobalKey, TableGlobalValue};
|
||||
use datatypes::prelude::{ConcreteDataType, Value};
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
|
||||
use query::query_engine::options::QueryOptions;
|
||||
use session::context::QueryContext;
|
||||
use strfmt::Format;
|
||||
@@ -632,6 +738,71 @@ mod tests {
|
||||
use crate::tests;
|
||||
use crate::tests::MockDistributedInstance;
|
||||
|
||||
#[test]
|
||||
fn test_validate_insert_request() {
|
||||
let schema = Schema::new(vec![
|
||||
ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true)
|
||||
.with_default_constraint(None)
|
||||
.unwrap(),
|
||||
ColumnSchema::new("b", ConcreteDataType::int32_datatype(), true)
|
||||
.with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Int32(100))))
|
||||
.unwrap(),
|
||||
]);
|
||||
let request = InsertRequest {
|
||||
columns: vec![Column {
|
||||
column_name: "c".to_string(),
|
||||
values: Some(Values {
|
||||
i32_values: vec![1],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![0],
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
// If nullable is true, it doesn't matter whether the insert request has the column.
|
||||
assert!(validate_insert_request(&schema, &request).is_ok());
|
||||
|
||||
let schema = Schema::new(vec![
|
||||
ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false)
|
||||
.with_default_constraint(None)
|
||||
.unwrap(),
|
||||
ColumnSchema::new("b", ConcreteDataType::int32_datatype(), false)
|
||||
.with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Int32(-100))))
|
||||
.unwrap(),
|
||||
]);
|
||||
let request = InsertRequest {
|
||||
columns: vec![Column {
|
||||
column_name: "a".to_string(),
|
||||
values: Some(Values {
|
||||
i32_values: vec![1],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![0],
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
// If nullable is false, but the column is defined with default value,
|
||||
// it also doesn't matter whether the insert request has the column.
|
||||
assert!(validate_insert_request(&schema, &request).is_ok());
|
||||
|
||||
let request = InsertRequest {
|
||||
columns: vec![Column {
|
||||
column_name: "b".to_string(),
|
||||
values: Some(Values {
|
||||
i32_values: vec![1],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![0],
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
// Neither of the above cases.
|
||||
assert!(validate_insert_request(&schema, &request).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_validation() {
|
||||
let query_ctx = Arc::new(QueryContext::new());
|
||||
@@ -906,12 +1077,16 @@ mod tests {
|
||||
.collect::<HashMap<u32, u64>>();
|
||||
assert_eq!(region_to_dn_map.len(), expected_distribution.len());
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
let dn = instance.datanodes.get(dn).unwrap();
|
||||
let output = dn
|
||||
.execute_sql("SELECT ts, host FROM demo ORDER BY ts", QueryContext::arc())
|
||||
let engine = dn.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt.clone(), QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let actual = recordbatches.pretty_print().unwrap();
|
||||
|
||||
@@ -19,15 +19,14 @@ use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::{
|
||||
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, InsertRequest,
|
||||
TableId,
|
||||
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, FlushTableExpr,
|
||||
InsertRequest, TableId,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use catalog::helper::{SchemaKey, SchemaValue};
|
||||
use catalog::{CatalogManager, DeregisterTableRequest, RegisterTableRequest};
|
||||
use chrono::DateTime;
|
||||
use client::Database;
|
||||
use common_base::Plugins;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::prelude::BoxedError;
|
||||
@@ -35,18 +34,18 @@ use common_query::Output;
|
||||
use common_telemetry::{debug, info};
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{RawSchema, Schema};
|
||||
use datatypes::schema::RawSchema;
|
||||
use meta_client::client::MetaClient;
|
||||
use meta_client::rpc::router::DeleteRequest as MetaDeleteRequest;
|
||||
use meta_client::rpc::{
|
||||
CompareAndPutRequest, CreateRequest as MetaCreateRequest, Partition as MetaPartition,
|
||||
RouteResponse, TableName,
|
||||
RouteRequest, RouteResponse, TableName,
|
||||
};
|
||||
use partition::partition::{PartitionBound, PartitionDef};
|
||||
use query::parser::{PromQuery, QueryStatement};
|
||||
use query::sql::{describe_table, explain, show_databases, show_tables};
|
||||
use query::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::parser::QueryStatement;
|
||||
use query::query_engine::StatementHandler;
|
||||
use query::sql::{describe_table, show_databases, show_tables};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::ast::Value as SqlValue;
|
||||
@@ -61,12 +60,12 @@ use crate::catalog::FrontendCatalogManager;
|
||||
use crate::datanode::DatanodeClients;
|
||||
use crate::error::{
|
||||
self, AlterExprToRequestSnafu, CatalogEntrySerdeSnafu, CatalogSnafu, ColumnDataTypeSnafu,
|
||||
DeserializePartitionSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu, RequestDatanodeSnafu,
|
||||
RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu, TableAlreadyExistSnafu,
|
||||
TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
|
||||
DeserializePartitionSnafu, NotSupportedSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
|
||||
RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu,
|
||||
TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu,
|
||||
UnrecognizedTableOptionSnafu,
|
||||
};
|
||||
use crate::expr_factory;
|
||||
use crate::instance::parse_stmt;
|
||||
use crate::sql::insert_to_request;
|
||||
use crate::table::DistTable;
|
||||
|
||||
@@ -75,7 +74,6 @@ pub(crate) struct DistInstance {
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: Arc<FrontendCatalogManager>,
|
||||
datanode_clients: Arc<DatanodeClients>,
|
||||
query_engine: QueryEngineRef,
|
||||
}
|
||||
|
||||
impl DistInstance {
|
||||
@@ -83,16 +81,11 @@ impl DistInstance {
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: Arc<FrontendCatalogManager>,
|
||||
datanode_clients: Arc<DatanodeClients>,
|
||||
plugins: Arc<Plugins>,
|
||||
) -> Self {
|
||||
let query_engine =
|
||||
QueryEngineFactory::new_with_plugins(catalog_manager.clone(), plugins.clone())
|
||||
.query_engine();
|
||||
Self {
|
||||
meta_client,
|
||||
catalog_manager,
|
||||
datanode_clients,
|
||||
query_engine,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -266,20 +259,67 @@ impl DistInstance {
|
||||
Ok(Output::AffectedRows(1))
|
||||
}
|
||||
|
||||
async fn flush_table(&self, table_name: TableName, region_id: Option<u32>) -> Result<Output> {
|
||||
let _ = self
|
||||
.catalog_manager
|
||||
.table(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
)
|
||||
.await
|
||||
.context(CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: table_name.to_string(),
|
||||
})?;
|
||||
|
||||
let route_response = self
|
||||
.meta_client
|
||||
.route(RouteRequest {
|
||||
table_names: vec![table_name.clone()],
|
||||
})
|
||||
.await
|
||||
.context(RequestMetaSnafu)?;
|
||||
|
||||
let expr = FlushTableExpr {
|
||||
catalog_name: table_name.catalog_name.clone(),
|
||||
schema_name: table_name.schema_name.clone(),
|
||||
table_name: table_name.table_name.clone(),
|
||||
region_id,
|
||||
};
|
||||
|
||||
for table_route in &route_response.table_routes {
|
||||
let should_send_rpc = table_route.region_routes.iter().any(|route| {
|
||||
if let Some(region_id) = region_id {
|
||||
region_id == route.region.id as u32
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
if !should_send_rpc {
|
||||
continue;
|
||||
}
|
||||
for datanode in table_route.find_leaders() {
|
||||
debug!("Flushing table {table_name} on Datanode {datanode:?}");
|
||||
|
||||
let client = self.datanode_clients.get_client(&datanode).await;
|
||||
let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
|
||||
client
|
||||
.flush_table(expr.clone())
|
||||
.await
|
||||
.context(RequestDatanodeSnafu)?;
|
||||
}
|
||||
}
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
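To make the routing condition above explicit: a flush RPC is sent to a table route's leader datanodes only when no specific region was requested, or when that route actually hosts the requested region. A small sketch of just that predicate, detached from the meta client types:

```rust
/// Illustrative version of the per-route check in `flush_table` above:
/// flush everything when no region is given, otherwise only routes that
/// contain the requested region id.
fn should_send_flush(route_region_ids: &[u32], requested: Option<u32>) -> bool {
    match requested {
        None => true,
        Some(id) => route_region_ids.iter().any(|r| *r == id),
    }
}

fn main() {
    assert!(should_send_flush(&[0, 1, 2], None));
    assert!(should_send_flush(&[0, 1, 2], Some(1)));
    assert!(!should_send_flush(&[0, 1, 2], Some(7)));
}
```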
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
Statement::Query(_) => {
|
||||
let plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.context(error::ExecuteStatementSnafu {})?;
|
||||
self.query_engine.execute(&plan).await
|
||||
}
|
||||
Statement::CreateDatabase(stmt) => {
|
||||
let expr = CreateDatabaseExpr {
|
||||
database_name: stmt.name.to_string(),
|
||||
@@ -321,9 +361,6 @@ impl DistInstance {
|
||||
})?;
|
||||
describe_table(table)
|
||||
}
|
||||
Statement::Explain(stmt) => {
|
||||
explain(Box::new(stmt), self.query_engine.clone(), query_ctx).await
|
||||
}
|
||||
Statement::Insert(insert) => {
|
||||
let (catalog, schema, table) =
|
||||
table_idents_to_full_name(insert.table_name(), query_ctx.clone())
|
||||
@@ -353,29 +390,6 @@ impl DistInstance {
|
||||
.context(error::ExecuteStatementSnafu)
|
||||
}
|
||||
|
||||
async fn handle_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
let stmts = parse_stmt(sql);
|
||||
match stmts {
|
||||
Ok(stmts) => {
|
||||
let mut results = Vec::with_capacity(stmts.len());
|
||||
|
||||
for stmt in stmts {
|
||||
let result = self.handle_statement(stmt, query_ctx.clone()).await;
|
||||
let is_err = result.is_err();
|
||||
|
||||
results.push(result);
|
||||
|
||||
if is_err {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
results
|
||||
}
|
||||
Err(e) => vec![Err(e)],
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles distributed database creation
|
||||
async fn handle_create_database(
|
||||
&self,
|
||||
@@ -519,50 +533,21 @@ impl DistInstance {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SqlQueryHandler for DistInstance {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
self.handle_sql(query, query_ctx).await
|
||||
}
|
||||
|
||||
async fn do_promql_query(
|
||||
impl StatementHandler for DistInstance {
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
_: &PromQuery,
|
||||
_: QueryContextRef,
|
||||
) -> Vec<std::result::Result<Output, Self::Error>> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn do_statement_query(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
self.handle_statement(stmt, query_ctx).await
|
||||
}
|
||||
|
||||
async fn do_describe(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Option<Schema>> {
|
||||
if let Statement::Query(_) = stmt {
|
||||
self.query_engine
|
||||
.describe(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.map(Some)
|
||||
.context(error::DescribeStatementSnafu)
|
||||
} else {
|
||||
Ok(None)
|
||||
) -> query::error::Result<Output> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(stmt) => self.handle_statement(stmt, query_ctx).await,
|
||||
QueryStatement::Promql(_) => NotSupportedSnafu {
|
||||
feat: "distributed execute promql".to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.catalog_manager
|
||||
.schema(catalog, schema)
|
||||
.map(|s| s.is_some())
|
||||
.context(CatalogSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(QueryExecutionSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -721,14 +706,15 @@ fn find_partition_columns(
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use itertools::Itertools;
|
||||
use servers::query_handler::sql::SqlQueryHandlerRef;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::query_engine::StatementHandlerRef;
|
||||
use session::context::QueryContext;
|
||||
use sql::dialect::GenericDialect;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use super::*;
|
||||
use crate::instance::standalone::StandaloneSqlQueryHandler;
|
||||
use crate::instance::parse_stmt;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_parse_partitions() {
|
||||
@@ -771,28 +757,28 @@ ENGINE=mito",
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_sql(instance: &Arc<DistInstance>, sql: &str) -> Output {
|
||||
let stmt = parse_stmt(sql).unwrap().remove(0);
|
||||
instance
|
||||
.handle_statement(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_show_databases() {
|
||||
let instance = crate::tests::create_distributed_instance("test_show_databases").await;
|
||||
let dist_instance = &instance.dist_instance;
|
||||
|
||||
let sql = "create database test_show_databases";
|
||||
let output = dist_instance
|
||||
.handle_sql(sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
let output = handle_sql(dist_instance, sql).await;
|
||||
match output {
|
||||
Output::AffectedRows(rows) => assert_eq!(rows, 1),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let sql = "show databases";
|
||||
let output = dist_instance
|
||||
.handle_sql(sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
let output = handle_sql(dist_instance, sql).await;
|
||||
match output {
|
||||
Output::RecordBatches(r) => {
|
||||
let expected1 = vec![
|
||||
@@ -829,11 +815,7 @@ ENGINE=mito",
|
||||
let datanode_instances = instance.datanodes;
|
||||
|
||||
let sql = "create database test_show_tables";
|
||||
dist_instance
|
||||
.handle_sql(sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
handle_sql(dist_instance, sql).await;
|
||||
|
||||
let sql = "
|
||||
CREATE TABLE greptime.test_show_tables.dist_numbers (
|
||||
@@ -848,18 +830,14 @@ ENGINE=mito",
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)
|
||||
ENGINE=mito";
|
||||
dist_instance
|
||||
.handle_sql(sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
handle_sql(dist_instance, sql).await;
|
||||
|
||||
async fn assert_show_tables(instance: SqlQueryHandlerRef<error::Error>) {
|
||||
async fn assert_show_tables(handler: StatementHandlerRef) {
|
||||
let sql = "show tables in test_show_tables";
|
||||
let output = instance
|
||||
.do_query(sql, QueryContext::arc())
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let output = handler
|
||||
.handle_statement(stmt, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
match output {
|
||||
Output::RecordBatches(r) => {
|
||||
@@ -878,7 +856,7 @@ ENGINE=mito",
|
||||
|
||||
// Asserts that new table is created in Datanode as well.
|
||||
for x in datanode_instances.values() {
|
||||
assert_show_tables(StandaloneSqlQueryHandler::arc(x.clone())).await
|
||||
assert_show_tables(x.clone()).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +57,11 @@ impl GrpcQueryHandler for DistInstance {
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.drop_table(table_name).await
|
||||
}
|
||||
DdlExpr::FlushTable(_) => todo!(),
|
||||
DdlExpr::FlushTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.flush_table(table_name, expr.region_id).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use async_trait::async_trait;
|
||||
use common_query::Output;
|
||||
use query::parser::PromQuery;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -54,6 +55,23 @@ impl GrpcQueryHandler for Instance {
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
Query::PromRangeQuery(promql) => {
|
||||
let prom_query = PromQuery {
|
||||
query: promql.query,
|
||||
start: promql.start,
|
||||
end: promql.end,
|
||||
step: promql.step,
|
||||
};
|
||||
let mut result =
|
||||
SqlQueryHandler::do_promql_query(self, &prom_query, ctx).await;
|
||||
ensure!(
|
||||
result.len() == 1,
|
||||
error::NotSupportedSnafu {
|
||||
feat: "execute multiple statements in PromQL query string through GRPC interface"
|
||||
}
|
||||
);
|
||||
result.remove(0)?
|
||||
}
|
||||
}
|
||||
}
|
||||
Request::Ddl(request) => {
|
||||
@@ -73,13 +91,15 @@ mod test {
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::{
|
||||
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
|
||||
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, InsertRequest,
|
||||
QueryRequest,
|
||||
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
|
||||
InsertRequest, QueryRequest,
|
||||
};
|
||||
use catalog::helper::{TableGlobalKey, TableGlobalValue};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
use tests::{has_parquet_file, test_region_dir};
|
||||
|
||||
use super::*;
|
||||
use crate::table::DistTable;
|
||||
@@ -333,6 +353,108 @@ CREATE TABLE {table_name} (
|
||||
test_insert_and_query_on_auto_created_table(instance).await
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
|
||||
let data_tmp_dirs = instance.data_tmp_dirs();
|
||||
let frontend = instance.frontend.as_ref();
|
||||
|
||||
let table_name = "my_dist_table";
|
||||
let sql = format!(
|
||||
r"
|
||||
CREATE TABLE {table_name} (
|
||||
a INT,
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) PARTITION BY RANGE COLUMNS(a) (
|
||||
PARTITION r0 VALUES LESS THAN (10),
|
||||
PARTITION r1 VALUES LESS THAN (20),
|
||||
PARTITION r2 VALUES LESS THAN (50),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)"
|
||||
);
|
||||
create_table(frontend, sql).await;
|
||||
|
||||
test_insert_and_query_on_existing_table(frontend, table_name).await;
|
||||
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
// Wait for the previous flush task to finish
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
|
||||
let table_id = 1024;
|
||||
|
||||
let table = instance
|
||||
.frontend
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
|
||||
|
||||
let TableGlobalValue { regions_id_map, .. } = table
|
||||
.table_global_value(&TableGlobalKey {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let region_to_dn_map = regions_id_map
|
||||
.iter()
|
||||
.map(|(k, v)| (v[0], *k))
|
||||
.collect::<HashMap<u32, u64>>();
|
||||
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
// data_tmp_dirs are indexed by datanode id (dn is 1-based), hence the "- 1" below
|
||||
let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
*region,
|
||||
);
|
||||
has_parquet_file(®ion_dir);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
|
||||
let instance = &standalone.instance;
|
||||
let data_tmp_dir = standalone.data_tmp_dir();
|
||||
|
||||
let table_name = "my_table";
|
||||
let sql = format!("CREATE TABLE {table_name} (a INT, ts TIMESTAMP, TIME INDEX (ts))");
|
||||
|
||||
create_table(instance, sql).await;
|
||||
|
||||
test_insert_and_query_on_existing_table(instance, table_name).await;
|
||||
|
||||
let table_id = 1024;
|
||||
let region_id = 0;
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
region_id,
|
||||
);
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
// Wait for the previous flush task to finish
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
async fn create_table(frontend: &Instance, sql: String) {
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql)),
|
||||
@@ -341,6 +463,26 @@ CREATE TABLE {table_name} (
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
|
||||
async fn flush_table(
|
||||
frontend: &Instance,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
region_id: Option<u32>,
|
||||
) {
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::FlushTable(FlushTableExpr {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
region_id,
|
||||
})),
|
||||
});
|
||||
|
||||
let output = query(frontend, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
|
||||
async fn test_insert_and_query_on_existing_table(instance: &Instance, table_name: &str) {
|
||||
let insert = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
@@ -437,14 +579,18 @@ CREATE TABLE {table_name} (
|
||||
assert_eq!(region_to_dn_map.len(), expected_distribution.len());
|
||||
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
let stmt = QueryLanguageParser::parse_sql(&format!(
|
||||
"SELECT ts, a FROM {table_name} ORDER BY ts"
|
||||
))
|
||||
.unwrap();
|
||||
let dn = instance.datanodes.get(dn).unwrap();
|
||||
let output = dn
|
||||
.execute_sql(
|
||||
&format!("SELECT ts, a FROM {table_name} ORDER BY ts"),
|
||||
QueryContext::arc(),
|
||||
)
|
||||
let engine = dn.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let actual = recordbatches.pretty_print().unwrap();
|
||||
@@ -542,4 +688,100 @@ CREATE TABLE {table_name} (
|
||||
+---------------------+---+---+";
|
||||
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_promql_query() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_standalone_promql_query").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
let table_name = "my_table";
|
||||
let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))");
|
||||
create_table(instance, sql).await;
|
||||
|
||||
let insert = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
columns: vec![
|
||||
Column {
|
||||
column_name: "h".to_string(),
|
||||
values: Some(Values {
|
||||
string_values: vec![
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
"t".to_string(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
datatype: ColumnDataType::String as i32,
|
||||
..Default::default()
|
||||
},
|
||||
Column {
|
||||
column_name: "a".to_string(),
|
||||
values: Some(Values {
|
||||
f64_values: vec![1f64, 11f64, 20f64, 22f64, 50f64, 55f64, 99f64],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![4],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
},
|
||||
Column {
|
||||
column_name: "ts".to_string(),
|
||||
values: Some(Values {
|
||||
ts_millisecond_values: vec![
|
||||
1672557972000,
|
||||
1672557973000,
|
||||
1672557974000,
|
||||
1672557975000,
|
||||
1672557976000,
|
||||
1672557977000,
|
||||
1672557978000,
|
||||
1672557979000,
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
row_count: 8,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let request = Request::Insert(insert);
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(8)));
|
||||
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(api::v1::PromRangeQuery {
|
||||
query: "my_table".to_owned(),
|
||||
start: "1672557973".to_owned(),
|
||||
end: "1672557978".to_owned(),
|
||||
step: "1s".to_owned(),
|
||||
})),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
+---+------+---------------------+
|
||||
| h | a | ts |
|
||||
+---+------+---------------------+
|
||||
| t | 11.0 | 2023-01-01T07:26:13 |
|
||||
| t | | 2023-01-01T07:26:14 |
|
||||
| t | 20.0 | 2023-01-01T07:26:15 |
|
||||
| t | 22.0 | 2023-01-01T07:26:16 |
|
||||
| t | 50.0 | 2023-01-01T07:26:17 |
|
||||
| t | 55.0 | 2023-01-01T07:26:18 |
|
||||
+---+------+---------------------+";
|
||||
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,74 +18,12 @@ use api::v1::greptime_request::Request as GreptimeRequest;
|
||||
use async_trait::async_trait;
|
||||
use common_query::Output;
|
||||
use datanode::error::Error as DatanodeError;
|
||||
use datatypes::schema::Schema;
|
||||
use query::parser::PromQuery;
|
||||
use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
|
||||
use servers::query_handler::sql::{SqlQueryHandler, SqlQueryHandlerRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub(crate) struct StandaloneSqlQueryHandler(SqlQueryHandlerRef<DatanodeError>);
|
||||
|
||||
impl StandaloneSqlQueryHandler {
|
||||
pub(crate) fn arc(handler: SqlQueryHandlerRef<DatanodeError>) -> Arc<Self> {
|
||||
Arc::new(Self(handler))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SqlQueryHandler for StandaloneSqlQueryHandler {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
self.0
|
||||
.do_query(query, query_ctx)
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|x| x.context(error::InvokeDatanodeSnafu))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn do_promql_query(
|
||||
&self,
|
||||
_: &PromQuery,
|
||||
_: QueryContextRef,
|
||||
) -> Vec<std::result::Result<Output, Self::Error>> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn do_statement_query(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
self.0
|
||||
.do_statement_query(stmt, query_ctx)
|
||||
.await
|
||||
.context(error::InvokeDatanodeSnafu)
|
||||
}
|
||||
|
||||
async fn do_describe(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Option<Schema>> {
|
||||
self.0
|
||||
.do_describe(stmt, query_ctx)
|
||||
.await
|
||||
.context(error::InvokeDatanodeSnafu)
|
||||
}
|
||||
|
||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.0
|
||||
.is_valid_schema(catalog, schema)
|
||||
.context(error::InvokeDatanodeSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct StandaloneGrpcQueryHandler(GrpcQueryHandlerRef<DatanodeError>);
|
||||
|
||||
impl StandaloneGrpcQueryHandler {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -30,7 +31,6 @@ use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
||||
use servers::server::Server;
|
||||
use snafu::ResultExt;
|
||||
use tokio::try_join;
|
||||
|
||||
use crate::error::Error::StartServer;
|
||||
use crate::error::{self, Result};
|
||||
@@ -41,19 +41,23 @@ use crate::prometheus::PrometheusOptions;
|
||||
|
||||
pub(crate) struct Services;
|
||||
|
||||
pub type ServerHandlers = HashMap<String, ServerHandler>;
|
||||
|
||||
pub type ServerHandler = (Box<dyn Server>, SocketAddr);
|
||||
|
||||
impl Services {
|
||||
pub(crate) async fn start<T>(
|
||||
pub(crate) async fn build<T>(
|
||||
opts: &FrontendOptions,
|
||||
instance: Arc<T>,
|
||||
plugins: Arc<Plugins>,
|
||||
) -> Result<()>
|
||||
) -> Result<ServerHandlers>
|
||||
where
|
||||
T: FrontendInstance,
|
||||
{
|
||||
info!("Starting frontend servers");
|
||||
let mut result = Vec::<ServerHandler>::with_capacity(plugins.len());
|
||||
let user_provider = plugins.get::<UserProviderRef>().cloned();
|
||||
|
||||
let grpc_server_and_addr = if let Some(opts) = &opts.grpc_options {
|
||||
if let Some(opts) = &opts.grpc_options {
|
||||
let grpc_addr = parse_addr(&opts.addr)?;
|
||||
|
||||
let grpc_runtime = Arc::new(
|
||||
@@ -70,12 +74,10 @@ impl Services {
|
||||
grpc_runtime,
|
||||
);
|
||||
|
||||
Some((Box::new(grpc_server) as _, grpc_addr))
|
||||
} else {
|
||||
None
|
||||
result.push((Box::new(grpc_server), grpc_addr));
|
||||
};
|
||||
|
||||
let mysql_server_and_addr = if let Some(opts) = &opts.mysql_options {
|
||||
if let Some(opts) = &opts.mysql_options {
|
||||
let mysql_addr = parse_addr(&opts.addr)?;
|
||||
|
||||
let mysql_io_runtime = Arc::new(
|
||||
@@ -102,13 +104,10 @@ impl Services {
|
||||
opts.reject_no_database.unwrap_or(false),
|
||||
)),
|
||||
);
|
||||
result.push((mysql_server, mysql_addr));
|
||||
}
|
||||
|
||||
Some((mysql_server, mysql_addr))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let postgres_server_and_addr = if let Some(opts) = &opts.postgres_options {
|
||||
if let Some(opts) = &opts.postgres_options {
|
||||
let pg_addr = parse_addr(&opts.addr)?;
|
||||
|
||||
let pg_io_runtime = Arc::new(
|
||||
@@ -126,12 +125,12 @@ impl Services {
|
||||
user_provider.clone(),
|
||||
)) as Box<dyn Server>;
|
||||
|
||||
Some((pg_server, pg_addr))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
result.push((pg_server, pg_addr));
|
||||
}
|
||||
|
||||
let opentsdb_server_and_addr = if let Some(opts) = &opts.opentsdb_options {
|
||||
let mut set_opentsdb_handler = false;
|
||||
|
||||
if let Some(opts) = &opts.opentsdb_options {
|
||||
let addr = parse_addr(&opts.addr)?;
|
||||
|
||||
let io_runtime = Arc::new(
|
||||
@@ -144,23 +143,23 @@ impl Services {
|
||||
|
||||
let server = OpentsdbServer::create_server(instance.clone(), io_runtime);
|
||||
|
||||
Some((server, addr))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
result.push((server, addr));
|
||||
set_opentsdb_handler = true;
|
||||
}
|
||||
|
||||
let http_server_and_addr = if let Some(http_options) = &opts.http_options {
|
||||
if let Some(http_options) = &opts.http_options {
|
||||
let http_addr = parse_addr(&http_options.addr)?;
|
||||
|
||||
let mut http_server = HttpServer::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
||||
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
|
||||
http_options.clone(),
|
||||
);
|
||||
if let Some(user_provider) = user_provider.clone() {
|
||||
http_server.set_user_provider(user_provider);
|
||||
}
|
||||
|
||||
if opentsdb_server_and_addr.is_some() {
|
||||
if set_opentsdb_handler {
|
||||
http_server.set_opentsdb_handler(instance.clone());
|
||||
}
|
||||
if matches!(
|
||||
@@ -178,34 +177,24 @@ impl Services {
|
||||
}
|
||||
http_server.set_script_handler(instance.clone());
|
||||
|
||||
Some((Box::new(http_server) as _, http_addr))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
result.push((Box::new(http_server), http_addr));
|
||||
}
|
||||
|
||||
let prom_server_and_addr = if let Some(prom_options) = &opts.prom_options {
|
||||
if let Some(prom_options) = &opts.prom_options {
|
||||
let prom_addr = parse_addr(&prom_options.addr)?;
|
||||
|
||||
let mut prom_server = PromServer::create_server(instance.clone());
|
||||
let mut prom_server = PromServer::create_server(instance);
|
||||
if let Some(user_provider) = user_provider {
|
||||
prom_server.set_user_provider(user_provider);
|
||||
}
|
||||
|
||||
Some((prom_server as _, prom_addr))
|
||||
} else {
|
||||
None
|
||||
result.push((prom_server, prom_addr));
|
||||
};
|
||||
|
||||
try_join!(
|
||||
start_server(http_server_and_addr),
|
||||
start_server(grpc_server_and_addr),
|
||||
start_server(mysql_server_and_addr),
|
||||
start_server(postgres_server_and_addr),
|
||||
start_server(opentsdb_server_and_addr),
|
||||
start_server(prom_server_and_addr),
|
||||
)
|
||||
.context(error::StartServerSnafu)?;
|
||||
Ok(())
|
||||
Ok(result
|
||||
.into_iter()
|
||||
.map(|(server, addr)| (server.name().to_string(), (server, addr)))
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
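The map-based handler registry built above lets start and shutdown iterate over every server uniformly instead of the earlier fixed-arity join over optional servers. A rough sketch of the start-all pattern, assuming the `futures` and `tokio` crates are available (server names and the error type are placeholders):

```rust
use futures::future::try_join_all;

/// Start every registered server concurrently and fail fast on the first error,
/// roughly the shape of the new `FrontendInstance::start` / `shutdown` loops.
async fn start_all(names: Vec<&str>) -> Result<(), String> {
    try_join_all(names.into_iter().map(|name| async move {
        if name.is_empty() {
            Err("cannot start an unnamed server".to_string())
        } else {
            // A real implementation would call `server.start(addr).await` here.
            Ok(())
        }
    }))
    .await
    .map(|_| ())
}

#[tokio::main]
async fn main() {
    assert!(start_all(vec!["http_server", "grpc_server"]).await.is_ok());
}
```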
|
||||
@@ -213,13 +202,10 @@ fn parse_addr(addr: &str) -> Result<SocketAddr> {
|
||||
addr.parse().context(error::ParseAddrSnafu { addr })
|
||||
}
|
||||
|
||||
async fn start_server(
|
||||
server_and_addr: Option<(Box<dyn Server>, SocketAddr)>,
|
||||
pub async fn start_server(
|
||||
server_and_addr: &(Box<dyn Server>, SocketAddr),
|
||||
) -> servers::error::Result<Option<SocketAddr>> {
|
||||
if let Some((server, addr)) = server_and_addr {
|
||||
info!("Starting server at {}", addr);
|
||||
server.start(addr).await.map(Some)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
let (server, addr) = server_and_addr;
|
||||
info!("Starting {} at {}", server.name(), addr);
|
||||
server.start(*addr).await.map(Some)
|
||||
}
|
||||
|
||||
@@ -140,8 +140,11 @@ impl Table for DistTable {
        Ok(Arc::new(dist_scan))
    }

    fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
        Ok(FilterPushDownType::Inexact)
    fn supports_filters_pushdown(
        &self,
        filters: &[&Expr],
    ) -> table::Result<Vec<FilterPushDownType>> {
        Ok(vec![FilterPushDownType::Inexact; filters.len()])
    }
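For readers unfamiliar with the pushdown API change above: the table now reports a pushdown type per filter, and `Inexact` tells the engine that the scan may return a superset of matching rows, so the filters must be re-applied upstream. A minimal illustrative model of that contract (the types and names here are stand-ins, not the actual `table` crate API):

```rust
/// Stand-in for the pushdown negotiation: the table reports, per filter,
/// whether its scan applies the filter exactly, inexactly (a superset may
/// be returned, so the engine re-filters), or not at all.
#[derive(Clone, Copy, Debug, PartialEq)]
enum PushDown {
    Exact,
    Inexact,
    Unsupported,
}

/// Mirrors the behaviour above: a distributed table treats every filter
/// as inexact, leaving final filtering to the query engine.
fn supports_filters_pushdown(filter_count: usize) -> Vec<PushDown> {
    vec![PushDown::Inexact; filter_count]
}

fn main() {
    assert_eq!(
        supports_filters_pushdown(2),
        vec![PushDown::Inexact, PushDown::Inexact]
    );
}
```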
|
||||
async fn alter(&self, context: AlterContext, request: &AlterTableRequest) -> table::Result<()> {
|
||||
|
||||
@@ -74,8 +74,7 @@ impl DistTable {
|
||||
|
||||
let mut success = 0;
|
||||
for join in joins {
|
||||
let object_result = join.await.context(error::JoinTaskSnafu)??;
|
||||
let Output::AffectedRows(rows) = object_result else { unreachable!() };
|
||||
let rows = join.await.context(error::JoinTaskSnafu)?? as usize;
|
||||
success += rows;
|
||||
}
|
||||
Ok(Output::AffectedRows(success))
|
||||
|
||||
@@ -47,7 +47,7 @@ impl DatanodeInstance {
|
||||
Self { table, db }
|
||||
}
|
||||
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<Output> {
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<u32> {
|
||||
self.db.insert(request).await
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ use partition::route::TableRoutes;
|
||||
use servers::grpc::GrpcServer;
|
||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::Mode;
|
||||
use table::engine::{region_name, table_dir};
|
||||
use tonic::transport::Server;
|
||||
use tower::service_fn;
|
||||
|
||||
@@ -56,11 +57,23 @@ pub(crate) struct MockDistributedInstance {
|
||||
_guards: Vec<TestGuard>,
|
||||
}
|
||||
|
||||
impl MockDistributedInstance {
|
||||
pub fn data_tmp_dirs(&self) -> Vec<&TempDir> {
|
||||
self._guards.iter().map(|g| &g._data_tmp_dir).collect()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct MockStandaloneInstance {
|
||||
pub(crate) instance: Arc<Instance>,
|
||||
_guard: TestGuard,
|
||||
}
|
||||
|
||||
impl MockStandaloneInstance {
|
||||
pub fn data_tmp_dir(&self) -> &TempDir {
|
||||
&self._guard._data_tmp_dir
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandaloneInstance {
|
||||
let (opts, guard) = create_tmp_dir_and_datanode_opts(test_name);
|
||||
let datanode_instance = DatanodeInstance::new(&opts).await.unwrap();
|
||||
@@ -112,15 +125,15 @@ pub(crate) async fn create_datanode_client(
|
||||
|
||||
// create a mock datanode grpc service, see example here:
|
||||
// https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
|
||||
let datanode_service = GrpcServer::new(
|
||||
let grpc_server = GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
|
||||
None,
|
||||
runtime,
|
||||
)
|
||||
.create_service();
|
||||
);
|
||||
tokio::spawn(async move {
|
||||
Server::builder()
|
||||
.add_service(datanode_service)
|
||||
.add_service(grpc_server.create_flight_service())
|
||||
.add_service(grpc_server.create_database_service())
|
||||
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
|
||||
.await
|
||||
});
|
||||
@@ -258,7 +271,6 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
|
||||
meta_client.clone(),
|
||||
catalog_manager,
|
||||
datanode_clients.clone(),
|
||||
Default::default(),
|
||||
);
|
||||
let dist_instance = Arc::new(dist_instance);
|
||||
let frontend = Instance::new_distributed(dist_instance.clone());
|
||||
@@ -270,3 +282,29 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
|
||||
_guards: test_guards,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn test_region_dir(
|
||||
dir: &str,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_id: u32,
|
||||
region_id: u32,
|
||||
) -> String {
|
||||
let table_dir = table_dir(catalog_name, schema_name, table_id);
|
||||
let region_name = region_name(table_id, region_id);
|
||||
|
||||
format!("{}/{}/{}", dir, table_dir, region_name)
|
||||
}
|
||||
|
||||
pub fn has_parquet_file(sst_dir: &str) -> bool {
|
||||
for entry in std::fs::read_dir(sst_dir).unwrap() {
|
||||
let entry = entry.unwrap();
|
||||
let path = entry.path();
|
||||
if !path.is_dir() {
|
||||
assert_eq!("parquet", path.extension().unwrap());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
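For orientation, `test_region_dir` composes a region directory from the same `table_dir` and `region_name` formats that appear later in the mito engine hunk. A tiny self-contained illustration of the resulting path components for the values these tests use:

```rust
// Illustrative recomputation of the path pieces used by `test_region_dir`,
// based on the `table_dir` and `region_name` formats shown in the mito engine hunk.
fn main() {
    let (catalog, schema, table_id, region) = ("greptime", "public", 1024u32, 0u32);
    let table_dir = format!("{catalog}/{schema}/{table_id}/");
    let region_name = format!("{table_id}_{region:010}");
    assert_eq!(table_dir, "greptime/public/1024/");
    assert_eq!(region_name, "1024_0000000000");
}
```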
|
||||
@@ -39,18 +39,45 @@ use crate::service::store::kv::ResettableKvStoreRef;
|
||||
use crate::service::store::memory::MemStore;
|
||||
use crate::{error, Result};
|
||||
|
||||
// Bootstrap the rpc server to serve incoming requests
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(opts.clone()).await?;
|
||||
bootstrap_meta_srv_with_router(opts, router(meta_srv)).await
|
||||
#[derive(Clone)]
|
||||
pub struct MetaSrvInstance {
|
||||
meta_srv: MetaSrv,
|
||||
|
||||
opts: MetaSrvOptions,
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(opts: MetaSrvOptions, router: Router) -> Result<()> {
|
||||
let listener = TcpListener::bind(&opts.bind_addr)
|
||||
impl MetaSrvInstance {
|
||||
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
|
||||
let meta_srv = build_meta_srv(&opts).await?;
|
||||
|
||||
Ok(MetaSrvInstance { meta_srv, opts })
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
self.meta_srv.start().await;
|
||||
bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
// TODO: shutdown the router
|
||||
self.meta_srv.shutdown();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Bootstrap the rpc server to serve incoming requests
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(&opts).await?;
|
||||
bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
|
||||
let listener = TcpListener::bind(bind_addr)
|
||||
.await
|
||||
.context(error::TcpBindSnafu {
|
||||
addr: &opts.bind_addr,
|
||||
})?;
|
||||
.context(error::TcpBindSnafu { addr: bind_addr })?;
|
||||
let listener = TcpListenerStream::new(listener);
|
||||
|
||||
router
|
||||
@@ -72,7 +99,7 @@ pub fn router(meta_srv: MetaSrv) -> Router {
|
||||
.add_service(admin::make_admin_service(meta_srv))
|
||||
}
|
||||
|
||||
pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
pub async fn build_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
|
||||
let (kv_store, election, lock) = if opts.use_memory_store {
|
||||
(Arc::new(MemStore::new()) as _, None, None)
|
||||
} else {
|
||||
@@ -107,7 +134,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
};
|
||||
|
||||
let meta_srv = MetaSrvBuilder::new()
|
||||
.options(opts)
|
||||
.options(opts.clone())
|
||||
.kv_store(kv_store)
|
||||
.in_memory(in_memory)
|
||||
.selector(selector)
|
||||
@@ -117,6 +144,12 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
.build()
|
||||
.await;
|
||||
|
||||
Ok(meta_srv)
|
||||
}
|
||||
|
||||
pub async fn make_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
|
||||
let meta_srv = build_meta_srv(opts).await?;
|
||||
|
||||
meta_srv.start().await;
|
||||
|
||||
Ok(meta_srv)
|
||||
|
||||
@@ -31,13 +31,14 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::{
|
||||
ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder, ColumnId,
|
||||
CreateOptions, EngineContext as StorageEngineContext, OpenOptions, Region,
|
||||
RegionDescriptorBuilder, RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
|
||||
RegionDescriptorBuilder, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
|
||||
};
|
||||
use table::engine::{
|
||||
region_id, region_name, table_dir, EngineContext, TableEngine, TableEngineProcedure,
|
||||
TableReference,
|
||||
};
|
||||
use table::engine::{EngineContext, TableEngine, TableEngineProcedure, TableReference};
|
||||
use table::error::TableOperationSnafu;
|
||||
use table::metadata::{
|
||||
TableId, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion,
|
||||
};
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion};
|
||||
use table::requests::{
|
||||
AlterKind, AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
|
||||
};
|
||||
@@ -59,22 +60,6 @@ pub const MITO_ENGINE: &str = "mito";
pub const INIT_COLUMN_ID: ColumnId = 0;
const INIT_TABLE_VERSION: TableVersion = 0;

/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
fn region_name(table_id: TableId, n: u32) -> String {
    format!("{table_id}_{n:010}")
}

#[inline]
fn region_id(table_id: TableId, n: u32) -> RegionId {
    (u64::from(table_id) << 32) | u64::from(n)
}

#[inline]
fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String {
    format!("{catalog_name}/{schema_name}/{table_id}/")
}
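A note on the `region_id` helper above: it packs the table id into the high 32 bits of a `u64` and the region number into the low 32 bits. The round trip below is a sketch of that layout; the unpack direction is illustrative and not part of the codebase:

```rust
/// Pack a table id and region number into one 64-bit region id,
/// mirroring the bit layout of `region_id` above.
fn pack_region_id(table_id: u32, region_number: u32) -> u64 {
    (u64::from(table_id) << 32) | u64::from(region_number)
}

/// Illustrative inverse: split a packed region id back into its parts.
fn unpack_region_id(region_id: u64) -> (u32, u32) {
    ((region_id >> 32) as u32, region_id as u32)
}

fn main() {
    let id = pack_region_id(1024, 3);
    assert_eq!(id, (1024u64 << 32) | 3);
    assert_eq!(unpack_region_id(id), (1024, 3));
}
```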
|
||||
/// [TableEngine] implementation.
|
||||
///
|
||||
/// About mito <https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo>.
|
||||
@@ -165,6 +150,10 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)
|
||||
}
|
||||
|
||||
async fn close(&self) -> TableResult<()> {
|
||||
self.inner.close().await
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: StorageEngine> TableEngineProcedure for MitoEngine<S> {
|
||||
@@ -623,6 +612,19 @@ impl<S: StorageEngine> MitoEngineInner<S> {
|
||||
.remove(&table_reference.to_string())
|
||||
.is_some())
|
||||
}
|
||||
|
||||
async fn close(&self) -> TableResult<()> {
|
||||
let _lock = self.table_mutex.lock().await;
|
||||
|
||||
let tables = self.tables.write().unwrap().clone();
|
||||
|
||||
futures::future::try_join_all(tables.values().map(|t| t.close()))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: StorageEngine> MitoEngineInner<S> {
|
||||
|
||||
@@ -25,6 +25,7 @@ use store_api::storage::{
|
||||
ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
|
||||
StorageEngine,
|
||||
};
|
||||
use table::engine::{region_id, table_dir};
|
||||
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
|
||||
use table::requests::CreateTableRequest;
|
||||
|
||||
@@ -146,7 +147,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
/// Creates regions for the table.
|
||||
async fn on_create_regions(&mut self) -> Result<Status> {
|
||||
let engine_ctx = EngineContext::default();
|
||||
let table_dir = engine::table_dir(
|
||||
let table_dir = table_dir(
|
||||
&self.data.request.catalog_name,
|
||||
&self.data.request.schema_name,
|
||||
self.data.request.id,
|
||||
@@ -203,7 +204,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
}
|
||||
|
||||
// We need to create that region.
|
||||
let region_id = engine::region_id(self.data.request.id, *number);
|
||||
let region_id = region_id(self.data.request.id, *number);
|
||||
let region_desc = RegionDescriptorBuilder::default()
|
||||
.id(region_id)
|
||||
.name(region_name.clone())
|
||||
@@ -234,7 +235,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
|
||||
/// Writes metadata to the table manifest.
|
||||
async fn on_write_table_manifest(&mut self) -> Result<Status> {
|
||||
let table_dir = engine::table_dir(
|
||||
let table_dir = table_dir(
|
||||
&self.data.request.catalog_name,
|
||||
&self.data.request.schema_name,
|
||||
self.data.request.id,
|
||||
|
||||
@@ -31,14 +31,28 @@ use storage::region::RegionImpl;
|
||||
use storage::EngineImpl;
|
||||
use store_api::manifest::Manifest;
|
||||
use store_api::storage::ReadContext;
|
||||
use table::requests::{AddColumnRequest, AlterKind, DeleteRequest, TableOptions};
|
||||
use table::requests::{
|
||||
AddColumnRequest, AlterKind, DeleteRequest, FlushTableRequest, TableOptions,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
use crate::table::test_util;
|
||||
use crate::table::test_util::{
|
||||
new_insert_request, schema_for_test, TestEngineComponents, TABLE_NAME,
|
||||
self, new_insert_request, schema_for_test, setup_table, TestEngineComponents, TABLE_NAME,
|
||||
};
|
||||
|
||||
pub fn has_parquet_file(sst_dir: &str) -> bool {
|
||||
for entry in std::fs::read_dir(sst_dir).unwrap() {
|
||||
let entry = entry.unwrap();
|
||||
let path = entry.path();
|
||||
if !path.is_dir() {
|
||||
assert_eq!("parquet", path.extension().unwrap());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
async fn setup_table_with_column_default_constraint() -> (TempDir, String, TableRef) {
|
||||
let table_name = "test_default_constraint";
|
||||
let column_schemas = vec![
|
||||
@@ -752,3 +766,76 @@ async fn test_table_delete_rows() {
|
||||
+-------+-----+--------+-------------------------+"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_table_all_regions() {
|
||||
let TestEngineComponents {
|
||||
table_ref: table,
|
||||
dir,
|
||||
..
|
||||
} = test_util::setup_test_engine_and_table().await;
|
||||
|
||||
setup_table(table.clone()).await;
|
||||
|
||||
let table_id = 1u32;
|
||||
let region_name = region_name(table_id, 0);
|
||||
|
||||
let table_info = table.table_info();
|
||||
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||
|
||||
let region_dir = format!(
|
||||
"{}/{}/{}",
|
||||
dir.path().to_str().unwrap(),
|
||||
table_dir,
|
||||
region_name
|
||||
);
|
||||
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
// Trigger flush all region
|
||||
table.flush(None).await.unwrap();
|
||||
|
||||
// Trigger again, wait for the previous task finished
|
||||
table.flush(None).await.unwrap();
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_table_with_region_id() {
|
||||
let TestEngineComponents {
|
||||
table_ref: table,
|
||||
dir,
|
||||
..
|
||||
} = test_util::setup_test_engine_and_table().await;
|
||||
|
||||
setup_table(table.clone()).await;
|
||||
|
||||
let table_id = 1u32;
|
||||
let region_name = region_name(table_id, 0);
|
||||
|
||||
let table_info = table.table_info();
|
||||
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||
|
||||
let region_dir = format!(
|
||||
"{}/{}/{}",
|
||||
dir.path().to_str().unwrap(),
|
||||
table_dir,
|
||||
region_name
|
||||
);
|
||||
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
let req = FlushTableRequest {
|
||||
region_number: Some(0),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Trigger flush all region
|
||||
table.flush(req.region_number).await.unwrap();
|
||||
|
||||
// Trigger again, wait for the previous task finished
|
||||
table.flush(req.region_number).await.unwrap();
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
@@ -208,8 +208,8 @@ impl<R: Region> Table for MitoTable<R> {
|
||||
Ok(Arc::new(SimpleTableScan::new(stream)))
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::error::Result<FilterPushDownType> {
|
||||
Ok(FilterPushDownType::Inexact)
|
||||
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> TableResult<Vec<FilterPushDownType>> {
|
||||
Ok(vec![FilterPushDownType::Inexact; filters.len()])
|
||||
}
|
||||
|
||||
/// Alter table changes the schemas of the table.
|
||||
@@ -322,6 +322,34 @@ impl<R: Region> Table for MitoTable<R> {
|
||||
}
|
||||
Ok(rows_deleted)
|
||||
}
|
||||
|
||||
async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
|
||||
if let Some(region_number) = region_number {
|
||||
if let Some(region) = self.regions.get(®ion_number) {
|
||||
region
|
||||
.flush()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
}
|
||||
} else {
|
||||
futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close(&self) -> TableResult<()> {
|
||||
futures::future::try_join_all(self.regions.values().map(|region| region.close()))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ChunkStream {
|
||||
|
||||
@@ -20,7 +20,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector, VectorRef};
|
||||
use log_store::NoopLogStore;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
@@ -30,7 +30,7 @@ use storage::EngineImpl;
|
||||
use table::engine::{EngineContext, TableEngine};
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
|
||||
use table::requests::{CreateTableRequest, InsertRequest, TableOptions};
|
||||
use table::TableRef;
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::config::EngineConfig;
|
||||
use crate::engine::{MitoEngine, MITO_ENGINE};
|
||||
@@ -178,3 +178,19 @@ pub async fn setup_mock_engine_and_table(
|
||||
|
||||
(mock_engine, table_engine, table, object_store, dir)
|
||||
}
|
||||
|
||||
pub async fn setup_table(table: Arc<dyn Table>) {
|
||||
let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
|
||||
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
|
||||
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
|
||||
|
||||
columns_values.insert("host".to_string(), hosts.clone());
|
||||
columns_values.insert("cpu".to_string(), cpus.clone());
|
||||
columns_values.insert("memory".to_string(), memories.clone());
|
||||
columns_values.insert("ts".to_string(), tss.clone());
|
||||
|
||||
let insert_req = new_insert_request("demo".to_string(), columns_values);
|
||||
assert_eq!(4, table.insert(insert_req).await.unwrap());
|
||||
}
|
||||
|
||||
@@ -200,6 +200,10 @@ impl Region for MockRegion {
|
||||
fn disk_usage_bytes(&self) -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
async fn flush(&self) -> Result<()> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl MockRegionInner {
|
||||
|
||||
@@ -23,7 +23,7 @@ use datafusion::arrow::datatypes::{DataType, TimeUnit};
|
||||
use datafusion::common::{DFField, DFSchema, DFSchemaRef, Result as DataFusionResult, Statistics};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -37,7 +37,7 @@ use futures::Stream;
|
||||
|
||||
use crate::extension_plan::Millisecond;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct EmptyMetric {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -86,9 +86,9 @@ impl EmptyMetric {
|
||||
}
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for EmptyMetric {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for EmptyMetric {
|
||||
fn name(&self) -> &str {
|
||||
"EmptyMetric"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -111,12 +111,8 @@ impl UserDefinedLogicalNode for EmptyMetric {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[datafusion::prelude::Expr],
|
||||
_inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
Arc::new(self.clone())
|
||||
fn from_template(&self, _expr: &[Expr], _inputs: &[LogicalPlan]) -> Self {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::DFSchemaRef;
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -42,7 +42,7 @@ use crate::extension_plan::Millisecond;
|
||||
/// This plan will try to align the input time series, for every timestamp between
|
||||
/// `start` and `end` with step `interval`. Find in the `lookback` range if data
|
||||
/// is missing at the given timestamp.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct InstantManipulate {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -52,9 +52,9 @@ pub struct InstantManipulate {
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for InstantManipulate {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for InstantManipulate {
|
||||
fn name(&self) -> &str {
|
||||
"InstantManipulate"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -77,21 +77,17 @@ impl UserDefinedLogicalNode for InstantManipulate {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
start: self.start,
|
||||
end: self.end,
|
||||
lookback_delta: self.lookback_delta,
|
||||
interval: self.interval,
|
||||
time_index_column: self.time_index_column.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ use datafusion::arrow::compute;
|
||||
use datafusion::common::{DFSchemaRef, Result as DataFusionResult, Statistics};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -43,7 +43,7 @@ use crate::extension_plan::Millisecond;
|
||||
/// - bias sample's timestamp by offset
|
||||
/// - sort the record batch based on timestamp column
|
||||
/// - remove NaN values
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct SeriesNormalize {
|
||||
offset: Millisecond,
|
||||
time_index_column_name: String,
|
||||
@@ -51,9 +51,9 @@ pub struct SeriesNormalize {
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for SeriesNormalize {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for SeriesNormalize {
|
||||
fn name(&self) -> &str {
|
||||
"SeriesNormalize"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -76,18 +76,14 @@ impl UserDefinedLogicalNode for SeriesNormalize {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[datafusion::logical_expr::Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
offset: self.offset,
|
||||
time_index_column_name: self.time_index_column_name.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::{DFField, DFSchema, DFSchemaRef};
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -42,7 +42,7 @@ use crate::range_array::RangeArray;
|
||||
///
|
||||
/// This plan will "fold" time index and value columns into [RangeArray]s, and truncate
|
||||
/// other columns to the same length with the "folded" [RangeArray] column.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct RangeManipulate {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -137,9 +137,9 @@ impl RangeManipulate {
|
||||
}
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for RangeManipulate {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for RangeManipulate {
|
||||
fn name(&self) -> &str {
|
||||
"RangeManipulate"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -162,14 +162,10 @@ impl UserDefinedLogicalNode for RangeManipulate {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
start: self.start,
|
||||
end: self.end,
|
||||
interval: self.interval,
|
||||
@@ -178,7 +174,7 @@ impl UserDefinedLogicalNode for RangeManipulate {
|
||||
value_columns: self.value_columns.clone(),
|
||||
input: inputs[0].clone(),
|
||||
output_schema: self.output_schema.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::DFSchemaRef;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -33,15 +33,15 @@ use datafusion::physical_plan::{
|
||||
use datatypes::arrow::compute;
|
||||
use futures::{ready, Stream, StreamExt};
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct SeriesDivide {
|
||||
tag_columns: Vec<String>,
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for SeriesDivide {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for SeriesDivide {
|
||||
fn name(&self) -> &str {
|
||||
"SeriesDivide"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -60,17 +60,13 @@ impl UserDefinedLogicalNode for SeriesDivide {
|
||||
write!(f, "PromSeriesDivide: tags={:?}", self.tag_columns)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
tag_columns: self.tag_columns.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,9 +21,6 @@ mod planner;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use catalog::CatalogListRef;
|
||||
use common_base::Plugins;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
|
||||
use common_function::scalars::udf::create_udf;
|
||||
@@ -36,115 +33,44 @@ use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
|
||||
use common_telemetry::timer;
|
||||
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
|
||||
use datafusion::physical_plan::ExecutionPlan;
|
||||
use datafusion_sql::planner::{ParserOptions, SqlToRel};
|
||||
use datatypes::schema::Schema;
|
||||
use promql::planner::PromPlanner;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
|
||||
pub use crate::datafusion::planner::DfContextProviderAdapter;
|
||||
use crate::error::{
|
||||
DataFusionSnafu, PlanSqlSnafu, QueryExecutionSnafu, QueryPlanSnafu, Result, SqlSnafu,
|
||||
};
|
||||
use crate::error::{DataFusionSnafu, QueryExecutionSnafu, Result};
|
||||
use crate::executor::QueryExecutor;
|
||||
use crate::logical_optimizer::LogicalOptimizer;
|
||||
use crate::parser::QueryStatement;
|
||||
use crate::physical_optimizer::PhysicalOptimizer;
|
||||
use crate::physical_planner::PhysicalPlanner;
|
||||
use crate::plan::LogicalPlan;
|
||||
use crate::planner::{DfLogicalPlanner, LogicalPlanner};
|
||||
use crate::query_engine::{QueryEngineContext, QueryEngineState};
|
||||
use crate::{metric, QueryEngine};
|
||||
|
||||
pub struct DatafusionQueryEngine {
|
||||
state: QueryEngineState,
|
||||
state: Arc<QueryEngineState>,
|
||||
}
|
||||
|
||||
impl DatafusionQueryEngine {
|
||||
pub fn new(catalog_list: CatalogListRef, plugins: Arc<Plugins>) -> Self {
|
||||
Self {
|
||||
state: QueryEngineState::new(catalog_list.clone(), plugins),
|
||||
}
|
||||
}
|
||||
|
||||
async fn plan_sql_stmt(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<LogicalPlan> {
|
||||
let session_state = self.state.session_state();
|
||||
|
||||
let df_stmt = (&stmt).try_into().context(SqlSnafu)?;
|
||||
|
||||
let config_options = session_state.config().config_options();
|
||||
let parser_options = ParserOptions {
|
||||
enable_ident_normalization: config_options.sql_parser.enable_ident_normalization,
|
||||
parse_float_as_decimal: config_options.sql_parser.parse_float_as_decimal,
|
||||
};
|
||||
|
||||
let context_provider = DfContextProviderAdapter::try_new(
|
||||
self.state.clone(),
|
||||
session_state,
|
||||
&df_stmt,
|
||||
query_ctx,
|
||||
)
|
||||
.await?;
|
||||
let sql_to_rel = SqlToRel::new_with_options(&context_provider, parser_options);
|
||||
|
||||
let result = sql_to_rel.statement_to_plan(df_stmt).with_context(|_| {
|
||||
let sql = if let Statement::Query(query) = stmt {
|
||||
query.inner.to_string()
|
||||
} else {
|
||||
format!("{stmt:?}")
|
||||
};
|
||||
PlanSqlSnafu { sql }
|
||||
})?;
|
||||
Ok(LogicalPlan::DfPlan(result))
|
||||
}
|
||||
|
||||
// TODO(ruihang): test this method once parser is ready.
|
||||
async fn plan_promql_stmt(
|
||||
&self,
|
||||
stmt: EvalStmt,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<LogicalPlan> {
|
||||
let table_provider = DfTableSourceProvider::new(
|
||||
self.state.catalog_list().clone(),
|
||||
self.state.disallow_cross_schema_query(),
|
||||
query_ctx.as_ref(),
|
||||
);
|
||||
PromPlanner::stmt_to_plan(table_provider, stmt)
|
||||
.await
|
||||
.map(LogicalPlan::DfPlan)
|
||||
.map_err(BoxedError::new)
|
||||
.context(QueryPlanSnafu)
|
||||
pub fn new(state: Arc<QueryEngineState>) -> Self {
|
||||
Self { state }
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(LFC): Refactor consideration: extract a "Planner" that stores query context and execute queries inside.
|
||||
#[async_trait]
|
||||
impl QueryEngine for DatafusionQueryEngine {
|
||||
fn planner(&self) -> Arc<dyn LogicalPlanner> {
|
||||
Arc::new(DfLogicalPlanner::new(self.state.clone()))
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"datafusion"
|
||||
}
|
||||
|
||||
async fn statement_to_plan(
|
||||
&self,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<LogicalPlan> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(stmt) => self.plan_sql_stmt(stmt, query_ctx).await,
|
||||
QueryStatement::Promql(stmt) => self.plan_promql_stmt(stmt, query_ctx).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn describe(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<Schema> {
|
||||
async fn describe(&self, plan: LogicalPlan) -> Result<Schema> {
|
||||
// TODO(sunng87): consider cache optmised logical plan between describe
|
||||
// and execute
|
||||
let plan = self.statement_to_plan(stmt, query_ctx).await?;
|
||||
let optimised_plan = self.optimize(&plan)?;
|
||||
optimised_plan.schema()
|
||||
}
|
||||
@@ -159,11 +85,6 @@ impl QueryEngine for DatafusionQueryEngine {
|
||||
Ok(Output::Stream(self.execute_stream(&ctx, &physical_plan)?))
|
||||
}
|
||||
|
||||
async fn execute_physical(&self, plan: &Arc<dyn PhysicalPlan>) -> Result<Output> {
|
||||
let ctx = QueryEngineContext::new(self.state.session_state());
|
||||
Ok(Output::Stream(self.execute_stream(&ctx, plan)?))
|
||||
}
|
||||
|
||||
fn register_udf(&self, udf: ScalarUdf) {
|
||||
self.state.register_udf(udf);
|
||||
}
|
||||
@@ -348,7 +269,8 @@ mod tests {
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -369,7 +291,8 @@ mod tests {
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.planner()
|
||||
.plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -406,11 +329,14 @@ mod tests {
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
|
||||
let schema = engine
|
||||
.describe(stmt, Arc::new(QueryContext::new()))
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let schema = engine.describe(plan).await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
schema.column_schemas()[0],
|
||||
ColumnSchema::new(
|
||||
|
||||
@@ -37,7 +37,7 @@ use crate::error::{CatalogSnafu, DataFusionSnafu, Result};
|
||||
use crate::query_engine::QueryEngineState;
|
||||
|
||||
pub struct DfContextProviderAdapter {
|
||||
engine_state: QueryEngineState,
|
||||
engine_state: Arc<QueryEngineState>,
|
||||
session_state: SessionState,
|
||||
tables: HashMap<String, Arc<dyn TableSource>>,
|
||||
table_provider: DfTableSourceProvider,
|
||||
@@ -45,7 +45,7 @@ pub struct DfContextProviderAdapter {
|
||||
|
||||
impl DfContextProviderAdapter {
|
||||
pub(crate) async fn try_new(
|
||||
engine_state: QueryEngineState,
|
||||
engine_state: Arc<QueryEngineState>,
|
||||
session_state: SessionState,
|
||||
df_stmt: &DfStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
|
||||
@@ -157,7 +157,7 @@ mod test {
|
||||
distinct: false, \
|
||||
top: None, \
|
||||
projection: \
|
||||
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None })], \
|
||||
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None, opt_replace: None })], \
|
||||
into: None, \
|
||||
from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: \"t1\", quote_style: None }]\
|
||||
), \
|
||||
|
||||
@@ -12,12 +12,94 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use common_error::prelude::BoxedError;
|
||||
use datafusion::execution::context::SessionState;
|
||||
use datafusion_sql::planner::{ParserOptions, SqlToRel};
|
||||
use promql::planner::PromPlanner;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::error::{PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu};
|
||||
use crate::parser::QueryStatement;
|
||||
use crate::plan::LogicalPlan;
|
||||
use crate::query_engine::QueryEngineState;
|
||||
use crate::DfContextProviderAdapter;
|
||||
|
||||
/// SQL logical planner.
|
||||
pub trait Planner: Send + Sync {
|
||||
fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan>;
|
||||
#[async_trait]
|
||||
pub trait LogicalPlanner: Send + Sync {
|
||||
async fn plan(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<LogicalPlan>;
|
||||
}
|
||||
|
||||
pub struct DfLogicalPlanner {
|
||||
engine_state: Arc<QueryEngineState>,
|
||||
session_state: SessionState,
|
||||
}
|
||||
|
||||
impl DfLogicalPlanner {
|
||||
pub fn new(engine_state: Arc<QueryEngineState>) -> Self {
|
||||
let session_state = engine_state.session_state();
|
||||
Self {
|
||||
engine_state,
|
||||
session_state,
|
||||
}
|
||||
}
|
||||
|
||||
async fn plan_sql(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
|
||||
let df_stmt = (&stmt).try_into().context(SqlSnafu)?;
|
||||
|
||||
let context_provider = DfContextProviderAdapter::try_new(
|
||||
self.engine_state.clone(),
|
||||
self.session_state.clone(),
|
||||
&df_stmt,
|
||||
query_ctx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let config_options = self.session_state.config().config_options();
|
||||
let parser_options = ParserOptions {
|
||||
enable_ident_normalization: config_options.sql_parser.enable_ident_normalization,
|
||||
parse_float_as_decimal: config_options.sql_parser.parse_float_as_decimal,
|
||||
};
|
||||
|
||||
let sql_to_rel = SqlToRel::new_with_options(&context_provider, parser_options);
|
||||
|
||||
let result = sql_to_rel.statement_to_plan(df_stmt).with_context(|_| {
|
||||
let sql = if let Statement::Query(query) = stmt {
|
||||
query.inner.to_string()
|
||||
} else {
|
||||
format!("{stmt:?}")
|
||||
};
|
||||
PlanSqlSnafu { sql }
|
||||
})?;
|
||||
Ok(LogicalPlan::DfPlan(result))
|
||||
}
|
||||
|
||||
async fn plan_pql(&self, stmt: EvalStmt, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
|
||||
let table_provider = DfTableSourceProvider::new(
|
||||
self.engine_state.catalog_list().clone(),
|
||||
self.engine_state.disallow_cross_schema_query(),
|
||||
query_ctx.as_ref(),
|
||||
);
|
||||
PromPlanner::stmt_to_plan(table_provider, stmt)
|
||||
.await
|
||||
.map(LogicalPlan::DfPlan)
|
||||
.map_err(BoxedError::new)
|
||||
.context(QueryPlanSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl LogicalPlanner for DfLogicalPlanner {
|
||||
async fn plan(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(stmt) => self.plan_sql(stmt, query_ctx).await,
|
||||
QueryStatement::Promql(stmt) => self.plan_pql(stmt, query_ctx).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ use catalog::CatalogListRef;
|
||||
use common_base::Plugins;
|
||||
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
|
||||
use common_function::scalars::{FunctionRef, FUNCTION_REGISTRY};
|
||||
use common_query::physical_plan::PhysicalPlan;
|
||||
use common_query::prelude::ScalarUdf;
|
||||
use common_query::Output;
|
||||
use datatypes::schema::Schema;
|
||||
@@ -33,25 +32,32 @@ use crate::datafusion::DatafusionQueryEngine;
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryStatement;
|
||||
use crate::plan::LogicalPlan;
|
||||
use crate::planner::LogicalPlanner;
|
||||
pub use crate::query_engine::context::QueryEngineContext;
|
||||
pub use crate::query_engine::state::QueryEngineState;
|
||||
|
||||
#[async_trait]
|
||||
pub trait QueryEngine: Send + Sync {
|
||||
fn name(&self) -> &str;
|
||||
pub type StatementHandlerRef = Arc<dyn StatementHandler>;
|
||||
|
||||
async fn statement_to_plan(
|
||||
// TODO(LFC): Gradually make more statements executed in the form of logical plan, and remove this trait. Tracked in #1010.
|
||||
#[async_trait]
|
||||
pub trait StatementHandler: Send + Sync {
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<LogicalPlan>;
|
||||
) -> Result<Output>;
|
||||
}
|
||||
|
||||
async fn describe(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<Schema>;
|
||||
#[async_trait]
|
||||
pub trait QueryEngine: Send + Sync {
|
||||
fn planner(&self) -> Arc<dyn LogicalPlanner>;
|
||||
|
||||
fn name(&self) -> &str;
|
||||
|
||||
async fn describe(&self, plan: LogicalPlan) -> Result<Schema>;
|
||||
|
||||
async fn execute(&self, plan: &LogicalPlan) -> Result<Output>;
|
||||
|
||||
async fn execute_physical(&self, plan: &Arc<dyn PhysicalPlan>) -> Result<Output>;
|
||||
|
||||
fn register_udf(&self, udf: ScalarUdf);
|
||||
|
||||
fn register_aggregate_function(&self, func: AggregateFunctionMetaRef);
|
||||
@@ -65,13 +71,12 @@ pub struct QueryEngineFactory {
|
||||
|
||||
impl QueryEngineFactory {
|
||||
pub fn new(catalog_list: CatalogListRef) -> Self {
|
||||
let query_engine = Arc::new(DatafusionQueryEngine::new(catalog_list, Default::default()));
|
||||
register_functions(&query_engine);
|
||||
Self { query_engine }
|
||||
Self::new_with_plugins(catalog_list, Default::default())
|
||||
}
|
||||
|
||||
pub fn new_with_plugins(catalog_list: CatalogListRef, plugins: Arc<Plugins>) -> Self {
|
||||
let query_engine = Arc::new(DatafusionQueryEngine::new(catalog_list, plugins));
|
||||
let state = Arc::new(QueryEngineState::new(catalog_list, plugins));
|
||||
let query_engine = Arc::new(DatafusionQueryEngine::new(state));
|
||||
register_functions(&query_engine);
|
||||
Self { query_engine }
|
||||
}
|
||||
|
||||
@@ -24,14 +24,10 @@ use datatypes::vectors::{Helper, StringVector};
|
||||
use once_cell::sync::Lazy;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::statements::explain::Explain;
|
||||
use sql::statements::show::{ShowDatabases, ShowKind, ShowTables};
|
||||
use sql::statements::statement::Statement;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::parser::QueryStatement;
|
||||
use crate::QueryEngineRef;
|
||||
|
||||
const SCHEMAS_COLUMN: &str = "Schemas";
|
||||
const TABLES_COLUMN: &str = "Tables";
|
||||
@@ -156,17 +152,6 @@ pub fn show_tables(
|
||||
Ok(Output::RecordBatches(records))
|
||||
}
|
||||
|
||||
pub async fn explain(
|
||||
stmt: Box<Explain>,
|
||||
query_engine: QueryEngineRef,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let plan = query_engine
|
||||
.statement_to_plan(QueryStatement::Sql(Statement::Explain(*stmt)), query_ctx)
|
||||
.await?;
|
||||
query_engine.execute(&plan).await
|
||||
}
|
||||
|
||||
pub fn describe_table(table: TableRef) -> Result<Output> {
|
||||
let table_info = table.table_info();
|
||||
let columns_schemas = table_info.meta.schema.column_schemas();
|
||||
|
||||
@@ -12,6 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use session::context::QueryContext;
|
||||
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::QueryEngineRef;
|
||||
|
||||
mod argmax_test;
|
||||
mod argmin_test;
|
||||
mod mean_test;
|
||||
@@ -25,3 +32,17 @@ mod time_range_filter_test;
|
||||
|
||||
mod function;
|
||||
mod pow;
|
||||
|
||||
async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec<RecordBatch> {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let Output::Stream(stream) = engine
|
||||
.execute(&plan)
|
||||
.await
|
||||
.unwrap() else { unreachable!() };
|
||||
util::collect(stream).await.unwrap()
|
||||
}
|
||||
|
||||
@@ -14,17 +14,12 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::types::WrapperType;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -52,9 +47,8 @@ async fn test_argmax_success<T>(
|
||||
where
|
||||
T: WrapperType + PartialOrd,
|
||||
{
|
||||
let result = execute_argmax(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("argmax", result);
|
||||
|
||||
let numbers =
|
||||
@@ -77,23 +71,3 @@ where
|
||||
assert_eq!(value, expected_value);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_argmax<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -14,17 +14,12 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::types::WrapperType;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -52,9 +47,8 @@ async fn test_argmin_success<T>(
|
||||
where
|
||||
T: WrapperType + PartialOrd,
|
||||
{
|
||||
let result = execute_argmin(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!("select argmin({column_name}) as argmin from {table_name}");
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("argmin", result);
|
||||
|
||||
let numbers =
|
||||
@@ -77,23 +71,3 @@ where
|
||||
assert_eq!(value, expected_value);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_argmin<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!("select argmin({column_name}) as argmin from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -17,18 +17,16 @@ use std::sync::Arc;
|
||||
use catalog::local::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use catalog::{CatalogList, CatalogProvider, SchemaProvider};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::types::WrapperType;
|
||||
use datatypes::vectors::Helper;
|
||||
use rand::Rng;
|
||||
use session::context::QueryContext;
|
||||
use table::test_util::MemTable;
|
||||
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::exec_selection;
|
||||
use crate::{QueryEngine, QueryEngineFactory};
|
||||
|
||||
pub fn create_query_engine() -> Arc<dyn QueryEngine> {
|
||||
@@ -81,18 +79,7 @@ where
|
||||
T: WrapperType,
|
||||
{
|
||||
let sql = format!("SELECT {column_name} FROM {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let numbers = util::collect(recordbatch_stream).await.unwrap();
|
||||
let numbers = exec_selection(engine, &sql).await;
|
||||
|
||||
let column = numbers[0].column(0);
|
||||
let column: &<T as Scalar>::VectorType = unsafe { Helper::static_cast(column) };
|
||||
|
||||
@@ -14,20 +14,15 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::types::WrapperType;
|
||||
use datatypes::value::OrderedFloat;
|
||||
use format_num::NumberFormat;
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -55,9 +50,8 @@ async fn test_mean_success<T>(
|
||||
where
|
||||
T: WrapperType + AsPrimitive<f64>,
|
||||
{
|
||||
let result = execute_mean(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!("select MEAN({column_name}) as mean from {table_name}");
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("mean", result);
|
||||
|
||||
let numbers =
|
||||
@@ -73,23 +67,3 @@ where
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_mean<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!("select MEAN({column_name}) as mean from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -24,19 +24,17 @@ use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Result as QueryResult};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::prelude::*;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use common_recordbatch::{RecordBatch, RecordBatches};
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::types::{LogicalPrimitiveType, WrapperType};
|
||||
use datatypes::vectors::Helper;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
use table::test_util::MemTable;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::exec_selection;
|
||||
use crate::QueryEngineFactory;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
@@ -220,18 +218,8 @@ where
|
||||
)));
|
||||
|
||||
let sql = format!("select MY_SUM({column_name}) as my_sum from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await?;
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let batches = util::collect_batches(recordbatch_stream).await.unwrap();
|
||||
let batches = exec_selection(engine, &sql).await;
|
||||
let batches = RecordBatches::try_new(batches.first().unwrap().schema.clone(), batches).unwrap();
|
||||
|
||||
let pretty_print = batches.pretty_print().unwrap();
|
||||
assert_eq!(expected, pretty_print);
|
||||
|
||||
@@ -17,21 +17,17 @@ use std::sync::Arc;
|
||||
use catalog::local::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use catalog::{CatalogList, CatalogProvider, SchemaProvider};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::Int32Vector;
|
||||
use function::{create_query_engine, get_numbers_from_table};
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
use table::test_util::MemTable;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::{QueryEngine, QueryEngineFactory};
|
||||
|
||||
#[tokio::test]
|
||||
@@ -55,18 +51,7 @@ async fn test_percentile_aggregator() -> Result<()> {
|
||||
async fn test_percentile_correctness() -> Result<()> {
|
||||
let engine = create_correctness_engine();
|
||||
let sql = String::from("select PERCENTILE(corr_number,88.0) as percentile from corr_numbers");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let record_batch = util::collect(recordbatch_stream).await.unwrap();
|
||||
let record_batch = exec_selection(engine, &sql).await;
|
||||
let column = record_batch[0].column(0);
|
||||
let value = column.get(0);
|
||||
assert_eq!(value, Value::from(9.280_000_000_000_001_f64));
|
||||
@@ -81,9 +66,8 @@ async fn test_percentile_success<T>(
|
||||
where
|
||||
T: WrapperType + AsPrimitive<f64>,
|
||||
{
|
||||
let result = execute_percentile(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("percentile", result);
|
||||
|
||||
let numbers = get_numbers_from_table::<T>(column_name, table_name, engine.clone()).await;
|
||||
@@ -95,26 +79,6 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_percentile<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
fn create_correctness_engine() -> Arc<dyn QueryEngine> {
|
||||
// create engine
|
||||
let schema_provider = Arc::new(MemorySchemaProvider::new());
|
||||
|
||||
@@ -14,18 +14,13 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::types::WrapperType;
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -57,9 +52,8 @@ where
|
||||
PolyT::Native: std::ops::Mul<Output = PolyT::Native> + std::iter::Sum,
|
||||
i64: AsPrimitive<PolyT::Native>,
|
||||
{
|
||||
let result = execute_polyval(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("polyval", result);
|
||||
|
||||
let numbers =
|
||||
@@ -74,23 +68,3 @@ where
|
||||
assert_eq!(value, PolyT::from_native(expected_native).into());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_polyval<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ use crate::parser::QueryLanguageParser;
|
||||
use crate::plan::LogicalPlan;
|
||||
use crate::query_engine::options::QueryOptions;
|
||||
use crate::query_engine::QueryEngineFactory;
|
||||
use crate::tests::exec_selection;
|
||||
use crate::tests::pow::pow;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -138,13 +139,15 @@ async fn test_query_validate() -> Result<()> {
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql("select number from public.numbers").unwrap();
|
||||
assert!(engine
|
||||
.statement_to_plan(stmt, QueryContext::arc())
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql("select number from wrongschema.numbers").unwrap();
|
||||
assert!(engine
|
||||
.statement_to_plan(stmt, QueryContext::arc())
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.is_err());
|
||||
Ok(())
|
||||
@@ -174,21 +177,8 @@ async fn test_udf() -> Result<()> {
|
||||
|
||||
engine.register_udf(udf);
|
||||
|
||||
let stmt =
|
||||
QueryLanguageParser::parse_sql("select my_pow(number, number) as p from numbers limit 10")
|
||||
.unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await?;
|
||||
let recordbatch = match output {
|
||||
Output::Stream(recordbatch) => recordbatch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let numbers = util::collect(recordbatch).await.unwrap();
|
||||
let sql = "select my_pow(number, number) as p from numbers limit 10";
|
||||
let numbers = exec_selection(engine, sql).await;
|
||||
assert_eq!(1, numbers.len());
|
||||
assert_eq!(numbers[0].num_columns(), 1);
|
||||
assert_eq!(1, numbers[0].schema.num_columns());
|
||||
|
||||
@@ -14,19 +14,14 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::types::WrapperType;
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
use statrs::distribution::{ContinuousCDF, Normal};
|
||||
use statrs::statistics::Statistics;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -54,9 +49,10 @@ async fn test_scipy_stats_norm_cdf_success<T>(
|
||||
where
|
||||
T: WrapperType + AsPrimitive<f64>,
|
||||
{
|
||||
let result = execute_scipy_stats_norm_cdf(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!(
|
||||
"select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
|
||||
);
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("scipy_stats_norm_cdf", result);
|
||||
|
||||
let numbers =
|
||||
@@ -71,25 +67,3 @@ where
|
||||
assert_eq!(value, expected_value.into());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_scipy_stats_norm_cdf<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!(
|
||||
"select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
|
||||
);
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -14,19 +14,14 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::Result as RecordResult;
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use datatypes::for_all_primitive_types;
|
||||
use datatypes::types::WrapperType;
|
||||
use num_traits::AsPrimitive;
|
||||
use session::context::QueryContext;
|
||||
use statrs::distribution::{Continuous, Normal};
|
||||
use statrs::statistics::Statistics;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::function;
|
||||
use crate::tests::{exec_selection, function};
|
||||
use crate::QueryEngine;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -54,9 +49,10 @@ async fn test_scipy_stats_norm_pdf_success<T>(
|
||||
where
|
||||
T: WrapperType + AsPrimitive<f64>,
|
||||
{
|
||||
let result = execute_scipy_stats_norm_pdf(column_name, table_name, engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let sql = format!(
|
||||
"select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
|
||||
);
|
||||
let result = exec_selection(engine.clone(), &sql).await;
|
||||
let value = function::get_value_from_batches("scipy_stats_norm_pdf", result);
|
||||
|
||||
let numbers =
|
||||
@@ -71,25 +67,3 @@ where
|
||||
assert_eq!(value, expected_value.into());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_scipy_stats_norm_pdf<'a>(
|
||||
column_name: &'a str,
|
||||
table_name: &'a str,
|
||||
engine: Arc<dyn QueryEngine>,
|
||||
) -> RecordResult<Vec<RecordBatch>> {
|
||||
let sql = format!(
|
||||
"select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
|
||||
);
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
|
||||
let plan = engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let recordbatch_stream = match output {
|
||||
Output::Stream(batch) => batch,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
util::collect(recordbatch_stream).await
|
||||
}
|
||||
|
||||
@@ -26,14 +26,13 @@ use common_time::Timestamp;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::{FilterPushDownType, TableInfoRef};
|
||||
use table::predicate::TimeRangePredicateBuilder;
|
||||
use table::test_util::MemTable;
|
||||
use table::Table;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::parser::QueryLanguageParser;
|
||||
use crate::tests::exec_selection;
|
||||
use crate::{QueryEngineFactory, QueryEngineRef};
|
||||
|
||||
struct MemTableWrapper {
|
||||
@@ -71,8 +70,11 @@ impl Table for MemTableWrapper {
|
||||
self.inner.scan(projection, filters, limit).await
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
|
||||
Ok(FilterPushDownType::Exact)
|
||||
fn supports_filters_pushdown(
|
||||
&self,
|
||||
filters: &[&Expr],
|
||||
) -> table::Result<Vec<FilterPushDownType>> {
|
||||
Ok(vec![FilterPushDownType::Exact; filters.len()])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,18 +130,7 @@ struct TimeRangeTester {
|
||||
|
||||
impl TimeRangeTester {
|
||||
async fn check(&self, sql: &str, expect: TimestampRange) {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let _ = self
|
||||
.engine
|
||||
.execute(
|
||||
&self
|
||||
.engine
|
||||
.statement_to_plan(stmt, Arc::new(QueryContext::new()))
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let _ = exec_selection(self.engine.clone(), sql).await;
|
||||
let filters = self.table.get_filters().await;
|
||||
|
||||
let range = TimeRangePredicateBuilder::new("ts", &filters).build();
|
||||
|
||||
@@ -6,7 +6,7 @@ license.workspace = true

[features]
default = ["python"]
pyo3_backend = ["pyo3"]
pyo3_backend = ["dep:pyo3", "arrow/pyarrow"]
python = [
    "dep:datafusion",
    "dep:datafusion-common",
@@ -60,7 +60,7 @@ rustpython-vm = { git = "https://github.com/discord9/RustPython", optional = tru
    "default",
    "codegen",
] }
pyo3 = { version = "0.18", optional = true }
pyo3 = { version = "0.18", optional = true, features = ["abi3", "abi3-py37"] }
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
sql = { path = "../sql" }

@@ -279,7 +279,8 @@ impl Script for PyScript {
        );
        let plan = self
            .query_engine
            .statement_to_plan(stmt, Arc::new(QueryContext::new()))
            .planner()
            .plan(stmt, QueryContext::arc())
            .await?;
        let res = self.query_engine.execute(&plan).await?;
        let copr = self.copr.clone();

@@ -367,7 +367,8 @@ impl PyQueryEngine {
        let handle = rt.handle().clone();
        let res = handle.block_on(async {
            let plan = engine
                .statement_to_plan(stmt, Default::default())
                .planner()
                .plan(stmt, Default::default())
                .await
                .map_err(|e| e.to_string())?;
            let res = engine

@@ -160,7 +160,8 @@ impl ScriptsTable {

        let plan = self
            .query_engine
            .statement_to_plan(stmt, Arc::new(QueryContext::new()))
            .planner()
            .plan(stmt, QueryContext::arc())
            .await
            .unwrap();

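The three hunks above migrate call sites from `statement_to_plan(stmt, Arc::new(QueryContext::new()))` to `planner().plan(stmt, QueryContext::arc())`. Alongside the planner indirection, the context construction is shortened; assuming `QueryContext::arc()` is simply a convenience for the Arc-wrapped default context (both forms appear in this diff), the two spellings build the same thing:

use std::sync::Arc;

use session::context::{QueryContext, QueryContextRef};

// The older call sites build the Arc by hand; the newer ones use the shorthand.
fn ctx_old() -> QueryContextRef {
    Arc::new(QueryContext::new())
}

fn ctx_new() -> QueryContextRef {
    QueryContext::arc()
}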
@@ -263,8 +263,12 @@ pub enum Error {
        #[snafu(backtrace)]
        source: common_mem_prof::error::Error,
    },

    #[snafu(display("Invalid prepare statement: {}", err_msg))]
    InvalidPrepareStatement { err_msg: String },

    #[snafu(display("Invalid flush argument: {}", err_msg))]
    InvalidFlushArgument { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -327,6 +331,7 @@ impl ErrorExt for Error {
            DatabaseNotFound { .. } => StatusCode::DatabaseNotFound,
            #[cfg(feature = "mem-prof")]
            DumpProfileData { source, .. } => source.status_code(),
            InvalidFlushArgument { .. } => StatusCode::InvalidArguments,
        }
    }

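The new `InvalidFlushArgument` variant gets a generated snafu context selector (`InvalidFlushArgumentSnafu`), which is how the flush handler later in this diff reports a missing `schema_name`. A minimal sketch of raising it, assuming the selector is used from within the same crate; the helper name and validation here are illustrative only:

use snafu::{ensure, OptionExt};

use crate::error::{InvalidFlushArgumentSnafu, Result};

// Illustrative: the selector works both with `Option::context` and `ensure!`,
// mirroring how the flush handler below handles a missing schema_name.
fn require_schema(schema: Option<String>) -> Result<String> {
    let schema = schema.context(InvalidFlushArgumentSnafu {
        err_msg: "schema_name is not present",
    })?;
    ensure!(
        !schema.is_empty(),
        InvalidFlushArgumentSnafu {
            err_msg: "schema_name must not be empty"
        }
    );
    Ok(schema)
}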
@@ -12,11 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod database;
pub mod flight;
pub mod handler;

use std::net::SocketAddr;
use std::sync::Arc;

use api::v1::greptime_database_server::{GreptimeDatabase, GreptimeDatabaseServer};
use arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
use async_trait::async_trait;
use common_runtime::Runtime;
@@ -27,18 +30,21 @@ use tokio::net::TcpListener;
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
use tokio_stream::wrappers::TcpListenerStream;
use tonic::Status;

use crate::auth::UserProviderRef;
use crate::error::{AlreadyStartedSnafu, Result, StartGrpcSnafu, TcpBindSnafu};
use crate::grpc::database::DatabaseService;
use crate::grpc::flight::FlightHandler;
use crate::grpc::handler::GreptimeRequestHandler;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::server::Server;

type TonicResult<T> = std::result::Result<T, Status>;

pub struct GrpcServer {
    query_handler: ServerGrpcQueryHandlerRef,
    user_provider: Option<UserProviderRef>,
    shutdown_tx: Mutex<Option<Sender<()>>>,
    runtime: Arc<Runtime>,
    request_handler: Arc<GreptimeRequestHandler>,
}

impl GrpcServer {
@@ -47,24 +53,28 @@ impl GrpcServer {
        user_provider: Option<UserProviderRef>,
        runtime: Arc<Runtime>,
    ) -> Self {
        Self {
        let request_handler = Arc::new(GreptimeRequestHandler::new(
            query_handler,
            user_provider,
            shutdown_tx: Mutex::new(None),
            runtime,
        ));
        Self {
            shutdown_tx: Mutex::new(None),
            request_handler,
        }
    }

    pub fn create_service(&self) -> FlightServiceServer<impl FlightService> {
        let service = FlightHandler::new(
            self.query_handler.clone(),
            self.user_provider.clone(),
            self.runtime.clone(),
        );
        FlightServiceServer::new(service)
    pub fn create_flight_service(&self) -> FlightServiceServer<impl FlightService> {
        FlightServiceServer::new(FlightHandler::new(self.request_handler.clone()))
    }

    pub fn create_database_service(&self) -> GreptimeDatabaseServer<impl GreptimeDatabase> {
        GreptimeDatabaseServer::new(DatabaseService::new(self.request_handler.clone()))
    }
}

pub const GRPC_SERVER: &str = "GRPC_SERVER";

#[async_trait]
impl Server for GrpcServer {
    async fn shutdown(&self) -> Result<()> {
@@ -101,11 +111,16 @@ impl Server for GrpcServer {

        // Would block to serve requests.
        tonic::transport::Server::builder()
            .add_service(self.create_service())
            .add_service(self.create_flight_service())
            .add_service(self.create_database_service())
            .serve_with_incoming_shutdown(TcpListenerStream::new(listener), rx.map(drop))
            .await
            .context(StartGrpcSnafu)?;

        Ok(addr)
    }

    fn name(&self) -> &str {
        GRPC_SERVER
    }
}

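With this change a single `GrpcServer` exposes both the Arrow Flight service and the new `GreptimeDatabase` service on the same port, sharing one `GreptimeRequestHandler`. A minimal wiring sketch, assuming `Server::start` takes the socket address to bind (only the tail of that method is visible in the hunk above); the caller supplies the handler and runtime exactly as before the refactor:

use std::net::SocketAddr;
use std::sync::Arc;

use common_runtime::Runtime;

use crate::grpc::GrpcServer;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::server::Server;

// Illustrative only: one GrpcServer now registers both tonic services,
// both backed by the shared GreptimeRequestHandler built in `new`.
async fn serve(
    handler: ServerGrpcQueryHandlerRef,
    runtime: Arc<Runtime>,
    addr: SocketAddr,
) -> crate::error::Result<SocketAddr> {
    let server = GrpcServer::new(handler, None, runtime);
    server.start(addr).await
}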
57 src/servers/src/grpc/database.rs Normal file
@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::greptime_database_server::GreptimeDatabase;
use api::v1::{greptime_response, AffectedRows, GreptimeRequest, GreptimeResponse};
use async_trait::async_trait;
use common_query::Output;
use tonic::{Request, Response, Status};

use crate::grpc::handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;

pub(crate) struct DatabaseService {
    handler: Arc<GreptimeRequestHandler>,
}

impl DatabaseService {
    pub(crate) fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
        Self { handler }
    }
}

#[async_trait]
impl GreptimeDatabase for DatabaseService {
    async fn handle(
        &self,
        request: Request<GreptimeRequest>,
    ) -> TonicResult<Response<GreptimeResponse>> {
        let request = request.into_inner();
        let output = self.handler.handle_request(request).await?;
        let response = match output {
            Output::AffectedRows(rows) => GreptimeResponse {
                header: None,
                response: Some(greptime_response::Response::AffectedRows(AffectedRows {
                    value: rows as _,
                })),
            },
            Output::Stream(_) | Output::RecordBatches(_) => {
                return Err(Status::unimplemented("GreptimeDatabase::handle for query"));
            }
        };
        Ok(Response::new(response))
    }
}

@@ -17,8 +17,7 @@ mod stream;
use std::pin::Pin;
use std::sync::Arc;

use api::v1::auth_header::AuthScheme;
use api::v1::{Basic, GreptimeRequest, RequestHeader};
use api::v1::GreptimeRequest;
use arrow_flight::flight_service_server::FlightService;
use arrow_flight::{
    Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo,
@@ -27,40 +26,25 @@ use arrow_flight::{
use async_trait::async_trait;
use common_grpc::flight::{FlightEncoder, FlightMessage};
use common_query::Output;
use common_runtime::Runtime;
use futures::Stream;
use prost::Message;
use session::context::{QueryContext, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tonic::{Request, Response, Status, Streaming};

use crate::auth::{Identity, UserProviderRef};
use crate::error;
use crate::error::Error::Auth;
use crate::error::{NotFoundAuthHeaderSnafu, UnsupportedAuthSchemeSnafu};
use crate::grpc::flight::stream::FlightRecordBatchStream;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::grpc::handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;

type TonicResult<T> = Result<T, Status>;
type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + Sync + 'static>>;

pub struct FlightHandler {
    handler: ServerGrpcQueryHandlerRef,
    user_provider: Option<UserProviderRef>,
    runtime: Arc<Runtime>,
    handler: Arc<GreptimeRequestHandler>,
}

impl FlightHandler {
    pub fn new(
        handler: ServerGrpcQueryHandlerRef,
        user_provider: Option<UserProviderRef>,
        runtime: Arc<Runtime>,
    ) -> Self {
        Self {
            handler,
            user_provider,
            runtime,
        }
    pub fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
        Self { handler }
    }
}

@@ -105,40 +89,8 @@ impl FlightService for FlightHandler {
        let request =
            GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;

        let query = request.request.context(error::InvalidQuerySnafu {
            reason: "Expecting non-empty GreptimeRequest.",
        })?;
        let query_ctx = create_query_context(request.header.as_ref());
        let output = self.handler.handle_request(request).await?;

        auth(
            self.user_provider.as_ref(),
            request.header.as_ref(),
            &query_ctx,
        )
        .await?;

        let handler = self.handler.clone();

        // Executes requests in another runtime to
        // 1. prevent the execution from being cancelled unexpected by Tonic runtime;
        //   - Refer to our blog for the rational behind it:
        //     https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
        //   - Obtaining a `JoinHandle` to get the panic message (if there's any).
        //     From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
        // 2. avoid the handler blocks the gRPC runtime incidentally.
        let handle = self
            .runtime
            .spawn(async move { handler.do_query(query, query_ctx).await });

        let output = handle.await.map_err(|e| {
            if e.is_cancelled() {
                Status::cancelled(e.to_string())
            } else if e.is_panic() {
                Status::internal(format!("{:?}", e.into_panic()))
            } else {
                Status::unknown(e.to_string())
            }
        })??;
        let stream = to_flight_data_stream(output);
        Ok(Response::new(stream))
    }
@@ -195,56 +147,3 @@ fn to_flight_data_stream(output: Output) -> TonicStream<FlightData> {
        }
    }
}

fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
    let ctx = QueryContext::arc();
    if let Some(header) = header {
        if !header.catalog.is_empty() {
            ctx.set_current_catalog(&header.catalog);
        }

        if !header.schema.is_empty() {
            ctx.set_current_schema(&header.schema);
        }
    };
    ctx
}

async fn auth(
    user_provider: Option<&UserProviderRef>,
    request_header: Option<&RequestHeader>,
    query_ctx: &QueryContextRef,
) -> TonicResult<()> {
    let Some(user_provider) = user_provider else { return Ok(()) };

    let user_info = match request_header
        .context(NotFoundAuthHeaderSnafu)?
        .clone()
        .authorization
        .context(NotFoundAuthHeaderSnafu)?
        .auth_scheme
        .context(NotFoundAuthHeaderSnafu)?
    {
        AuthScheme::Basic(Basic { username, password }) => user_provider
            .authenticate(
                Identity::UserId(&username, None),
                crate::auth::Password::PlainText(&password),
            )
            .await
            .map_err(|e| Auth { source: e }),
        AuthScheme::Token(_) => UnsupportedAuthSchemeSnafu {
            name: "Token AuthScheme",
        }
        .fail(),
    }
    .map_err(|e| Status::unauthenticated(e.to_string()))?;

    user_provider
        .authorize(
            &query_ctx.current_catalog(),
            &query_ctx.current_schema(),
            &user_info,
        )
        .await
        .map_err(|e| Status::permission_denied(e.to_string()))
}

137 src/servers/src/grpc/handler.rs Normal file
@@ -0,0 +1,137 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::auth_header::AuthScheme;
use api::v1::{Basic, GreptimeRequest, RequestHeader};
use common_query::Output;
use common_runtime::Runtime;
use session::context::{QueryContext, QueryContextRef};
use snafu::OptionExt;
use tonic::Status;

use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Error::{Auth, UnsupportedAuthScheme};
use crate::error::{InvalidQuerySnafu, NotFoundAuthHeaderSnafu};
use crate::grpc::TonicResult;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;

pub struct GreptimeRequestHandler {
    handler: ServerGrpcQueryHandlerRef,
    user_provider: Option<UserProviderRef>,
    runtime: Arc<Runtime>,
}

impl GreptimeRequestHandler {
    pub fn new(
        handler: ServerGrpcQueryHandlerRef,
        user_provider: Option<UserProviderRef>,
        runtime: Arc<Runtime>,
    ) -> Self {
        Self {
            handler,
            user_provider,
            runtime,
        }
    }

    pub(crate) async fn handle_request(&self, request: GreptimeRequest) -> TonicResult<Output> {
        let query = request.request.context(InvalidQuerySnafu {
            reason: "Expecting non-empty GreptimeRequest.",
        })?;

        let header = request.header.as_ref();
        let query_ctx = create_query_context(header);

        self.auth(header, &query_ctx).await?;

        let handler = self.handler.clone();

        // Executes requests in another runtime to
        // 1. prevent the execution from being cancelled unexpected by Tonic runtime;
        //   - Refer to our blog for the rational behind it:
        //     https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
        //   - Obtaining a `JoinHandle` to get the panic message (if there's any).
        //     From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
        // 2. avoid the handler blocks the gRPC runtime incidentally.
        let handle = self
            .runtime
            .spawn(async move { handler.do_query(query, query_ctx).await });

        let output = handle.await.map_err(|e| {
            if e.is_cancelled() {
                Status::cancelled(e.to_string())
            } else if e.is_panic() {
                Status::internal(format!("{:?}", e.into_panic()))
            } else {
                Status::unknown(e.to_string())
            }
        })??;
        Ok(output)
    }

    async fn auth(
        &self,
        header: Option<&RequestHeader>,
        query_ctx: &QueryContextRef,
    ) -> TonicResult<()> {
        let Some(user_provider) = self.user_provider.as_ref() else { return Ok(()) };

        let auth_scheme = header
            .and_then(|header| {
                header
                    .authorization
                    .as_ref()
                    .and_then(|x| x.auth_scheme.clone())
            })
            .context(NotFoundAuthHeaderSnafu)?;

        let user_info = match auth_scheme {
            AuthScheme::Basic(Basic { username, password }) => user_provider
                .authenticate(
                    Identity::UserId(&username, None),
                    Password::PlainText(&password),
                )
                .await
                .map_err(|e| Auth { source: e }),
            AuthScheme::Token(_) => Err(UnsupportedAuthScheme {
                name: "Token AuthScheme".to_string(),
            }),
        }
        .map_err(|e| Status::unauthenticated(e.to_string()))?;

        user_provider
            .authorize(
                &query_ctx.current_catalog(),
                &query_ctx.current_schema(),
                &user_info,
            )
            .await
            .map_err(|e| Status::permission_denied(e.to_string()))
    }
}

fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
    let ctx = QueryContext::arc();
    if let Some(header) = header {
        if !header.catalog.is_empty() {
            ctx.set_current_catalog(&header.catalog);
        }
        if !header.schema.is_empty() {
            ctx.set_current_schema(&header.schema);
        }
    };
    ctx
}

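The comments in `handle_request` above explain why the query future is spawned onto a dedicated runtime and how the resulting `JoinError` is folded into a tonic `Status`. A self-contained illustration of that mapping using a plain tokio runtime (the server code uses `common_runtime::Runtime`, whose `spawn` is used the same way):

use tonic::Status;

// Spawning on a separate runtime means dropping the gRPC request future cannot
// cancel the spawned work, and a panic inside it surfaces as an internal Status
// instead of tearing down the server thread.
async fn spawn_and_map(runtime: &tokio::runtime::Runtime) -> Result<u64, Status> {
    let handle = runtime.spawn(async { 42u64 });
    handle.await.map_err(|e| {
        if e.is_cancelled() {
            Status::cancelled(e.to_string())
        } else if e.is_panic() {
            Status::internal(format!("{:?}", e.into_panic()))
        } else {
            Status::unknown(e.to_string())
        }
    })
}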
@@ -19,6 +19,7 @@ pub mod opentsdb;
pub mod prometheus;
pub mod script;

mod admin;
#[cfg(feature = "mem-prof")]
pub mod mem_prof;

@@ -56,6 +57,8 @@ use self::authorize::HttpAuth;
use self::influxdb::{influxdb_health, influxdb_ping, influxdb_write};
use crate::auth::UserProviderRef;
use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu};
use crate::http::admin::flush;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::query_handler::{
    InfluxdbLineProtocolHandlerRef, OpentsdbProtocolHandlerRef, PrometheusProtocolHandlerRef,
@@ -96,6 +99,7 @@ pub static PUBLIC_APIS: [&str; 2] = ["/v1/influxdb/ping", "/v1/influxdb/health"]

pub struct HttpServer {
    sql_handler: ServerSqlQueryHandlerRef,
    grpc_handler: ServerGrpcQueryHandlerRef,
    options: HttpOptions,
    influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>,
    opentsdb_handler: Option<OpentsdbProtocolHandlerRef>,
@@ -349,9 +353,14 @@ pub struct ApiState {
}

impl HttpServer {
    pub fn new(sql_handler: ServerSqlQueryHandlerRef, options: HttpOptions) -> Self {
    pub fn new(
        sql_handler: ServerSqlQueryHandlerRef,
        grpc_handler: ServerGrpcQueryHandlerRef,
        options: HttpOptions,
    ) -> Self {
        Self {
            sql_handler,
            grpc_handler,
            options,
            opentsdb_handler: None,
            influxdb_handler: None,
@@ -426,6 +435,10 @@ impl HttpServer {
            .layer(Extension(api));

        let mut router = Router::new().nest(&format!("/{HTTP_API_VERSION}"), sql_router);
        router = router.nest(
            &format!("/{HTTP_API_VERSION}/admin"),
            self.route_admin(self.grpc_handler.clone()),
        );

        if let Some(opentsdb_handler) = self.opentsdb_handler.clone() {
            router = router.nest(
@@ -517,8 +530,16 @@ impl HttpServer {
            .route("/api/put", routing::post(opentsdb::put))
            .with_state(opentsdb_handler)
    }

    fn route_admin<S>(&self, grpc_handler: ServerGrpcQueryHandlerRef) -> Router<S> {
        Router::new()
            .route("/flush", routing::post(flush))
            .with_state(grpc_handler)
    }
}

pub const HTTP_SERVER: &str = "HTTP_SERVER";

#[async_trait]
impl Server for HttpServer {
    async fn shutdown(&self) -> Result<()> {
@@ -557,6 +578,10 @@ impl Server for HttpServer {

        Ok(listening)
    }

    fn name(&self) -> &str {
        HTTP_SERVER
    }
}

/// handle error middleware
@@ -572,6 +597,7 @@ mod test {
    use std::future::pending;
    use std::sync::Arc;

    use api::v1::greptime_request::Request;
    use axum::handler::Handler;
    use axum::http::StatusCode;
    use axum::routing::get;
@@ -586,12 +612,26 @@ mod test {

    use super::*;
    use crate::error::Error;
    use crate::query_handler::grpc::{GrpcQueryHandler, ServerGrpcQueryHandlerAdaptor};
    use crate::query_handler::sql::{ServerSqlQueryHandlerAdaptor, SqlQueryHandler};

    struct DummyInstance {
        _tx: mpsc::Sender<(String, Vec<u8>)>,
    }

    #[async_trait]
    impl GrpcQueryHandler for DummyInstance {
        type Error = Error;

        async fn do_query(
            &self,
            _query: Request,
            _ctx: QueryContextRef,
        ) -> std::result::Result<Output, Self::Error> {
            unimplemented!()
        }
    }

    #[async_trait]
    impl SqlQueryHandler for DummyInstance {
        type Error = Error;
@@ -608,14 +648,6 @@ mod test {
            unimplemented!()
        }

        async fn do_statement_query(
            &self,
            _stmt: sql::statements::statement::Statement,
            _query_ctx: QueryContextRef,
        ) -> Result<Output> {
            unimplemented!()
        }

        async fn do_describe(
            &self,
            _stmt: sql::statements::statement::Statement,
@@ -639,8 +671,10 @@ mod test {

    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
        let instance = Arc::new(DummyInstance { _tx: tx });
        let instance = ServerSqlQueryHandlerAdaptor::arc(instance);
        let server = HttpServer::new(instance, HttpOptions::default());
        let sql_instance = ServerSqlQueryHandlerAdaptor::arc(instance.clone());
        let grpc_instance = ServerGrpcQueryHandlerAdaptor::arc(instance);

        let server = HttpServer::new(sql_instance, grpc_instance, HttpOptions::default());
        server.make_app().route(
            "/test/timeout",
            get(forever.layer(

69 src/servers/src/http/admin.rs Normal file
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::v1::ddl_request::Expr;
use api::v1::greptime_request::Request;
use api::v1::{DdlRequest, FlushTableExpr};
use axum::extract::{Query, RawBody, State};
use axum::http::StatusCode as HttpStatusCode;
use axum::Json;
use session::context::QueryContext;
use snafu::OptionExt;

use crate::error;
use crate::error::Result;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;

#[axum_macros::debug_handler]
pub async fn flush(
    State(grpc_handler): State<ServerGrpcQueryHandlerRef>,
    Query(params): Query<HashMap<String, String>>,
    RawBody(_): RawBody,
) -> Result<(HttpStatusCode, Json<String>)> {
    let catalog_name = params
        .get("catalog_name")
        .cloned()
        .unwrap_or("greptime".to_string());
    let schema_name = params
        .get("schema_name")
        .cloned()
        .context(error::InvalidFlushArgumentSnafu {
            err_msg: "schema_name is not present",
        })?;

    // if table name is not present, flush all tables inside schema
    let table_name = params.get("table_name").cloned().unwrap_or_default();

    let region_id: Option<u32> = params
        .get("region")
        .map(|v| v.parse())
        .transpose()
        .ok()
        .flatten();

    let request = Request::Ddl(DdlRequest {
        expr: Some(Expr::FlushTable(FlushTableExpr {
            catalog_name: catalog_name.clone(),
            schema_name: schema_name.clone(),
            table_name: table_name.clone(),
            region_id,
        })),
    });

    grpc_handler.do_query(request, QueryContext::arc()).await?;
    Ok((HttpStatusCode::OK, Json::from("done".to_string())))
}

@@ -43,6 +43,7 @@ pub async fn sql(
    Form(form_params): Form<SqlQuery>,
) -> Json<JsonResponse> {
    let sql_handler = &state.sql_handler;

    let start = Instant::now();
    let sql = query_params.sql.or(form_params.sql);
    let db = query_params.db.or(form_params.db);

@@ -200,6 +200,8 @@ impl MysqlServer {
    }
}

pub const MYSQL_SERVER: &str = "MYSQL_SERVER";

#[async_trait]
impl Server for MysqlServer {
    async fn shutdown(&self) -> Result<()> {
@@ -214,4 +216,8 @@ impl Server for MysqlServer {
        self.base_server.start_with(join_handle).await?;
        Ok(addr)
    }

    fn name(&self) -> &str {
        MYSQL_SERVER
    }
}

@@ -97,6 +97,8 @@ impl OpentsdbServer {
    }
}

pub const OPENTSDB_SERVER: &str = "OPENTSDB_SERVER";

#[async_trait]
impl Server for OpentsdbServer {
    async fn shutdown(&self) -> Result<()> {
@@ -117,4 +119,7 @@ impl Server for OpentsdbServer {
        self.base_server.start_with(join_handle).await?;
        Ok(addr)
    }
    fn name(&self) -> &str {
        OPENTSDB_SERVER
    }
}