Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2026-01-08 06:12:55 +00:00

Compare commits (36 commits)
v0.1.2-alp

Commits (SHA1):
62e2a60b7b
128c5cabe1
9a001d3392
facdda4d9f
17eb99bc52
cd8be77968
b530ac9e60
76f1a79f1b
4705245d60
f712f978cf
cbf64e65b9
242ce5c2aa
e8d2e82335
0086cc2d3d
cdc111b607
81ca1d8399
8d3999df5f
a60788e92e
296c6dfcbf
604c20a83d
c7f114c8fa
8a83de4ea5
3377930a50
85dd7e4f24
f790fa05c1
dfd91a1bf8
ded31fb069
6a574fc52b
58bdf27068
610a895b66
a9ccc06449
38fe1a2f01
3414ac46b0
757b4a87a0
ba1517fceb
5b5d953d56
@@ -2,7 +2,7 @@
 GT_S3_BUCKET=S3 bucket
 GT_S3_ACCESS_KEY_ID=S3 access key id
 GT_S3_ACCESS_KEY=S3 secret access key
+GT_S3_ENDPOINT_URL=S3 endpoint url
 # Settings for oss test
 GT_OSS_BUCKET=OSS bucket
 GT_OSS_ACCESS_KEY_ID=OSS access key id

.github/workflows/release.yml (vendored, 267 changed lines)
@@ -12,13 +12,14 @@ name: Release
 env:
   RUST_TOOLCHAIN: nightly-2023-02-26

-  # FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
-  SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
+  SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0

-  # In the future, we can change SCHEDULED_PERIOD to nightly.
-  SCHEDULED_PERIOD: weekly
+  SCHEDULED_PERIOD: nightly

-  CARGO_PROFILE: weekly
+  CARGO_PROFILE: nightly

+  ## FIXME(zyy17): Enable it after the tests are stabled.
+  DISABLE_RUN_TESTS: true

 jobs:
   build:

@@ -30,16 +31,25 @@ jobs:
       - arch: x86_64-unknown-linux-gnu
         os: ubuntu-2004-16-cores
         file: greptime-linux-amd64
-      # - arch: aarch64-unknown-linux-gnu
-      #   os: ubuntu-2004-16-cores
-      #   file: greptime-linux-arm64
-      # - arch: aarch64-apple-darwin
-      #   os: macos-latest
-      #   file: greptime-darwin-arm64
-      # - arch: x86_64-apple-darwin
-      #   os: macos-latest
-      #   file: greptime-darwin-amd64
+        continue-on-error: false
+        opts: "-F pyo3_backend"
+      - arch: aarch64-unknown-linux-gnu
+        os: ubuntu-2004-16-cores
+        file: greptime-linux-arm64
+        continue-on-error: false
+        opts: "-F pyo3_backend"
+      - arch: aarch64-apple-darwin
+        os: macos-latest
+        file: greptime-darwin-arm64
+        continue-on-error: false
+        opts: "-F pyo3_backend"
+      - arch: x86_64-apple-darwin
+        os: macos-latest
+        file: greptime-darwin-amd64
+        continue-on-error: false
+        opts: "-F pyo3_backend"
     runs-on: ${{ matrix.os }}
+    continue-on-error: ${{ matrix.continue-on-error }}
     if: github.repository == 'GreptimeTeam/greptimedb'
     steps:
       - name: Checkout sources

@@ -93,7 +103,13 @@
       if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
       run: |
         sudo apt-get -y update
-        sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
+        sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget

+    - name: Compile Python 3.10.10 from source for Aarch64
+      if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
+      run: |
+        sudo chmod +x ./docker/aarch64/compile-python.sh
+        sudo ./docker/aarch64/compile-python.sh
+
     - name: Install rust toolchain
       uses: dtolnay/rust-toolchain@master

@@ -105,10 +121,21 @@
       run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

     - name: Run tests
+      if: env.DISABLE_RUN_TESTS == 'false'
       run: make unit-test integration-test sqlness-test

+    - name: Run cargo build for aarch64-linux
+      if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
+      run: |
+        # TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
+        export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
+        echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
+        alias python=python3
+        cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
+
     - name: Run cargo build
-      run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
+      if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
+      run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

     - name: Calculate checksum and rename binary
       shell: bash

@@ -129,48 +156,6 @@
         with:
           name: ${{ matrix.file }}.sha256sum
           path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
-  release:
-    name: Release artifacts
-    needs: [build]
-    runs-on: ubuntu-latest
-    if: github.repository == 'GreptimeTeam/greptimedb'
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: Download artifacts
-        uses: actions/download-artifact@v3
-
-      - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
-        shell: bash
-        if: github.event_name == 'schedule'
-        run: |
-          buildTime=`date "+%Y%m%d"`
-          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
-          echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
-
-      - name: Create scheduled build git tag
-        if: github.event_name == 'schedule'
-        run: |
-          git tag ${{ env.SCHEDULED_BUILD_VERSION }}
-
-      - name: Publish scheduled release # configure the different release title and tags.
-        uses: softprops/action-gh-release@v1
-        if: github.event_name == 'schedule'
-        with:
-          name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
-          tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
-          generate_release_notes: true
-          files: |
-            **/greptime-*
-
-      - name: Publish release
-        uses: softprops/action-gh-release@v1
-        if: github.event_name != 'schedule'
-        with:
-          name: "Release ${{ github.ref_name }}"
-          files: |
-            **/greptime-*
-
   docker:
     name: Build docker image

@@ -181,37 +166,6 @@
       - name: Checkout sources
         uses: actions/checkout@v3

-      - name: Download amd64 binary
-        uses: actions/download-artifact@v3
-        with:
-          name: greptime-linux-amd64
-          path: amd64
-
-      - name: Unzip the amd64 artifacts
-        run: |
-          cd amd64
-          tar xvf greptime-linux-amd64.tgz
-          rm greptime-linux-amd64.tgz
-
-      # - name: Download arm64 binary
-      #   uses: actions/download-artifact@v3
-      #   with:
-      #     name: greptime-linux-arm64
-      #     path: arm64
-
-      # - name: Unzip the arm64 artifacts
-      #   run: |
-      #     cd arm64
-      #     tar xvf greptime-linux-arm64.tgz
-      #     rm greptime-linux-arm64.tgz
-
-      - name: Login to UCloud Container Registry
-        uses: docker/login-action@v2
-        with:
-          registry: uhub.service.ucloud.cn
-          username: ${{ secrets.UCLOUD_USERNAME }}
-          password: ${{ secrets.UCLOUD_PASSWORD }}
-
       - name: Login to Dockerhub
         uses: docker/login-action@v2
         with:

@@ -239,16 +193,143 @@
       - name: Set up buildx
         uses: docker/setup-buildx-action@v2

-      - name: Build and push
+      - name: Download amd64 binary
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-amd64
+          path: amd64
+
+      - name: Unzip the amd64 artifacts
+        run: |
+          cd amd64
+          tar xvf greptime-linux-amd64.tgz
+          rm greptime-linux-amd64.tgz
+
+      - name: Download arm64 binary
+        id: download-arm64
+        uses: actions/download-artifact@v3
+        with:
+          name: greptime-linux-arm64
+          path: arm64
+
+      - name: Unzip the arm64 artifacts
+        id: unzip-arm64
+        if: success() || steps.download-arm64.conclusion == 'success'
+        run: |
+          cd arm64
+          tar xvf greptime-linux-arm64.tgz
+          rm greptime-linux-arm64.tgz
+
+      - name: Build and push all
         uses: docker/build-push-action@v3
+        if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
+        with:
+          context: .
+          file: ./docker/ci/Dockerfile
+          push: true
+          platforms: linux/amd64,linux/arm64
+          tags: |
+            greptime/greptimedb:latest
+            greptime/greptimedb:${{ env.IMAGE_TAG }}
+
+      - name: Build and push amd64 only
+        uses: docker/build-push-action@v3
+        if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
         with:
           context: .
           file: ./docker/ci/Dockerfile
           push: true
-          # platforms: linux/amd64,linux/arm64
           platforms: linux/amd64
           tags: |
             greptime/greptimedb:latest
             greptime/greptimedb:${{ env.IMAGE_TAG }}
-            uhub.service.ucloud.cn/greptime/greptimedb:latest
-            uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
+
+  release:
+    name: Release artifacts
+    # Release artifacts only when all the artifacts are built successfully.
+    needs: [build,docker]
+    runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+
+      - name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
+        shell: bash
+        if: github.event_name == 'schedule'
+        run: |
+          buildTime=`date "+%Y%m%d"`
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
+          echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
+
+      - name: Create scheduled build git tag
+        if: github.event_name == 'schedule'
+        run: |
+          git tag ${{ env.SCHEDULED_BUILD_VERSION }}
+
+      - name: Publish scheduled release # configure the different release title and tags.
+        uses: softprops/action-gh-release@v1
+        if: github.event_name == 'schedule'
+        with:
+          name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
+          tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
+          generate_release_notes: true
+          files: |
+            **/greptime-*
+
+      - name: Publish release
+        uses: softprops/action-gh-release@v1
+        if: github.event_name != 'schedule'
+        with:
+          name: "Release ${{ github.ref_name }}"
+          files: |
+            **/greptime-*
+
+  docker-push-uhub:
+    name: Push docker image to UCloud Container Registry
+    needs: [docker]
+    runs-on: ubuntu-latest
+    if: github.repository == 'GreptimeTeam/greptimedb'
+    # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
+    continue-on-error: true
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to UCloud Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: uhub.service.ucloud.cn
+          username: ${{ secrets.UCLOUD_USERNAME }}
+          password: ${{ secrets.UCLOUD_PASSWORD }}
+
+      - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
+        shell: bash
+        if: github.event_name == 'schedule'
+        run: |
+          buildTime=`date "+%Y%m%d"`
+          SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
+          echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
+
+      - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
+        shell: bash
+        if: github.event_name != 'schedule'
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
+        run: |
+          docker buildx imagetools create \
+            --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
+            --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
+            greptime/greptimedb:${{ env.IMAGE_TAG }}

Cargo.lock (generated, 1016 changed lines)
File diff suppressed because it is too large.

Cargo.toml (28 changed lines)
@@ -45,33 +45,33 @@ members = [
 ]

 [workspace.package]
-version = "0.1.0"
+version = "0.1.1"
 edition = "2021"
 license = "Apache-2.0"

 [workspace.dependencies]
-arrow = { version = "33.0", features = ["pyarrow"] }
+arrow = { version = "34.0" }
-arrow-array = "33.0"
+arrow-array = "34.0"
-arrow-flight = "33.0"
+arrow-flight = "34.0"
-arrow-schema = { version = "33.0", features = ["serde"] }
+arrow-schema = { version = "34.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
 chrono = { version = "0.4", features = ["serde"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
 futures = "0.3"
 futures-util = "0.3"
-parquet = "33.0"
+parquet = "34.0"
 paste = "1.0"
 prost = "0.11"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = "0.30"
+sqlparser = "0.32"
 tempfile = "3"
 tokio = { version = "1.24.2", features = ["full"] }
 tokio-util = "0.7"

@@ -81,7 +81,7 @@ uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
 [profile.release]
 debug = true

-[profile.weekly]
+[profile.nightly]
 inherits = "release"
 strip = true
 lto = "thin"
@@ -10,10 +10,6 @@ rpc_addr = "127.0.0.1:3001"
 rpc_hostname = "127.0.0.1"
 # The number of gRPC server worker threads, 8 by default.
 rpc_runtime_size = 8
-# MySQL server address, "127.0.0.1:4406" by default.
-mysql_addr = "127.0.0.1:4406"
-# The number of MySQL server worker threads, 2 by default.
-mysql_runtime_size = 2

 # Metasrv client options.
 [meta_client_options]

@@ -48,5 +44,7 @@ max_purge_tasks = 32

 # Procedure storage options, see `standalone.example.toml`.
 # [procedure.store]
-# type = 'File'
-# data_dir = '/tmp/greptimedb/procedure/'
+# type = "File"
+# data_dir = "/tmp/greptimedb/procedure/"
+# max_retry_times = 3
+# retry_delay = "500ms"

@@ -114,3 +114,7 @@ max_purge_tasks = 32
 # type = "File"
 # # Procedure data path.
 # data_dir = "/tmp/greptimedb/procedure/"
+# # Procedure max retry time.
+# max_retry_times = 3
+# # Initial retry delay of procedures, increases exponentially
+# retry_delay = "500ms"

docker/aarch64/Dockerfile (new file, 57 lines)
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    build-essential \
    pkg-config \
    wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
    apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
    apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
    ./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependency, using separate `RUN` to separate cache
RUN cargo fetch

# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variable for cross compiling and compile it
# cross compiled python is `python3` in path, but pyo3 need `python` in path so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
    alias python=python3 && \
    cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]

docker/aarch64/compile-python.sh (new file, 46 lines)

# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
#   allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here

# Build local python first, then build cross-compiled python.
./configure \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
    make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
    make clean && \
    make distclean && \
    alias python=python3 && \
    ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
    --prefix=$PY_INSTALL_PATH --enable-optimizations \
    CC=aarch64-linux-gnu-gcc \
    CXX=aarch64-linux-gnu-g++ \
    AR=aarch64-linux-gnu-ar \
    LD=aarch64-linux-gnu-ld \
    RANLIB=aarch64-linux-gnu-ranlib \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
    make && make altinstall && \
    cd ..
docs/rfcs/2023-03-08-region-fault-tolerance.md (new file, 196 lines)

---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---

Fault Tolerance for Region
----------------------

# Summary

This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Put another way, it is about achieving region high availability ("HA") for a GreptimeDB cluster.

In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.

When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, although some requests may fail during the region failover. Optimization to reduce the MTTR (Mean Time To Recovery) is not a concern of this RFC, and is left for future work.

# Motivation

Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, including our GreptimeDB cluster.

# Details

## Background

Some background about regions in distributed mode:

- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed among Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at a Frontend, the Frontend splits the request into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions (see the sketch after this list).
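The request splitting in the last bullet can be sketched in a few lines of Rust. This is an illustration only, not GreptimeDB code: the `RegionRoute`, `ScanRequest`, and `RegionScan` types and the datanode addressing are assumptions made for the example.

```rust
use std::collections::HashMap;

/// Hypothetical route entry: which datanode currently serves a region.
struct RegionRoute {
    region_id: u64,
    datanode: String, // address of the datanode holding this region
}

/// Hypothetical whole-table scan coming from the client.
struct ScanRequest {
    table: String,
    filter: Option<String>,
}

/// Hypothetical per-region sub-request submitted to one datanode.
struct RegionScan {
    region_id: u64,
    table: String,
    filter: Option<String>,
}

/// Split a table scan into one sub-request per region, grouped by datanode.
fn split_by_region(req: &ScanRequest, routes: &[RegionRoute]) -> HashMap<String, Vec<RegionScan>> {
    let mut per_datanode: HashMap<String, Vec<RegionScan>> = HashMap::new();
    for route in routes {
        per_datanode
            .entry(route.datanode.clone())
            .or_default()
            .push(RegionScan {
                region_id: route.region_id,
                table: req.table.clone(),
                filter: req.filter.clone(),
            });
    }
    per_datanode
}

fn main() {
    let routes = vec![
        RegionRoute { region_id: 1, datanode: "dn-1:3001".to_string() },
        RegionRoute { region_id: 2, datanode: "dn-2:3001".to_string() },
        RegionRoute { region_id: 3, datanode: "dn-1:3001".to_string() },
    ];
    let scan = ScanRequest { table: "metrics".to_string(), filter: None };
    for (datanode, sub_requests) in split_by_region(&scan, &routes) {
        println!("{datanode} gets {} sub-request(s)", sub_requests.len());
    }
}
```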
In conclusion, as long as regions remain available, and regions can regain availability when failures do occur, overall region HA is achieved. With this in mind, let's see how region failures are detected first.

## Failure Detection

We detect region failures in Metasrv, and do it both passively and actively. "Passively" means that Metasrv does not fire "are you healthy" requests at regions. Instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.

Datanode already carries its region stats in the heartbeat request (the non-relevant fields are omitted):

```protobuf
message HeartbeatRequest {
  ...
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  ...
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  ...
}
```

For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if the region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.

> The heartbeat interval is now hardcoded to five seconds.

Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If some region's heartbeats are not received for a period of time, Metasrv suspects the region might be unavailable. To decide whether a region has failed or not, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" that represents the possibility of a region being unavailable, based on the arrival rate of historical heartbeats. Once the "phi" rises above some pre-defined threshold, Metasrv considers the region failed.

> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.

When Metasrv decides from heartbeats that some region has failed, that is not the final decision. Here comes the "active" detection. Before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start the failover of the region.

To conclude, the failure detection pseudo-code looks like this:

```rust
// in Metasrv:
fn failure_detection() {
    loop {
        // passive detection
        let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();

        // find the datanodes that contain the failed regions
        let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

        // active detection
        for (datanode, regions) in datanodes_and_regions {
            if !datanode.is_healthy(regions) {
                do_failover(datanode, regions);
            }
        }

        sleep(config.detect_interval);
    }
}
```
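As an aside, the φ calculation described above can be sketched as follows. This is a simplified illustration, not the Metasrv implementation: it assumes exponentially distributed heartbeat inter-arrival times instead of the normal-distribution model of the original Phi Accrual formulation, and all names are invented for the example.

```rust
/// A minimal accrual failure detector sketch: the higher `phi`,
/// the more suspicious we are that heartbeats have stopped.
struct PhiDetector {
    /// Recent inter-arrival times between heartbeats, in seconds.
    intervals: Vec<f64>,
    /// Timestamp of the last observed heartbeat, in seconds.
    last_heartbeat: Option<f64>,
    max_samples: usize,
}

impl PhiDetector {
    fn new(max_samples: usize) -> Self {
        Self { intervals: Vec::new(), last_heartbeat: None, max_samples }
    }

    /// Record a heartbeat arriving at time `now` (seconds).
    fn heartbeat(&mut self, now: f64) {
        if let Some(last) = self.last_heartbeat {
            if self.intervals.len() == self.max_samples {
                self.intervals.remove(0);
            }
            self.intervals.push(now - last);
        }
        self.last_heartbeat = Some(now);
    }

    /// phi = -log10(P(no heartbeat for this long)), assuming exponentially
    /// distributed inter-arrival times (a simplification of the original model).
    fn phi(&self, now: f64) -> f64 {
        let last = match self.last_heartbeat {
            Some(t) if !self.intervals.is_empty() => t,
            _ => return 0.0,
        };
        let mean = self.intervals.iter().sum::<f64>() / self.intervals.len() as f64;
        let elapsed = now - last;
        // P(interval > elapsed) = exp(-elapsed / mean); phi is -log10 of that.
        (elapsed / mean) * std::f64::consts::LOG10_E
    }
}

fn main() {
    let mut detector = PhiDetector::new(100);
    // Heartbeats arrive every ~5 seconds, then stop.
    for i in 0..10 {
        detector.heartbeat(i as f64 * 5.0);
    }
    println!("phi right after a heartbeat: {:.2}", detector.phi(45.1));
    println!("phi after 30s of silence:    {:.2}", detector.phi(75.0));
}
```

Once `phi` exceeds the configured threshold (the `config.phi` in the pseudo-code above), the region is treated as suspect and the active health check kicks in.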
Some design considerations:

- Why detect actively when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does detection work on regions instead of Datanodes? Because only some of the regions in a Datanode may be unavailable, not ALL of them, especially when Datanodes are shared by multiple tenants. In that case it's better to do failover on the affected regions instead of all the regions that reside on the Datanode. All in all, we want finer-grained control over region failover.

So we detect that some regions are not available. How do we regain their availability?

## Region Failover

Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.

In general, region failover is fairly simple. Once Metasrv decides to do failover on some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that can all help Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes the "candidates".

> The strategy for choosing the most suitable candidates requires careful design, but that's another RFC.

Then, Metasrv sets the states of these failed regions to "passive". We should add a field to `Region`:

```protobuf
message Region {
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  message State {
    Active,
    Passive,
  }
  State state = 4;

  map<string, string> attrs = 100;
}
```

Here `Region` is used in message `RegionRoute`, which indicates how a write request is split among regions. When a region is set to "passive", the Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however).

> Making a region "passive" here effectively blocks writes to it. That is fine in the failover situation, since the region has failed anyway. However, when dealing with active maintenance operations, region state requires a more refined design. But that's another story.
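For illustration, the write/read gating implied by the passive state could look like the sketch below. It is not Frontend code from GreptimeDB; the enum mirrors the `State` field added above, and everything else is assumed for the example.

```rust
/// Mirrors the `State` added to the `Region` message above.
#[derive(Clone, Copy, PartialEq)]
enum RegionState {
    Active,
    Passive,
}

struct RegionRoute {
    region_id: u64,
    state: RegionState,
}

enum RouteError {
    /// Writes to a passive region are rejected until failover finishes.
    RegionPassive { region_id: u64 },
}

/// Writes are only routed to active regions.
fn check_write(route: &RegionRoute) -> Result<(), RouteError> {
    match route.state {
        RegionState::Active => Ok(()),
        RegionState::Passive => Err(RouteError::RegionPassive {
            region_id: route.region_id,
        }),
    }
}

/// Reads are not blocked by the passive state.
fn check_read(_route: &RegionRoute) -> Result<(), RouteError> {
    Ok(())
}

fn main() {
    let failed = RegionRoute { region_id: 42, state: RegionState::Passive };
    assert!(check_write(&failed).is_err()); // writes blocked during failover
    assert!(check_read(&failed).is_ok());   // reads still allowed
}
```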
Third, Metasrv fires "close region" requests to the failed Datanodes, and "open region" requests to the candidates. The "close region" requests might fail due to the unavailability of the Datanodes, but that's fine: they are just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.

> Currently "close region" is undefined in Datanode. It could be a local cache cleanup of region data or other resource tidy-up.

Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating that it is ready to handle the region. The "call back" here is backed by its heartbeat to Metasrv. Metasrv updates the region's state to "active", so as to let the Frontend lift the restriction on region writes (again, the read path of the region is untouched).

All the above steps should be managed by a remote procedure framework. That is another implementation challenge in the region failover feature (one being the remote WAL, of course).

A picture is worth a 1000 words:

```text
+-------------------------+
| Metasrv detects region  |
| failure                 |
+-------------------------+
            |
            v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions     |
+----------------------------+
            |
            v
+-------------------------+       +-------------------------+
| Metasrv "passive" the   |------>| Frontend rejects writes |
| failed regions          |       | to "passive" regions    |
+-------------------------+       +-------------------------+
            |
            v
+--------------------------+      +---------------------------+
| Candidate Datanodes open |<-----| Metasrv fires "close" and |
| regions from remote WAL  |      | "open" region requests    |
+--------------------------+      +---------------------------+
            |
            |    +-------------------------+       +-------------------------+
            +--->| Metasrv "active" the    |------>| Frontend lifts write    |
                 | failed regions          |       | restriction to regions  |
                 +-------------------------+       +-------------------------+
                             |
                             v
                 +-------------------------+
                 | Region failover done,   |
                 | HA regain               |
                 +-------------------------+
```

# Alternatives

## The "Neon" Way

Remote WAL raises a problem that could harm the write throughput of a GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, placing the remote WAL between the Frontend and the Datanode; couldn't that improve our write throughput? It could, though there are some consistency issues like "read-your-writes" to solve.

However, the main reasons we don't adopt this method are two-fold:

1. The remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reduction in reliability).

## No WAL, Replication instead

This method replicates regions across Datanodes directly, the common way in shared-nothing databases. Were the main region to fail, a standby region in the replication group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It would require a major redesign, but gains no significant advantage over the remote WAL method.

However, replication does have its own advantages that we can learn from to optimize this failover procedure.

# Future Work

Some optimizations we could make:

- To reduce the MTTR, we could make Metasrv choose the candidate for each region in normal times. The candidate does some preparation work to reduce the region-open time, effectively accelerating the failover procedure.
- We could adopt the replication method to the degree that region replicas are used as fast catch-up candidates. The data difference among replicas is minor, so region failover would not need to load or exchange much data, greatly reducing the region failover time.
@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
     wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
   fi

-  tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
+  tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
 fi
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
 common-error = { path = "../common/error" }
 common-time = { path = "../common/time" }
 datatypes = { path = "../datatypes" }
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
 prost.workspace = true
 snafu = { version = "0.7", features = ["backtraces"] }
 tonic.workspace = true
@@ -204,6 +204,21 @@ pub enum Error {

     #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
     QueryAccessDenied { catalog: String, schema: String },
+
+    #[snafu(display(
+        "Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
+        catalog,
+        schema,
+        table,
+        source
+    ))]
+    RegionStats {
+        catalog: String,
+        schema: String,
+        table: String,
+        #[snafu(backtrace)]
+        source: table::error::Error,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;

@@ -238,7 +253,8 @@ impl ErrorExt for Error {
             | Error::InsertCatalogRecord { source, .. }
             | Error::OpenTable { source, .. }
             | Error::CreateTable { source, .. }
-            | Error::DeregisterTable { source, .. } => source.status_code(),
+            | Error::DeregisterTable { source, .. }
+            | Error::RegionStats { source, .. } => source.status_code(),

             Error::MetaSrv { source, .. } => source.status_code(),
             Error::SystemCatalogTableScan { source } => source.status_code(),
@@ -18,6 +18,7 @@ use std::any::Any;
 use std::fmt::{Debug, Formatter};
 use std::sync::Arc;

+use api::v1::meta::{RegionStat, TableName};
 use common_telemetry::info;
 use snafu::{OptionExt, ResultExt};
 use table::engine::{EngineContext, TableEngineRef};

@@ -225,10 +226,10 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
     Ok(())
 }

-/// The number of regions in the datanode node.
-pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
-    let mut region_number: u64 = 0;
+/// The stat of regions in the datanode node.
+/// The number of regions can be got from len of vec.
+pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<RegionStat>> {
+    let mut region_stats = Vec::new();
     for catalog_name in catalog_manager.catalog_names()? {
         let catalog =
             catalog_manager

@@ -254,10 +255,28 @@ pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
                     table_info: &table_name,
                 })?;

-                let region_numbers = &table.table_info().meta.region_numbers;
-                region_number += region_numbers.len() as u64;
+                region_stats.extend(
+                    table
+                        .region_stats()
+                        .context(error::RegionStatsSnafu {
+                            catalog: &catalog_name,
+                            schema: &schema_name,
+                            table: &table_name,
+                        })?
+                        .into_iter()
+                        .map(|stat| RegionStat {
+                            region_id: stat.region_id,
+                            table_name: Some(TableName {
+                                catalog_name: catalog_name.clone(),
+                                schema_name: schema_name.clone(),
+                                table_name: table_name.clone(),
+                            }),
+                            approximate_bytes: stat.disk_usage_bytes as i64,
+                            ..Default::default()
+                        }),
+                );
             }
         }
     }
-    Ok(region_number)
+    Ok(region_stats)
 }
@@ -221,4 +221,8 @@ impl TableEngine for MockTableEngine {
     ) -> table::Result<bool> {
         unimplemented!()
     }
+
+    async fn close(&self) -> table::Result<()> {
+        Ok(())
+    }
 }
@@ -16,6 +16,7 @@ common-grpc-expr = { path = "../common/grpc-expr" }
 common-query = { path = "../common/query" }
 common-recordbatch = { path = "../common/recordbatch" }
 common-time = { path = "../common/time" }
+common-telemetry = { path = "../common/telemetry" }
 datafusion.workspace = true
 datatypes = { path = "../datatypes" }
 enum_dispatch = "0.3"
@@ -19,13 +19,14 @@ use api::v1::ddl_request::Expr as DdlExpr;
 use api::v1::greptime_request::Request;
 use api::v1::query_request::Query;
 use api::v1::{
-    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
-    InsertRequest, QueryRequest, RequestHeader,
+    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
+    GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
 };
 use arrow_flight::{FlightData, Ticket};
 use common_error::prelude::*;
 use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
 use common_query::Output;
+use common_telemetry::logging;
 use futures_util::{TryFutureExt, TryStreamExt};
 use prost::Message;
 use snafu::{ensure, ResultExt};

@@ -95,6 +96,24 @@ impl Database {
         .await
     }

+    pub async fn prom_range_query(
+        &self,
+        promql: &str,
+        start: &str,
+        end: &str,
+        step: &str,
+    ) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::PromRangeQuery(PromRangeQuery {
+                query: promql.to_string(),
+                start: start.to_string(),
+                end: end.to_string(),
+                step: step.to_string(),
+            })),
+        }))
+        .await
+    }
+
     pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
         self.do_get(Request::Ddl(DdlRequest {
             expr: Some(DdlExpr::CreateTable(expr)),

@@ -116,6 +135,13 @@ impl Database {
         .await
     }

+    pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
+        self.do_get(Request::Ddl(DdlRequest {
+            expr: Some(DdlExpr::FlushTable(expr)),
+        }))
+        .await
+    }
+
     async fn do_get(&self, request: Request) -> Result<Output> {
         let request = GreptimeRequest {
             header: Some(RequestHeader {

@@ -149,6 +175,15 @@ impl Database {
                     tonic_code: e.code(),
                     addr: client.addr(),
                 })
+                .map_err(|error| {
+                    logging::error!(
+                        "Failed to do Flight get, addr: {}, code: {}, source: {}",
+                        client.addr(),
+                        e.code(),
+                        error
+                    );
+                    error
+                })
                 .unwrap_err()
             })?;
@@ -26,12 +26,7 @@ pub enum Error {
         backtrace: Backtrace,
     },

-    #[snafu(display(
-        "Failed to do Flight get, addr: {}, code: {}, source: {}",
-        addr,
-        tonic_code,
-        source
-    ))]
+    #[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
     FlightGet {
         addr: String,
         tonic_code: Code,
@@ -30,9 +30,39 @@ struct Command {
     subcmd: SubCommand,
 }

+pub enum Application {
+    Datanode(datanode::Instance),
+    Frontend(frontend::Instance),
+    Metasrv(metasrv::Instance),
+    Standalone(standalone::Instance),
+    Cli(cli::Instance),
+}
+
+impl Application {
+    async fn run(&mut self) -> Result<()> {
+        match self {
+            Application::Datanode(instance) => instance.run().await,
+            Application::Frontend(instance) => instance.run().await,
+            Application::Metasrv(instance) => instance.run().await,
+            Application::Standalone(instance) => instance.run().await,
+            Application::Cli(instance) => instance.run().await,
+        }
+    }
+
+    async fn stop(&self) -> Result<()> {
+        match self {
+            Application::Datanode(instance) => instance.stop().await,
+            Application::Frontend(instance) => instance.stop().await,
+            Application::Metasrv(instance) => instance.stop().await,
+            Application::Standalone(instance) => instance.stop().await,
+            Application::Cli(instance) => instance.stop().await,
+        }
+    }
+}
+
 impl Command {
-    async fn run(self) -> Result<()> {
-        self.subcmd.run().await
+    async fn build(self) -> Result<Application> {
+        self.subcmd.build().await
     }
 }

@@ -51,13 +81,28 @@ enum SubCommand {
 }

 impl SubCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Application> {
         match self {
-            SubCommand::Datanode(cmd) => cmd.run().await,
-            SubCommand::Frontend(cmd) => cmd.run().await,
-            SubCommand::Metasrv(cmd) => cmd.run().await,
-            SubCommand::Standalone(cmd) => cmd.run().await,
-            SubCommand::Cli(cmd) => cmd.run().await,
+            SubCommand::Datanode(cmd) => {
+                let app = cmd.build().await?;
+                Ok(Application::Datanode(app))
+            }
+            SubCommand::Frontend(cmd) => {
+                let app = cmd.build().await?;
+                Ok(Application::Frontend(app))
+            }
+            SubCommand::Metasrv(cmd) => {
+                let app = cmd.build().await?;
+                Ok(Application::Metasrv(app))
+            }
+            SubCommand::Standalone(cmd) => {
+                let app = cmd.build().await?;
+                Ok(Application::Standalone(app))
+            }
+            SubCommand::Cli(cmd) => {
+                let app = cmd.build().await?;
+                Ok(Application::Cli(app))
+            }
         }
     }
 }

@@ -104,13 +149,18 @@ async fn main() -> Result<()> {
     common_telemetry::init_default_metrics_recorder();
     let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);

+    let mut app = cmd.build().await?;
+
     tokio::select! {
-        result = cmd.run() => {
+        result = app.run() => {
             if let Err(err) = result {
                 error!(err; "Fatal error occurs!");
             }
         }
         _ = tokio::signal::ctrl_c() => {
+            if let Err(err) = app.stop().await {
+                error!(err; "Fatal error occurs!");
+            }
             info!("Goodbye!");
         }
     }
@@ -17,10 +17,25 @@ mod helper;
 mod repl;

 use clap::Parser;
-use repl::Repl;
+pub use repl::Repl;

 use crate::error::Result;

+pub struct Instance {
+    repl: Repl,
+}
+
+impl Instance {
+    pub async fn run(&mut self) -> Result<()> {
+        self.repl.run().await
+    }
+
+    pub async fn stop(&self) -> Result<()> {
+        // TODO: handle cli shutdown
+        Ok(())
+    }
+}
+
 #[derive(Parser)]
 pub struct Command {
     #[clap(subcommand)]

@@ -28,8 +43,8 @@ pub struct Command {
 }

 impl Command {
-    pub async fn run(self) -> Result<()> {
-        self.cmd.run().await
+    pub async fn build(self) -> Result<Instance> {
+        self.cmd.build().await
     }
 }

@@ -39,9 +54,9 @@ enum SubCommand {
 }

 impl SubCommand {
-    async fn run(self) -> Result<()> {
+    async fn build(self) -> Result<Instance> {
         match self {
-            SubCommand::Attach(cmd) => cmd.run().await,
+            SubCommand::Attach(cmd) => cmd.build().await,
         }
     }
 }

@@ -57,8 +72,8 @@ pub(crate) struct AttachCommand {
 }

 impl AttachCommand {
-    async fn run(self) -> Result<()> {
-        let mut repl = Repl::try_new(&self).await?;
-        repl.run().await
+    async fn build(self) -> Result<Instance> {
+        let repl = Repl::try_new(&self).await?;
+        Ok(Instance { repl })
     }
 }
@@ -32,6 +32,7 @@ use query::datafusion::DatafusionQueryEngine;
 use query::logical_optimizer::LogicalOptimizer;
 use query::parser::QueryLanguageParser;
 use query::plan::LogicalPlan;
+use query::query_engine::QueryEngineState;
 use query::QueryEngine;
 use rustyline::error::ReadlineError;
 use rustyline::Editor;

@@ -49,7 +50,7 @@ use crate::error::{
 };

 /// Captures the state of the repl, gathers commands and executes them one by one
-pub(crate) struct Repl {
+pub struct Repl {
     /// Rustyline editor for interacting with user on command line
     rl: Editor<RustylineHelper>,

@@ -166,12 +167,16 @@ impl Repl {
             self.database.catalog(),
             self.database.schema(),
         ));
-        let LogicalPlan::DfPlan(plan) = query_engine
-            .statement_to_plan(stmt, query_ctx)
+        let plan = query_engine
+            .planner()
+            .plan(stmt, query_ctx)
             .await
-            .and_then(|x| query_engine.optimize(&x))
             .context(PlanStatementSnafu)?;

+        let LogicalPlan::DfPlan(plan) =
+            query_engine.optimize(&plan).context(PlanStatementSnafu)?;
+
         let plan = DFLogicalSubstraitConvertor {}
             .encode(plan)
             .context(SubstraitEncodeLogicalPlanSnafu)?;

@@ -262,6 +267,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
         partition_manager,
         datanode_clients,
     ));
+    let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));

-    Ok(DatafusionQueryEngine::new(catalog_list, Default::default()))
+    Ok(DatafusionQueryEngine::new(state))
 }
@@ -24,6 +24,21 @@ use snafu::ResultExt;
 use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
 use crate::toml_loader;

+pub struct Instance {
+datanode: Datanode,
+}
+
+impl Instance {
+pub async fn run(&mut self) -> Result<()> {
+self.datanode.start().await.context(StartDatanodeSnafu)
+}
+
+pub async fn stop(&self) -> Result<()> {
+// TODO: handle datanode shutdown
+Ok(())
+}
+}
+
 #[derive(Parser)]
 pub struct Command {
 #[clap(subcommand)]
@@ -31,8 +46,8 @@ pub struct Command {
 }

 impl Command {
-pub async fn run(self) -> Result<()> {
+pub async fn build(self) -> Result<Instance> {
-self.subcmd.run().await
+self.subcmd.build().await
 }
 }

@@ -42,9 +57,9 @@ enum SubCommand {
 }

 impl SubCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 match self {
-SubCommand::Start(cmd) => cmd.run().await,
+SubCommand::Start(cmd) => cmd.build().await,
 }
 }
 }
@@ -72,19 +87,16 @@ struct StartCommand {
 }

 impl StartCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 logging::info!("Datanode start command: {:#?}", self);

 let opts: DatanodeOptions = self.try_into()?;

 logging::info!("Datanode options: {:#?}", opts);

-Datanode::new(opts)
-.await
-.context(StartDatanodeSnafu)?
-.start()
-.await
-.context(StartDatanodeSnafu)
+let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
+
+Ok(Instance { datanode })
 }
 }

@@ -138,7 +150,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
 if let Some(wal_dir) = cmd.wal_dir {
 opts.wal.dir = wal_dir;
 }

 if let Some(procedure_dir) = cmd.procedure_dir {
 opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
 }
@@ -32,6 +32,12 @@ pub enum Error {
 source: frontend::error::Error,
 },

+#[snafu(display("Failed to build meta server, source: {}", source))]
+BuildMetaServer {
+#[snafu(backtrace)]
+source: meta_srv::error::Error,
+},
+
 #[snafu(display("Failed to start meta server, source: {}", source))]
 StartMetaServer {
 #[snafu(backtrace)]
@@ -138,6 +144,7 @@ impl ErrorExt for Error {
 Error::StartDatanode { source } => source.status_code(),
 Error::StartFrontend { source } => source.status_code(),
 Error::StartMetaServer { source } => source.status_code(),
+Error::BuildMetaServer { source } => source.status_code(),
 Error::UnsupportedSelectorType { source, .. } => source.status_code(),
 Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
 StatusCode::InvalidArguments
@@ -16,10 +16,10 @@ use std::sync::Arc;

 use clap::Parser;
 use common_base::Plugins;
-use frontend::frontend::{Frontend, FrontendOptions};
+use frontend::frontend::FrontendOptions;
 use frontend::grpc::GrpcOptions;
 use frontend::influxdb::InfluxdbOptions;
-use frontend::instance::Instance;
+use frontend::instance::{FrontendInstance, Instance as FeInstance};
 use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
@@ -34,6 +34,24 @@ use snafu::ResultExt;
 use crate::error::{self, IllegalAuthConfigSnafu, Result};
 use crate::toml_loader;

+pub struct Instance {
+frontend: FeInstance,
+}
+
+impl Instance {
+pub async fn run(&mut self) -> Result<()> {
+self.frontend
+.start()
+.await
+.context(error::StartFrontendSnafu)
+}
+
+pub async fn stop(&self) -> Result<()> {
+// TODO: handle frontend shutdown
+Ok(())
+}
+}
+
 #[derive(Parser)]
 pub struct Command {
 #[clap(subcommand)]
@@ -41,8 +59,8 @@ pub struct Command {
 }

 impl Command {
-pub async fn run(self) -> Result<()> {
+pub async fn build(self) -> Result<Instance> {
-self.subcmd.run().await
+self.subcmd.build().await
 }
 }

@@ -52,9 +70,9 @@ enum SubCommand {
 }

 impl SubCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 match self {
-SubCommand::Start(cmd) => cmd.run().await,
+SubCommand::Start(cmd) => cmd.build().await,
 }
 }
 }
@@ -90,16 +108,20 @@ pub struct StartCommand {
 }

 impl StartCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
 let opts: FrontendOptions = self.try_into()?;

-let instance = Instance::try_new_distributed(&opts, plugins.clone())
+let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
 .await
 .context(error::StartFrontendSnafu)?;

-let mut frontend = Frontend::new(opts, instance, plugins);
-frontend.start().await.context(error::StartFrontendSnafu)
+instance
+.build_servers(&opts, plugins)
+.await
+.context(error::StartFrontendSnafu)?;
+
+Ok(Instance { frontend: instance })
 }
 }

@@ -14,13 +14,32 @@

 use clap::Parser;
 use common_telemetry::{info, logging, warn};
-use meta_srv::bootstrap;
+use meta_srv::bootstrap::MetaSrvInstance;
 use meta_srv::metasrv::MetaSrvOptions;
 use snafu::ResultExt;

 use crate::error::{Error, Result};
 use crate::{error, toml_loader};

+pub struct Instance {
+instance: MetaSrvInstance,
+}
+
+impl Instance {
+pub async fn run(&mut self) -> Result<()> {
+self.instance
+.start()
+.await
+.context(error::StartMetaServerSnafu)?;
+Ok(())
+}
+
+pub async fn stop(&self) -> Result<()> {
+// TODO: handle metasrv shutdown
+Ok(())
+}
+}
+
 #[derive(Parser)]
 pub struct Command {
 #[clap(subcommand)]
@@ -28,8 +47,8 @@ pub struct Command {
 }

 impl Command {
-pub async fn run(self) -> Result<()> {
+pub async fn build(self) -> Result<Instance> {
-self.subcmd.run().await
+self.subcmd.build().await
 }
 }

@@ -39,9 +58,9 @@ enum SubCommand {
 }

 impl SubCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 match self {
-SubCommand::Start(cmd) => cmd.run().await,
+SubCommand::Start(cmd) => cmd.build().await,
 }
 }
 }
@@ -63,16 +82,17 @@ struct StartCommand {
 }

 impl StartCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 logging::info!("MetaSrv start command: {:#?}", self);

 let opts: MetaSrvOptions = self.try_into()?;

 logging::info!("MetaSrv options: {:#?}", opts);
-bootstrap::bootstrap_meta_srv(opts)
+let instance = MetaSrvInstance::new(opts)
 .await
-.context(error::StartMetaServerSnafu)
+.context(error::BuildMetaServerSnafu)?;
+
+Ok(Instance { instance })
 }
 }

@@ -21,10 +21,10 @@ use datanode::datanode::{
 CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
 };
 use datanode::instance::InstanceRef;
-use frontend::frontend::{Frontend, FrontendOptions};
+use frontend::frontend::FrontendOptions;
 use frontend::grpc::GrpcOptions;
 use frontend::influxdb::InfluxdbOptions;
-use frontend::instance::Instance as FeInstance;
+use frontend::instance::{FrontendInstance, Instance as FeInstance};
 use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
@@ -47,8 +47,8 @@ pub struct Command {
 }

 impl Command {
-pub async fn run(self) -> Result<()> {
+pub async fn build(self) -> Result<Instance> {
-self.subcmd.run().await
+self.subcmd.build().await
 }
 }

@@ -58,9 +58,9 @@ enum SubCommand {
 }

 impl SubCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 match self {
-SubCommand::Start(cmd) => cmd.run().await,
+SubCommand::Start(cmd) => cmd.build().await,
 }
 }
 }
@@ -133,6 +133,30 @@ impl StandaloneOptions {
 }
 }

+pub struct Instance {
+datanode: Datanode,
+frontend: FeInstance,
+}
+
+impl Instance {
+pub async fn run(&mut self) -> Result<()> {
+// Start datanode instance before starting services, to avoid requests come in before internal components are started.
+self.datanode
+.start_instance()
+.await
+.context(StartDatanodeSnafu)?;
+info!("Datanode instance started");
+
+self.frontend.start().await.context(StartFrontendSnafu)?;
+Ok(())
+}
+
+pub async fn stop(&self) -> Result<()> {
+// TODO: handle standalone shutdown
+Ok(())
+}
+}
+
 #[derive(Debug, Parser)]
 struct StartCommand {
 #[clap(long)]
@@ -164,7 +188,7 @@ struct StartCommand {
 }

 impl StartCommand {
-async fn run(self) -> Result<()> {
+async fn build(self) -> Result<Instance> {
 let enable_memory_catalog = self.enable_memory_catalog;
 let config_file = self.config_file.clone();
 let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -184,33 +208,30 @@ impl StartCommand {
 fe_opts, dn_opts
 );

-let mut datanode = Datanode::new(dn_opts.clone())
+let datanode = Datanode::new(dn_opts.clone())
 .await
 .context(StartDatanodeSnafu)?;
-let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;

-// Start datanode instance before starting services, to avoid requests come in before internal components are started.
-datanode
-.start_instance()
+let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
+frontend
+.build_servers(&fe_opts, plugins)
 .await
-.context(StartDatanodeSnafu)?;
-info!("Datanode instance started");
+.context(StartFrontendSnafu)?;

-frontend.start().await.context(StartFrontendSnafu)?;
-Ok(())
+Ok(Instance { datanode, frontend })
 }
 }

 /// Build frontend instance in standalone mode
 async fn build_frontend(
-fe_opts: FrontendOptions,
 plugins: Arc<Plugins>,
 datanode_instance: InstanceRef,
-) -> Result<Frontend<FeInstance>> {
+) -> Result<FeInstance> {
 let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
 frontend_instance.set_script_handler(datanode_instance);
 frontend_instance.set_plugins(plugins.clone());
-Ok(Frontend::new(fe_opts, frontend_instance, plugins))
+Ok(frontend_instance)
 }

 impl TryFrom<StartCommand> for FrontendOptions {
@@ -46,6 +46,9 @@ mod tests {
 }
 }

+// TODO(LFC): Un-ignore this REPL test.
+// Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
+#[ignore]
 #[test]
 fn test_repl() {
 let data_dir = create_temp_dir("data");
@@ -12,17 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::sync::Arc;
-mod from_unixtime;
-
-use from_unixtime::FromUnixtimeFunction;
-
 use crate::scalars::function_registry::FunctionRegistry;

 pub(crate) struct TimestampFunction;

 impl TimestampFunction {
-pub fn register(registry: &FunctionRegistry) {
+pub fn register(_registry: &FunctionRegistry) {}
-registry.register(Arc::new(FromUnixtimeFunction::default()));
-}
 }
@@ -1,133 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! from_unixtime function.
-/// TODO(dennis) It can be removed after we upgrade datafusion.
-use std::fmt;
-use std::sync::Arc;
-
-use common_query::error::{
-ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, Volatility};
-use datatypes::arrow::compute;
-use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
-use datatypes::data_type::DataType;
-use datatypes::prelude::ConcreteDataType;
-use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
-use snafu::ResultExt;
-
-use crate::scalars::function::{Function, FunctionContext};
-
-#[derive(Clone, Debug, Default)]
-pub struct FromUnixtimeFunction;
-
-const NAME: &str = "from_unixtime";
-
-impl Function for FromUnixtimeFunction {
-fn name(&self) -> &str {
-NAME
-}
-
-fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-Ok(ConcreteDataType::timestamp_millisecond_datatype())
-}
-
-fn signature(&self) -> Signature {
-Signature::uniform(
-1,
-vec![ConcreteDataType::int64_datatype()],
-Volatility::Immutable,
-)
-}
-
-fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
-match columns[0].data_type() {
-ConcreteDataType::Int64(_) => {
-let array = columns[0].to_arrow_array();
-// Our timestamp vector's time unit is millisecond
-let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
-.context(ArrowComputeSnafu)?;
-
-let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
-Ok(Arc::new(
-TimestampMillisecondVector::try_from_arrow_array(
-compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
-typ: ArrowDatatype::Int64,
-})?,
-)
-.context(IntoVectorSnafu {
-data_type: arrow_datatype.clone(),
-})?,
-))
-}
-_ => UnsupportedInputDataTypeSnafu {
-function: NAME,
-datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
-}
-.fail(),
-}
-}
-}
-
-impl fmt::Display for FromUnixtimeFunction {
-fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-write!(f, "FROM_UNIXTIME")
-}
-}
-
-#[cfg(test)]
-mod tests {
-use common_query::prelude::TypeSignature;
-use datatypes::value::Value;
-use datatypes::vectors::Int64Vector;
-
-use super::*;
-
-#[test]
-fn test_from_unixtime() {
-let f = FromUnixtimeFunction::default();
-assert_eq!("from_unixtime", f.name());
-assert_eq!(
-ConcreteDataType::timestamp_millisecond_datatype(),
-f.return_type(&[]).unwrap()
-);
-
-assert!(matches!(f.signature(),
-Signature {
-type_signature: TypeSignature::Uniform(1, valid_types),
-volatility: Volatility::Immutable
-} if valid_types == vec![ConcreteDataType::int64_datatype()]
-));
-
-let times = vec![Some(1494410783), None, Some(1494410983)];
-let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
-
-let vector = f.eval(FunctionContext::default(), &args).unwrap();
-assert_eq!(3, vector.len());
-for (i, t) in times.iter().enumerate() {
-let v = vector.get(i);
-if i == 1 {
-assert_eq!(Value::Null, v);
-continue;
-}
-match v {
-Value::Timestamp(ts) => {
-assert_eq!(ts.value(), t.unwrap() * 1000);
-}
-_ => unreachable!(),
-}
-}
-}
-}
@@ -15,6 +15,3 @@ tokio.workspace = true
 [dependencies.tikv-jemalloc-sys]
 version = "0.5"
 features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]
-
-[profile.release]
-debug = true
@@ -14,6 +14,7 @@ object-store = { path = "../../object-store" }
 serde.workspace = true
 serde_json = "1.0"
 smallvec = "1"
+backon = "0.4.0"
 snafu.workspace = true
 tokio.workspace = true
 uuid.workspace = true
@@ -97,6 +97,16 @@ pub enum Error {
 source: Arc<Error>,
 backtrace: Backtrace,
 },
+
+#[snafu(display(
+"Procedure retry exceeded max times, procedure_id: {}, source:{}",
+procedure_id,
+source
+))]
+RetryTimesExceeded {
+source: Arc<Error>,
+procedure_id: ProcedureId,
+},
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -111,6 +121,7 @@ impl ErrorExt for Error {
 | Error::ListState { .. }
 | Error::ReadState { .. }
 | Error::FromJson { .. }
+| Error::RetryTimesExceeded { .. }
 | Error::RetryLater { .. }
 | Error::WaitWatcher { .. } => StatusCode::Internal,
 Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
@@ -17,8 +17,10 @@ mod runner;

 use std::collections::{HashMap, VecDeque};
 use std::sync::{Arc, Mutex, RwLock};
+use std::time::Duration;

 use async_trait::async_trait;
+use backon::ExponentialBuilder;
 use common_telemetry::logging;
 use object_store::ObjectStore;
 use snafu::ensure;
@@ -291,12 +293,16 @@ impl ManagerContext {
 pub struct ManagerConfig {
 /// Object store
 pub object_store: ObjectStore,
+pub max_retry_times: usize,
+pub retry_delay: Duration,
 }

 /// A [ProcedureManager] that maintains procedure states locally.
 pub struct LocalManager {
 manager_ctx: Arc<ManagerContext>,
 state_store: StateStoreRef,
+max_retry_times: usize,
+retry_delay: Duration,
 }

 impl LocalManager {
@@ -305,6 +311,8 @@ impl LocalManager {
 LocalManager {
 manager_ctx: Arc::new(ManagerContext::new()),
 state_store: Arc::new(ObjectStateStore::new(config.object_store)),
+max_retry_times: config.max_retry_times,
+retry_delay: config.retry_delay,
 }
 }

@@ -321,7 +329,11 @@ impl LocalManager {
 procedure,
 manager_ctx: self.manager_ctx.clone(),
 step,
+exponential_builder: ExponentialBuilder::default()
+.with_min_delay(self.retry_delay)
+.with_max_times(self.max_retry_times),
 store: ProcedureStore::new(self.state_store.clone()),
+rolling_back: false,
 };

 let watcher = meta.state_receiver.clone();
@@ -543,6 +555,8 @@ mod tests {
 let dir = create_temp_dir("register");
 let config = ManagerConfig {
 object_store: test_util::new_object_store(&dir),
+max_retry_times: 3,
+retry_delay: Duration::from_millis(500),
 };
 let manager = LocalManager::new(config);

@@ -562,6 +576,8 @@
 let object_store = test_util::new_object_store(&dir);
 let config = ManagerConfig {
 object_store: object_store.clone(),
+max_retry_times: 3,
+retry_delay: Duration::from_millis(500),
 };
 let manager = LocalManager::new(config);

@@ -606,6 +622,8 @@
 let dir = create_temp_dir("submit");
 let config = ManagerConfig {
 object_store: test_util::new_object_store(&dir),
+max_retry_times: 3,
+retry_delay: Duration::from_millis(500),
 };
 let manager = LocalManager::new(config);

@@ -652,6 +670,8 @@
 let dir = create_temp_dir("on_err");
 let config = ManagerConfig {
 object_store: test_util::new_object_store(&dir),
+max_retry_times: 3,
+retry_delay: Duration::from_millis(500),
 };
 let manager = LocalManager::new(config);

@@ -15,15 +15,15 @@
 use std::sync::Arc;
 use std::time::Duration;

+use backon::{BackoffBuilder, ExponentialBuilder};
 use common_telemetry::logging;
 use tokio::time;

 use crate::error::{ProcedurePanicSnafu, Result};
 use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
 use crate::store::ProcedureStore;
-use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
+use crate::ProcedureState::Retrying;
+use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
-const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);

 #[derive(Debug)]
 enum ExecResult {
@@ -108,7 +108,9 @@
 pub(crate) procedure: BoxedProcedure,
 pub(crate) manager_ctx: Arc<ManagerContext>,
 pub(crate) step: u32,
+pub(crate) exponential_builder: ExponentialBuilder,
 pub(crate) store: ProcedureStore,
+pub(crate) rolling_back: bool,
 }

 impl Runner {
@@ -164,18 +166,56 @@ impl Runner {
 provider: self.manager_ctx.clone(),
 };

+self.rolling_back = false;
+self.execute_once_with_retry(&ctx).await;
+}
+
+async fn execute_once_with_retry(&mut self, ctx: &Context) {
+let mut retry = self.exponential_builder.build();
+let mut retry_times = 0;
 loop {
-match self.execute_once(&ctx).await {
+match self.execute_once(ctx).await {
-ExecResult::Continue => (),
 ExecResult::Done | ExecResult::Failed => return,
+ExecResult::Continue => (),
 ExecResult::RetryLater => {
-self.wait_on_err().await;
+retry_times += 1;
+if let Some(d) = retry.next() {
+self.wait_on_err(d, retry_times).await;
+} else {
+assert!(self.meta.state().is_retrying());
+if let Retrying { error } = self.meta.state() {
+self.meta.set_state(ProcedureState::failed(Arc::new(
+Error::RetryTimesExceeded {
+source: error,
+procedure_id: self.meta.id,
+},
+)))
+}
+return;
+}
 }
 }
 }
 }

+async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
+if let Err(e) = self.rollback_procedure().await {
+self.rolling_back = true;
+self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
+return ExecResult::RetryLater;
+}
+self.meta.set_state(ProcedureState::failed(error));
+ExecResult::Failed
+}
+
 async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
+// if rolling_back, there is no need to execute again.
+if self.rolling_back {
+// We can definitely get the previous error here.
+let state = self.meta.state();
+let err = state.error().unwrap();
+return self.rollback(err.clone()).await;
+}
 match self.procedure.execute(ctx).await {
 Ok(status) => {
 logging::debug!(
@@ -186,8 +226,11 @@ impl Runner {
 status.need_persist(),
 );

-if status.need_persist() && self.persist_procedure().await.is_err() {
+if status.need_persist() {
-return ExecResult::RetryLater;
+if let Err(err) = self.persist_procedure().await {
+self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
+return ExecResult::RetryLater;
+}
 }

 match status {
@@ -196,7 +239,8 @@
 self.on_suspended(subprocedures).await;
 }
 Status::Done => {
-if self.commit_procedure().await.is_err() {
+if let Err(e) = self.commit_procedure().await {
+self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
 return ExecResult::RetryLater;
 }

@@ -217,17 +261,12 @@
 );

 if e.is_retry_later() {
+self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
 return ExecResult::RetryLater;
 }

-self.meta.set_state(ProcedureState::failed(Arc::new(e)));
-
 // Write rollback key so we can skip this procedure while recovering procedures.
-if self.rollback_procedure().await.is_err() {
+self.rollback(Arc::new(e)).await
-return ExecResult::RetryLater;
-}
-
-ExecResult::Failed
 }
 }
 }
@@ -261,7 +300,9 @@
 procedure,
 manager_ctx: self.manager_ctx.clone(),
 step,
+exponential_builder: self.exponential_builder.clone(),
 store: self.store.clone(),
+rolling_back: false,
 };

 // Insert the procedure. We already check the procedure existence before inserting
@@ -285,8 +326,16 @@
 });
 }

-async fn wait_on_err(&self) {
-time::sleep(ERR_WAIT_DURATION).await;
+/// Extend the retry time to wait for the next retry.
+async fn wait_on_err(&self, d: Duration, i: u64) {
+logging::info!(
+"Procedure {}-{} retry for the {} times after {} millis",
+self.procedure.type_name(),
+self.meta.id,
+i,
+d.as_millis(),
+);
+time::sleep(d).await;
 }

 async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
@@ -416,7 +465,9 @@ mod tests {
 procedure,
 manager_ctx: Arc::new(ManagerContext::new()),
 step: 0,
+exponential_builder: ExponentialBuilder::default(),
 store,
+rolling_back: false,
 }
 }

@@ -744,7 +795,7 @@ mod tests {

 let res = runner.execute_once(&ctx).await;
 assert!(res.is_retry_later(), "{res:?}");
-assert!(meta.state().is_running());
+assert!(meta.state().is_retrying());

 let res = runner.execute_once(&ctx).await;
 assert!(res.is_done(), "{res:?}");
@@ -752,6 +803,36 @@
 check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
 }

+#[tokio::test]
+async fn test_execute_exceed_max_retry_later() {
+let exec_fn =
+|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
+
+let exceed_max_retry_later = ProcedureAdapter {
+data: "exceed_max_retry_later".to_string(),
+lock_key: LockKey::single("catalog.schema.table"),
+exec_fn,
+};
+
+let dir = create_temp_dir("exceed_max_retry_later");
+let meta = exceed_max_retry_later.new_meta(ROOT_ID);
+let object_store = test_util::new_object_store(&dir);
+let procedure_store = ProcedureStore::from(object_store.clone());
+let mut runner = new_runner(
+meta.clone(),
+Box::new(exceed_max_retry_later),
+procedure_store,
+);
+runner.exponential_builder = ExponentialBuilder::default()
+.with_min_delay(Duration::from_millis(1))
+.with_max_times(3);
+
+// Run the runner and execute the procedure.
+runner.execute_procedure_in_loop().await;
+let err = meta.state().error().unwrap().to_string();
+assert!(err.contains("Procedure retry exceeded max times"));
+}
+
 #[tokio::test]
 async fn test_child_error() {
 let mut times = 0;
@@ -819,7 +900,7 @@
 // Replace the manager ctx.
 runner.manager_ctx = manager_ctx;

-// Run the runer and execute the procedure.
+// Run the runner and execute the procedure.
 runner.run().await;
 let err = meta.state().error().unwrap().to_string();
 assert!(err.contains("subprocedure failed"), "{err}");
@@ -206,6 +206,8 @@ pub enum ProcedureState {
 Running,
 /// The procedure is finished.
 Done,
+/// The procedure is failed and can be retried.
+Retrying { error: Arc<Error> },
 /// The procedure is failed and cannot proceed anymore.
 Failed { error: Arc<Error> },
 }
@@ -216,6 +218,11 @@
 ProcedureState::Failed { error }
 }

+/// Returns a [ProcedureState] with retrying state.
+pub fn retrying(error: Arc<Error>) -> ProcedureState {
+ProcedureState::Retrying { error }
+}
+
 /// Returns true if the procedure state is running.
 pub fn is_running(&self) -> bool {
 matches!(self, ProcedureState::Running)
@@ -231,10 +238,16 @@
 matches!(self, ProcedureState::Failed { .. })
 }

+/// Returns true if the procedure state is retrying.
+pub fn is_retrying(&self) -> bool {
+matches!(self, ProcedureState::Retrying { .. })
+}
+
 /// Returns the error.
 pub fn error(&self) -> Option<&Arc<Error>> {
 match self {
 ProcedureState::Failed { error } => Some(error),
+ProcedureState::Retrying { error } => Some(error),
 _ => None,
 }
 }
@@ -33,6 +33,9 @@ pub async fn wait(watcher: &mut Watcher) -> Result<()> {
 ProcedureState::Failed { error } => {
 return Err(error.clone()).context(ProcedureExecSnafu);
 }
+ProcedureState::Retrying { error } => {
+return Err(error.clone()).context(ProcedureExecSnafu);
+}
 }
 }
 }
@@ -64,6 +64,7 @@ tonic.workspace = true
 tower = { version = "0.4", features = ["full"] }
 tower-http = { version = "0.3", features = ["full"] }
 url = "2.3.1"
+uuid.workspace = true

 [dev-dependencies]
 axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
@@ -77,6 +77,7 @@ impl Default for ObjectStoreConfig {
 }

 #[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
 pub struct WalConfig {
 // wal directory
 pub dir: String,
@@ -108,6 +109,7 @@ impl Default for WalConfig {

 /// Options for table compaction
 #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
+#[serde(default)]
 pub struct CompactionConfig {
 /// Max task number that can concurrently run.
 pub max_inflight_tasks: usize,
@@ -149,11 +151,22 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
 pub struct ProcedureConfig {
 /// Storage config for procedure manager.
 pub store: ObjectStoreConfig,
+/// Max retry times of procedure.
+pub max_retry_times: usize,
+/// Initial retry delay of procedures, increases exponentially.
+#[serde(with = "humantime_serde")]
+pub retry_delay: Duration,
 }

 impl Default for ProcedureConfig {
 fn default() -> ProcedureConfig {
-ProcedureConfig::from_file_path("/tmp/greptimedb/procedure/".to_string())
+ProcedureConfig {
+store: ObjectStoreConfig::File(FileConfig {
+data_dir: "/tmp/greptimedb/procedure/".to_string(),
+}),
+max_retry_times: 3,
+retry_delay: Duration::from_millis(500),
+}
 }
 }

@@ -161,6 +174,7 @@ impl ProcedureConfig {
 pub fn from_file_path(path: String) -> ProcedureConfig {
 ProcedureConfig {
 store: ObjectStoreConfig::File(FileConfig { data_dir: path }),
+..Default::default()
 }
 }
 }
@@ -240,6 +254,20 @@ impl Datanode {
 pub fn get_instance(&self) -> InstanceRef {
 self.instance.clone()
 }

+async fn shutdown_instance(&self) -> Result<()> {
+self.instance.shutdown().await
+}
+
+async fn shutdown_services(&self) -> Result<()> {
+self.services.shutdown().await
+}
+
+pub async fn shutdown(&self) -> Result<()> {
+// We must shutdown services first
+self.shutdown_services().await?;
+self.shutdown_instance().await
+}
 }

 #[cfg(test)]
@@ -35,6 +35,24 @@ pub enum Error {
 source: query::error::Error,
 },

+#[snafu(display("Failed to plan statement, source: {}", source))]
+PlanStatement {
+#[snafu(backtrace)]
+source: query::error::Error,
+},
+
+#[snafu(display("Failed to execute statement, source: {}", source))]
+ExecuteStatement {
+#[snafu(backtrace)]
+source: query::error::Error,
+},
+
+#[snafu(display("Failed to execute logical plan, source: {}", source))]
+ExecuteLogicalPlan {
+#[snafu(backtrace)]
+source: query::error::Error,
+},
+
 #[snafu(display("Failed to decode logical plan, source: {}", source))]
 DecodeLogicalPlan {
 #[snafu(backtrace)]
@@ -151,6 +169,13 @@
 source: TableError,
 },

+#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
+FlushTable {
+table_name: String,
+#[snafu(backtrace)]
+source: TableError,
+},
+
 #[snafu(display("Failed to start server, source: {}", source))]
 StartServer {
 #[snafu(backtrace)]
@@ -482,6 +507,24 @@
 #[snafu(backtrace)]
 source: common_procedure::error::Error,
 },

+#[snafu(display("Failed to close table engine, source: {}", source))]
+CloseTableEngine {
+#[snafu(backtrace)]
+source: BoxedError,
+},
+
+#[snafu(display("Failed to shutdown server, source: {}", source))]
+ShutdownServer {
+#[snafu(backtrace)]
+source: servers::error::Error,
+},
+
+#[snafu(display("Failed to shutdown instance, source: {}", source))]
+ShutdownInstance {
+#[snafu(backtrace)]
+source: BoxedError,
+},
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -490,7 +533,12 @@ impl ErrorExt for Error {
 fn status_code(&self) -> StatusCode {
 use Error::*;
 match self {
-ExecuteSql { source } | DescribeStatement { source } => source.status_code(),
+ExecuteSql { source }
+| PlanStatement { source }
+| ExecuteStatement { source }
+| ExecuteLogicalPlan { source }
+| DescribeStatement { source } => source.status_code(),
+
 DecodeLogicalPlan { source } => source.status_code(),
 NewCatalog { source } | RegisterSchema { source } => source.status_code(),
 FindTable { source, .. } => source.status_code(),
@@ -498,6 +546,7 @@
 source.status_code()
 }
 DropTable { source, .. } => source.status_code(),
+FlushTable { source, .. } => source.status_code(),

 Insert { source, .. } => source.status_code(),
 Delete { source, .. } => source.status_code(),
@@ -550,7 +599,10 @@
 | BuildParquetRecordBatchStream { .. }
 | InvalidSchema { .. }
 | ParseDataTypes { .. }
-| IncorrectInternalState { .. } => StatusCode::Internal,
+| IncorrectInternalState { .. }
+| ShutdownServer { .. }
+| ShutdownInstance { .. }
+| CloseTableEngine { .. } => StatusCode::Internal,

 BuildBackend { .. }
 | InitBackend { .. }
@@ -17,7 +17,7 @@ use std::sync::Arc;
 use std::time::Duration;

 use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
-use catalog::{region_number, CatalogManagerRef};
+use catalog::{region_stats, CatalogManagerRef};
 use common_telemetry::{error, info, warn};
 use meta_client::client::{HeartbeatSender, MetaClient};
 use snafu::ResultExt;
@@ -106,11 +106,11 @@ impl HeartbeatTask {
 let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
 common_runtime::spawn_bg(async move {
 while running.load(Ordering::Acquire) {
-let region_num = match region_number(&catalog_manager_clone).await {
+let (region_num, region_stats) = match region_stats(&catalog_manager_clone).await {
-Ok(region_num) => region_num as i64,
+Ok(region_stats) => (region_stats.len() as i64, region_stats),
 Err(e) => {
-error!("failed to get region number, err: {e:?}");
+error!("failed to get region status, err: {e:?}");
--1
+(-1, vec![])
 }
 };

@@ -123,6 +123,7 @@
 region_num,
 ..Default::default()
 }),
+region_stats,
 ..Default::default()
 };

@@ -144,6 +145,18 @@

 Ok(())
 }
+
+pub async fn close(&self) -> Result<()> {
+let running = self.running.clone();
+if running
+.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
+.is_err()
+{
+warn!("Call close heartbeat task multiple times");
+}
+
+Ok(())
+}
 }

 /// Resolves hostname:port address for meta registration
@@ -20,6 +20,7 @@ use catalog::remote::MetaKvBackend;
 use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
 use common_base::readable_size::ReadableSize;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
+use common_error::prelude::BoxedError;
 use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
 use common_procedure::local::{LocalManager, ManagerConfig};
 use common_procedure::ProcedureManagerRef;
@@ -51,7 +52,7 @@ use crate::datanode::{
 };
 use crate::error::{
     self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
-    NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result,
+    NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
 };
 use crate::heartbeat::HeartbeatTask;
 use crate::script::ScriptExecutor;
@@ -77,9 +78,6 @@ pub type InstanceRef = Arc<Instance>;
 
 impl Instance {
     pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
-        let object_store = new_object_store(&opts.storage).await?;
-        let logstore = Arc::new(create_log_store(&opts.wal).await?);
-
         let meta_client = match opts.mode {
             Mode::Standalone => None,
             Mode::Distributed => {
@@ -96,11 +94,22 @@ impl Instance {
 
         let compaction_scheduler = create_compaction_scheduler(opts);
 
+        Self::new_with(opts, meta_client, compaction_scheduler).await
+    }
+
+    pub(crate) async fn new_with(
+        opts: &DatanodeOptions,
+        meta_client: Option<Arc<MetaClient>>,
+        compaction_scheduler: CompactionSchedulerRef<RaftEngineLogStore>,
+    ) -> Result<Self> {
+        let object_store = new_object_store(&opts.storage).await?;
+        let log_store = Arc::new(create_log_store(&opts.wal).await?);
+
         let table_engine = Arc::new(DefaultEngine::new(
             TableEngineConfig::default(),
             EngineImpl::new(
                 StorageEngineConfig::from(opts),
-                logstore.clone(),
+                log_store.clone(),
                 object_store.clone(),
                 compaction_scheduler,
             ),
@@ -108,7 +117,7 @@ impl Instance {
         ));
 
         // create remote catalog manager
-        let (catalog_manager, factory, table_id_provider) = match opts.mode {
+        let (catalog_manager, table_id_provider) = match opts.mode {
             Mode::Standalone => {
                 if opts.enable_memory_catalog {
                     let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
@@ -125,11 +134,8 @@ impl Instance {
                         .await
                         .expect("Failed to register numbers");
 
-                    let factory = QueryEngineFactory::new(catalog.clone());
-
                     (
                         catalog.clone() as CatalogManagerRef,
-                        factory,
                         Some(catalog as TableIdProviderRef),
                     )
                 } else {
@@ -138,11 +144,9 @@ impl Instance {
                         .await
                         .context(CatalogSnafu)?,
                     );
-                    let factory = QueryEngineFactory::new(catalog.clone());
 
                     (
                         catalog.clone() as CatalogManagerRef,
-                        factory,
                         Some(catalog as TableIdProviderRef),
                     )
                 }
@@ -156,11 +160,11 @@ impl Instance {
                         client: meta_client.as_ref().unwrap().clone(),
                     }),
                 ));
-                let factory = QueryEngineFactory::new(catalog.clone());
-                (catalog as CatalogManagerRef, factory, None)
+                (catalog as CatalogManagerRef, None)
             }
         };
 
+        let factory = QueryEngineFactory::new(catalog_manager.clone());
        let query_engine = factory.query_engine();
        let script_executor =
            ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
@@ -220,6 +224,22 @@ impl Instance {
         Ok(())
     }
 
+    pub async fn shutdown(&self) -> Result<()> {
+        if let Some(heartbeat_task) = &self.heartbeat_task {
+            heartbeat_task
+                .close()
+                .await
+                .map_err(BoxedError::new)
+                .context(ShutdownInstanceSnafu)?;
+        }
+
+        self.sql_handler
+            .close()
+            .await
+            .map_err(BoxedError::new)
+            .context(ShutdownInstanceSnafu)
+    }
+
     pub fn sql_handler(&self) -> &SqlHandler {
         &self.sql_handler
     }
@@ -227,6 +247,10 @@ impl Instance {
     pub fn catalog_manager(&self) -> &CatalogManagerRef {
         &self.catalog_manager
     }
+
+    pub fn query_engine(&self) -> QueryEngineRef {
+        self.query_engine.clone()
+    }
 }
 
 fn create_compaction_scheduler<S: LogStore>(opts: &DatanodeOptions) -> CompactionSchedulerRef<S> {
@@ -436,7 +460,11 @@ pub(crate) async fn create_procedure_manager(
     );
 
     let object_store = new_object_store(&procedure_config.store).await?;
-    let manager_config = ManagerConfig { object_store };
+    let manager_config = ManagerConfig {
+        object_store,
+        max_retry_times: procedure_config.max_retry_times,
+        retry_delay: procedure_config.retry_delay,
+    };
 
     Ok(Some(Arc::new(LocalManager::new(manager_config))))
 }
@@ -18,15 +18,19 @@ use api::v1::query_request::Query;
 use api::v1::{CreateDatabaseExpr, DdlRequest, InsertRequest};
 use async_trait::async_trait;
 use common_query::Output;
-use query::parser::QueryLanguageParser;
+use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
 use query::plan::LogicalPlan;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use session::context::QueryContextRef;
 use snafu::prelude::*;
+use sql::statements::statement::Statement;
 use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
 use table::requests::CreateDatabaseRequest;
 
-use crate::error::{self, DecodeLogicalPlanSnafu, ExecuteSqlSnafu, Result};
+use crate::error::{
+    self, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, ExecuteSqlSnafu, PlanStatementSnafu,
+    Result,
+};
 use crate::instance::Instance;
 
 impl Instance {
@@ -51,17 +55,42 @@ impl Instance {
         self.query_engine
             .execute(&LogicalPlan::DfPlan(logical_plan))
             .await
-            .context(ExecuteSqlSnafu)
+            .context(ExecuteLogicalPlanSnafu)
     }
 
     async fn handle_query(&self, query: Query, ctx: QueryContextRef) -> Result<Output> {
-        Ok(match query {
+        match query {
             Query::Sql(sql) => {
                 let stmt = QueryLanguageParser::parse_sql(&sql).context(ExecuteSqlSnafu)?;
-                self.execute_stmt(stmt, ctx).await?
+                match stmt {
+                    // TODO(LFC): Remove SQL execution branch here.
+                    // Keep this because substrait can't handle much of SQLs now.
+                    QueryStatement::Sql(Statement::Query(_)) => {
+                        let plan = self
+                            .query_engine
+                            .planner()
+                            .plan(stmt, ctx)
+                            .await
+                            .context(PlanStatementSnafu)?;
+                        self.query_engine
+                            .execute(&plan)
+                            .await
+                            .context(ExecuteLogicalPlanSnafu)
+                    }
+                    _ => self.execute_stmt(stmt, ctx).await,
+                }
             }
-            Query::LogicalPlan(plan) => self.execute_logical(plan).await?,
-        })
+            Query::LogicalPlan(plan) => self.execute_logical(plan).await,
+            Query::PromRangeQuery(promql) => {
+                let prom_query = PromQuery {
+                    query: promql.query,
+                    start: promql.start,
+                    end: promql.end,
+                    step: promql.step,
+                };
+                self.execute_promql(&prom_query, ctx).await
+            }
+        }
     }
 
     pub async fn handle_insert(
@@ -98,7 +127,7 @@ impl Instance {
             DdlExpr::Alter(expr) => self.handle_alter(expr).await,
             DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
             DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
-            DdlExpr::FlushTable(_) => todo!(),
+            DdlExpr::FlushTable(expr) => self.handle_flush_table(expr).await,
         }
     }
 }
@@ -132,11 +161,23 @@ mod test {
     };
     use common_recordbatch::RecordBatches;
     use datatypes::prelude::*;
+    use query::parser::QueryLanguageParser;
     use session::context::QueryContext;
 
     use super::*;
     use crate::tests::test_util::{self, MockInstance};
 
+    async fn exec_selection(instance: &Instance, sql: &str) -> Output {
+        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+        let engine = instance.query_engine();
+        let plan = engine
+            .planner()
+            .plan(stmt, QueryContext::arc())
+            .await
+            .unwrap();
+        engine.execute(&plan).await.unwrap()
+    }
+
     #[tokio::test(flavor = "multi_thread")]
     async fn test_handle_ddl() {
         let instance = MockInstance::new("test_handle_ddl").await;
@@ -199,22 +240,17 @@ mod test {
         let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
         assert!(matches!(output, Output::AffectedRows(0)));
 
+        let stmt = QueryLanguageParser::parse_sql(
+            "INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
+        )
+        .unwrap();
         let output = instance
-            .execute_sql(
-                "INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
-                QueryContext::arc(),
-            )
+            .execute_stmt(stmt, QueryContext::arc())
             .await
             .unwrap();
         assert!(matches!(output, Output::AffectedRows(1)));
 
-        let output = instance
-            .execute_sql(
-                "SELECT ts, a, b FROM my_database.my_table",
-                QueryContext::arc(),
-            )
-            .await
-            .unwrap();
+        let output = exec_selection(instance, "SELECT ts, a, b FROM my_database.my_table").await;
         let Output::Stream(stream) = output else { unreachable!() };
         let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
         let expected = "\
@@ -280,10 +316,7 @@ mod test {
         let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
         assert!(matches!(output, Output::AffectedRows(3)));
 
-        let output = instance
-            .execute_sql("SELECT ts, host, cpu FROM demo", QueryContext::arc())
-            .await
-            .unwrap();
+        let output = exec_selection(instance, "SELECT ts, host, cpu FROM demo").await;
         let Output::Stream(stream) = output else { unreachable!() };
         let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
         let expected = "\
@@ -17,27 +17,28 @@ use std::time::{Duration, SystemTime};
 use async_trait::async_trait;
 use common_error::prelude::BoxedError;
 use common_query::Output;
-use common_recordbatch::RecordBatches;
 use common_telemetry::logging::info;
 use common_telemetry::timer;
-use datatypes::schema::Schema;
 use futures::StreamExt;
+use query::error::QueryExecutionSnafu;
 use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
+use query::query_engine::StatementHandler;
 use servers::error as server_error;
 use servers::prom::PromHandler;
-use servers::query_handler::sql::SqlQueryHandler;
 use session::context::{QueryContext, QueryContextRef};
 use snafu::prelude::*;
 use sql::ast::ObjectName;
 use sql::statements::copy::CopyTable;
 use sql::statements::statement::Statement;
-use sql::statements::tql::Tql;
 use table::engine::TableReference;
 use table::requests::{
     CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
 };
 
-use crate::error::{self, BumpTableIdSnafu, ExecuteSqlSnafu, Result, TableIdProviderNotFoundSnafu};
+use crate::error::{
+    self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
+    TableIdProviderNotFoundSnafu,
+};
 use crate::instance::Instance;
 use crate::metric;
 use crate::sql::insert::InsertRequests;
@@ -50,18 +51,6 @@ impl Instance {
         query_ctx: QueryContextRef,
     ) -> Result<Output> {
         match stmt {
-            QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Promql(_) => {
-                let logical_plan = self
-                    .query_engine
-                    .statement_to_plan(stmt, query_ctx)
-                    .await
-                    .context(ExecuteSqlSnafu)?;
-
-                self.query_engine
-                    .execute(&logical_plan)
-                    .await
-                    .context(ExecuteSqlSnafu)
-            }
             QueryStatement::Sql(Statement::Insert(insert)) => {
                 let requests = self
                     .sql_handler
@@ -163,11 +152,6 @@ impl Instance {
                     .execute(SqlRequest::ShowTables(show_tables), query_ctx)
                     .await
             }
-            QueryStatement::Sql(Statement::Explain(explain)) => {
-                self.sql_handler
-                    .execute(SqlRequest::Explain(Box::new(explain)), query_ctx)
-                    .await
-            }
             QueryStatement::Sql(Statement::DescribeTable(describe_table)) => {
                 self.sql_handler
                     .execute(SqlRequest::DescribeTable(describe_table), query_ctx)
@@ -176,28 +160,17 @@ impl Instance {
             QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
                 unimplemented!("SHOW CREATE TABLE is unimplemented yet");
             }
-            QueryStatement::Sql(Statement::Use(ref schema)) => {
-                let catalog = &query_ctx.current_catalog();
-                ensure!(
-                    self.is_valid_schema(catalog, schema)?,
-                    error::DatabaseNotFoundSnafu { catalog, schema }
-                );
-
-                query_ctx.set_current_schema(schema);
-
-                Ok(Output::RecordBatches(RecordBatches::empty()))
-            }
             QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
                 CopyTable::To(copy_table) => {
                     let (catalog_name, schema_name, table_name) =
-                        table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
-                    let file_name = copy_table.file_name().to_string();
+                        table_idents_to_full_name(&copy_table.table_name, query_ctx.clone())?;
+                    let file_name = copy_table.file_name;
 
                     let req = CopyTableRequest {
                         catalog_name,
                         schema_name,
                         table_name,
                         file_name,
+                        connection: copy_table.connection,
                     };
 
                     self.sql_handler
@@ -220,49 +193,30 @@ impl Instance {
                         .await
                 }
             },
-            QueryStatement::Sql(Statement::Tql(tql)) => self.execute_tql(tql, query_ctx).await,
+            QueryStatement::Sql(Statement::Query(_))
+            | QueryStatement::Sql(Statement::Explain(_))
+            | QueryStatement::Sql(Statement::Use(_))
+            | QueryStatement::Sql(Statement::Tql(_))
+            | QueryStatement::Promql(_) => unreachable!(),
         }
     }
 
-    pub(crate) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
-        match tql {
-            Tql::Eval(eval) => {
-                let promql = PromQuery {
-                    start: eval.start,
-                    end: eval.end,
-                    step: eval.step,
-                    query: eval.query,
-                };
-                let stmt = QueryLanguageParser::parse_promql(&promql).context(ExecuteSqlSnafu)?;
-                let logical_plan = self
-                    .query_engine
-                    .statement_to_plan(stmt, query_ctx)
-                    .await
-                    .context(ExecuteSqlSnafu)?;
-
-                self.query_engine
-                    .execute(&logical_plan)
-                    .await
-                    .context(ExecuteSqlSnafu)
-            }
-            Tql::Explain(_explain) => {
-                todo!("waiting for promql-parser ast adding a explain node")
-            }
-        }
-    }
-
-    pub async fn execute_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Result<Output> {
-        let stmt = QueryLanguageParser::parse_sql(sql).context(ExecuteSqlSnafu)?;
-        self.execute_stmt(stmt, query_ctx).await
-    }
-
     pub async fn execute_promql(
         &self,
         promql: &PromQuery,
         query_ctx: QueryContextRef,
     ) -> Result<Output> {
+        let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
+
         let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?;
-        self.execute_stmt(stmt, query_ctx).await
+
+        let engine = self.query_engine();
+        let plan = engine
+            .planner()
+            .plan(stmt, query_ctx)
+            .await
+            .context(PlanStatementSnafu)?;
+        engine.execute(&plan).await.context(ExecuteStatementSnafu)
     }
 
     // TODO(ruihang): merge this and `execute_promql` after #951 landed
@@ -291,7 +245,14 @@ impl Instance {
                 eval_stmt.lookback_delta = lookback
             }
         }
-        self.execute_stmt(stmt, query_ctx).await
+
+        let engine = self.query_engine();
+        let plan = engine
+            .planner()
+            .plan(stmt, query_ctx)
+            .await
+            .context(PlanStatementSnafu)?;
+        engine.execute(&plan).await.context(ExecuteStatementSnafu)
     }
 }
 
@@ -327,57 +288,16 @@ pub fn table_idents_to_full_name(
 }
 
 #[async_trait]
-impl SqlQueryHandler for Instance {
-    type Error = error::Error;
-
-    async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
-        let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
-        // we assume sql string has only 1 statement in datanode
-        let result = self.execute_sql(query, query_ctx).await;
-        vec![result]
-    }
-
-    async fn do_promql_query(
+impl StatementHandler for Instance {
+    async fn handle_statement(
         &self,
-        query: &PromQuery,
+        stmt: QueryStatement,
         query_ctx: QueryContextRef,
-    ) -> Vec<Result<Output>> {
-        let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
-        let result = self.execute_promql(query, query_ctx).await;
-        vec![result]
-    }
-
-    async fn do_statement_query(
-        &self,
-        stmt: Statement,
-        query_ctx: QueryContextRef,
-    ) -> Result<Output> {
-        let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
-        self.execute_stmt(QueryStatement::Sql(stmt), query_ctx)
+    ) -> query::error::Result<Output> {
+        self.execute_stmt(stmt, query_ctx)
             .await
-    }
-
-    async fn do_describe(
-        &self,
-        stmt: Statement,
-        query_ctx: QueryContextRef,
-    ) -> Result<Option<Schema>> {
-        if let Statement::Query(_) = stmt {
-            self.query_engine
-                .describe(QueryStatement::Sql(stmt), query_ctx)
-                .await
-                .map(Some)
-                .context(error::DescribeStatementSnafu)
-        } else {
-            Ok(None)
-        }
-    }
-
-    fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
-        self.catalog_manager
-            .schema(catalog, schema)
-            .map(|s| s.is_some())
-            .context(error::CatalogSnafu)
+            .map_err(BoxedError::new)
+            .context(QueryExecutionSnafu)
     }
 }
 
@@ -13,12 +13,13 @@
 // limitations under the License.
 
 #![feature(assert_matches)]
+#![feature(trait_upcasting)]
 
 pub mod datanode;
 pub mod error;
 mod heartbeat;
 pub mod instance;
-mod metric;
+pub mod metric;
 mod mock;
 mod script;
 pub mod server;
@@ -12,32 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::atomic::{AtomicU32, Ordering};
 use std::sync::Arc;
 
-use catalog::remote::MetaKvBackend;
-use catalog::CatalogManagerRef;
-use common_catalog::consts::MIN_USER_TABLE_ID;
 use meta_client::client::{MetaClient, MetaClientBuilder};
 use meta_srv::mocks::MockInfo;
-use mito::config::EngineConfig as TableEngineConfig;
-use query::QueryEngineFactory;
-use servers::Mode;
-use snafu::ResultExt;
 use storage::compaction::noop::NoopCompactionScheduler;
-use storage::config::EngineConfig as StorageEngineConfig;
-use storage::EngineImpl;
-use table::metadata::TableId;
-use table::table::TableIdProvider;
 
 use crate::datanode::DatanodeOptions;
-use crate::error::{CatalogSnafu, RecoverProcedureSnafu, Result};
-use crate::heartbeat::HeartbeatTask;
-use crate::instance::{
-    create_log_store, create_procedure_manager, new_object_store, DefaultEngine, Instance,
-};
-use crate::script::ScriptExecutor;
-use crate::sql::SqlHandler;
+use crate::error::Result;
+use crate::instance::Instance;
 
 impl Instance {
     pub async fn with_mock_meta_client(opts: &DatanodeOptions) -> Result<Self> {
@@ -46,98 +29,9 @@ impl Instance {
     }
 
     pub async fn with_mock_meta_server(opts: &DatanodeOptions, meta_srv: MockInfo) -> Result<Self> {
-        let object_store = new_object_store(&opts.storage).await?;
-        let logstore = Arc::new(create_log_store(&opts.wal).await?);
         let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
         let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
-        let table_engine = Arc::new(DefaultEngine::new(
-            TableEngineConfig::default(),
-            EngineImpl::new(
-                StorageEngineConfig::default(),
-                logstore.clone(),
-                object_store.clone(),
-                compaction_scheduler,
-            ),
-            object_store,
-        ));
-
-        // By default, catalog manager and factory are created in standalone mode
-        let (catalog_manager, factory, heartbeat_task) = match opts.mode {
-            Mode::Standalone => {
-                let catalog = Arc::new(
-                    catalog::local::LocalCatalogManager::try_new(table_engine.clone())
-                        .await
-                        .context(CatalogSnafu)?,
-                );
-                let factory = QueryEngineFactory::new(catalog.clone());
-                (catalog as CatalogManagerRef, factory, None)
-            }
-            Mode::Distributed => {
-                let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
-                    table_engine.clone(),
-                    opts.node_id.unwrap_or(42),
-                    Arc::new(MetaKvBackend {
-                        client: meta_client.clone(),
-                    }),
-                ));
-                let factory = QueryEngineFactory::new(catalog.clone());
-                let heartbeat_task = HeartbeatTask::new(
-                    opts.node_id.unwrap_or(42),
-                    opts.rpc_addr.clone(),
-                    None,
-                    meta_client.clone(),
-                    catalog.clone(),
-                );
-                (catalog as CatalogManagerRef, factory, Some(heartbeat_task))
-            }
-        };
-        let query_engine = factory.query_engine();
-        let script_executor =
-            ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
-
-        let procedure_manager = create_procedure_manager(&opts.procedure).await?;
-        if let Some(procedure_manager) = &procedure_manager {
-            table_engine.register_procedure_loaders(&**procedure_manager);
-            // Recover procedures.
-            procedure_manager
-                .recover()
-                .await
-                .context(RecoverProcedureSnafu)?;
-        }
-
-        Ok(Self {
-            query_engine: query_engine.clone(),
-            sql_handler: SqlHandler::new(
-                table_engine.clone(),
-                catalog_manager.clone(),
-                query_engine.clone(),
-                table_engine,
-                procedure_manager,
-            ),
-            catalog_manager,
-            script_executor,
-            table_id_provider: Some(Arc::new(LocalTableIdProvider::default())),
-            heartbeat_task,
-        })
-    }
-}
-
-struct LocalTableIdProvider {
-    inner: Arc<AtomicU32>,
-}
-
-impl Default for LocalTableIdProvider {
-    fn default() -> Self {
-        Self {
-            inner: Arc::new(AtomicU32::new(MIN_USER_TABLE_ID)),
-        }
-    }
-}
-
-#[async_trait::async_trait]
-impl TableIdProvider for LocalTableIdProvider {
-    async fn next_table_id(&self) -> table::Result<TableId> {
-        Ok(self.inner.fetch_add(1, Ordering::Relaxed))
+        Instance::new_with(opts, Some(meta_client), compaction_scheduler).await
     }
 }
 
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use common_runtime::Builder as RuntimeBuilder;
|
use common_runtime::Builder as RuntimeBuilder;
|
||||||
use common_telemetry::tracing::log::info;
|
|
||||||
use servers::error::Error::InternalIo;
|
|
||||||
use servers::grpc::GrpcServer;
|
use servers::grpc::GrpcServer;
|
||||||
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
|
|
||||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||||
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
|
||||||
use servers::server::Server;
|
use servers::server::Server;
|
||||||
use servers::tls::TlsOption;
|
|
||||||
use servers::Mode;
|
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
|
||||||
use crate::datanode::DatanodeOptions;
|
use crate::datanode::DatanodeOptions;
|
||||||
use crate::error::Error::StartServer;
|
use crate::error::{
|
||||||
use crate::error::{ParseAddrSnafu, Result, RuntimeResourceSnafu, StartServerSnafu};
|
ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownServerSnafu, StartServerSnafu,
|
||||||
|
};
|
||||||
use crate::instance::InstanceRef;
|
use crate::instance::InstanceRef;
|
||||||
|
|
||||||
pub mod grpc;
|
pub mod grpc;
|
||||||
@@ -38,7 +33,6 @@ pub mod grpc;
|
|||||||
/// All rpc services.
|
/// All rpc services.
|
||||||
pub struct Services {
|
pub struct Services {
|
||||||
grpc_server: GrpcServer,
|
grpc_server: GrpcServer,
|
||||||
mysql_server: Option<Box<dyn Server>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Services {
|
impl Services {
|
||||||
@@ -51,48 +45,12 @@ impl Services {
|
|||||||
.context(RuntimeResourceSnafu)?,
|
.context(RuntimeResourceSnafu)?,
|
||||||
);
|
);
|
||||||
|
|
||||||
let mysql_server = match opts.mode {
|
|
||||||
Mode::Standalone => {
|
|
||||||
info!("Disable MySQL server on datanode when running in standalone mode");
|
|
||||||
None
|
|
||||||
}
|
|
||||||
Mode::Distributed => {
|
|
||||||
let mysql_io_runtime = Arc::new(
|
|
||||||
RuntimeBuilder::default()
|
|
||||||
.worker_threads(opts.mysql_runtime_size)
|
|
||||||
.thread_name("mysql-io-handlers")
|
|
||||||
.build()
|
|
||||||
.context(RuntimeResourceSnafu)?,
|
|
||||||
);
|
|
||||||
let tls = TlsOption::default();
|
|
||||||
// default tls config returns None
|
|
||||||
// but try to think a better way to do this
|
|
||||||
Some(MysqlServer::create_server(
|
|
||||||
mysql_io_runtime,
|
|
||||||
Arc::new(MysqlSpawnRef::new(
|
|
||||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
|
||||||
None,
|
|
||||||
)),
|
|
||||||
Arc::new(MysqlSpawnConfig::new(
|
|
||||||
tls.should_force_tls(),
|
|
||||||
tls.setup()
|
|
||||||
.map_err(|e| StartServer {
|
|
||||||
source: InternalIo { source: e },
|
|
||||||
})?
|
|
||||||
.map(Arc::new),
|
|
||||||
false,
|
|
||||||
)),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
grpc_server: GrpcServer::new(
|
grpc_server: GrpcServer::new(
|
||||||
ServerGrpcQueryHandlerAdaptor::arc(instance),
|
ServerGrpcQueryHandlerAdaptor::arc(instance),
|
||||||
None,
|
None,
|
||||||
grpc_runtime,
|
grpc_runtime,
|
||||||
),
|
),
|
||||||
mysql_server,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -100,19 +58,17 @@ impl Services {
|
|||||||
let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
|
let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
|
||||||
addr: &opts.rpc_addr,
|
addr: &opts.rpc_addr,
|
||||||
})?;
|
})?;
|
||||||
|
self.grpc_server
|
||||||
let mut res = vec![self.grpc_server.start(grpc_addr)];
|
.start(grpc_addr)
|
||||||
if let Some(mysql_server) = &self.mysql_server {
|
|
||||||
let mysql_addr = &opts.mysql_addr;
|
|
||||||
let mysql_addr: SocketAddr = mysql_addr
|
|
||||||
.parse()
|
|
||||||
.context(ParseAddrSnafu { addr: mysql_addr })?;
|
|
||||||
res.push(mysql_server.start(mysql_addr));
|
|
||||||
};
|
|
||||||
|
|
||||||
futures::future::try_join_all(res)
|
|
||||||
.await
|
.await
|
||||||
.context(StartServerSnafu)?;
|
.context(StartServerSnafu)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn shutdown(&self) -> Result<()> {
|
||||||
|
self.grpc_server
|
||||||
|
.shutdown()
|
||||||
|
.await
|
||||||
|
.context(ShutdownServerSnafu)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,13 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr};
+use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, FlushTableExpr};
 use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
 use common_query::Output;
 use common_telemetry::info;
 use session::context::QueryContext;
 use snafu::prelude::*;
-use table::requests::DropTableRequest;
+use table::requests::{DropTableRequest, FlushTableRequest};
 
 use crate::error::{
     AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu,
@@ -82,6 +82,24 @@ impl Instance {
             .execute(SqlRequest::DropTable(req), QueryContext::arc())
             .await
     }
 
+    pub(crate) async fn handle_flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
+        let table_name = if expr.table_name.trim().is_empty() {
+            None
+        } else {
+            Some(expr.table_name)
+        };
+
+        let req = FlushTableRequest {
+            catalog_name: expr.catalog_name,
+            schema_name: expr.schema_name,
+            table_name,
+            region_number: expr.region_id,
+        };
+        self.sql_handler()
+            .execute(SqlRequest::FlushTable(req), QueryContext::arc())
+            .await
+    }
 }
 
 #[cfg(test)]
@@ -136,7 +154,6 @@ mod tests {
     }
 
     #[test]
-
     fn test_create_column_schema() {
         let column_def = ColumnDef {
             name: "a".to_string(),
@@ -13,22 +13,24 @@
 // limitations under the License.
 
 use catalog::CatalogManagerRef;
+use common_error::prelude::BoxedError;
 use common_procedure::ProcedureManagerRef;
 use common_query::Output;
 use common_telemetry::error;
 use query::query_engine::QueryEngineRef;
-use query::sql::{describe_table, explain, show_databases, show_tables};
+use query::sql::{describe_table, show_databases, show_tables};
 use session::context::QueryContextRef;
 use snafu::{OptionExt, ResultExt};
 use sql::statements::delete::Delete;
 use sql::statements::describe::DescribeTable;
-use sql::statements::explain::Explain;
 use sql::statements::show::{ShowDatabases, ShowTables};
 use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
 use table::requests::*;
 use table::TableRef;
 
-use crate::error::{self, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu};
+use crate::error::{
+    self, CloseTableEngineSnafu, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu,
+};
 use crate::instance::sql::table_idents_to_full_name;
 
 mod alter;
@@ -37,6 +39,7 @@ mod copy_table_from;
 mod create;
 mod delete;
 mod drop_table;
+mod flush_table;
 pub(crate) mod insert;
 
 #[derive(Debug)]
@@ -46,10 +49,10 @@ pub enum SqlRequest {
     CreateDatabase(CreateDatabaseRequest),
     Alter(AlterTableRequest),
     DropTable(DropTableRequest),
+    FlushTable(FlushTableRequest),
     ShowDatabases(ShowDatabases),
     ShowTables(ShowTables),
     DescribeTable(DescribeTable),
-    Explain(Box<Explain>),
     Delete(Delete),
     CopyTable(CopyTableRequest),
     CopyTableFrom(CopyTableFromRequest),
@@ -115,9 +118,7 @@ impl SqlHandler {
                 })?;
                 describe_table(table).context(ExecuteSqlSnafu)
             }
-            SqlRequest::Explain(req) => explain(req, self.query_engine.clone(), query_ctx.clone())
-                .await
-                .context(ExecuteSqlSnafu),
+            SqlRequest::FlushTable(req) => self.flush_table(req).await,
         };
         if let Err(e) = &result {
             error!(e; "{query_ctx}");
@@ -139,6 +140,14 @@ impl SqlHandler {
     pub fn table_engine(&self) -> TableEngineRef {
         self.table_engine.clone()
     }
 
+    pub async fn close(&self) -> Result<()> {
+        self.table_engine
+            .close()
+            .await
+            .map_err(BoxedError::new)
+            .context(CloseTableEngineSnafu)
+    }
 }
 
 #[cfg(test)]
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashMap;
 use std::pin::Pin;
 
 use common_query::physical_plan::SessionContext;
@@ -22,16 +23,54 @@ use datafusion::parquet::basic::{Compression, Encoding};
 use datafusion::parquet::file::properties::WriterProperties;
 use datafusion::physical_plan::RecordBatchStream;
 use futures::TryStreamExt;
-use object_store::services::Fs as Builder;
-use object_store::{ObjectStore, ObjectStoreBuilder};
+use object_store::ObjectStore;
 use snafu::ResultExt;
 use table::engine::TableReference;
 use table::requests::CopyTableRequest;
+use url::{ParseError, Url};
 
+use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
 use crate::error::{self, Result};
 use crate::sql::SqlHandler;
 
 impl SqlHandler {
+    fn build_backend(
+        &self,
+        url: &str,
+        connection: HashMap<String, String>,
+    ) -> Result<(ObjectStore, String)> {
+        let result = Url::parse(url);
+
+        match result {
+            Ok(url) => {
+                let host = url.host_str();
+
+                let schema = url.scheme();
+
+                let path = url.path();
+
+                match schema.to_uppercase().as_str() {
+                    S3_SCHEMA => {
+                        let object_store = build_s3_backend(host, "/", connection)?;
+                        Ok((object_store, path.to_string()))
+                    }
+
+                    _ => error::UnsupportedBackendProtocolSnafu {
+                        protocol: schema.to_string(),
+                    }
+                    .fail(),
+                }
+            }
+            Err(ParseError::RelativeUrlWithoutBase) => {
+                let object_store = build_fs_backend("/")?;
+                Ok((object_store, url.to_string()))
+            }
+            Err(err) => Err(error::Error::InvalidUrl {
+                url: url.to_string(),
+                source: err,
+            }),
+        }
+    }
+
     pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
         let table_ref = TableReference {
             catalog: &req.catalog_name,
@@ -52,13 +91,9 @@ impl SqlHandler {
             .context(error::TableScanExecSnafu)?;
         let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
 
-        let accessor = Builder::default()
-            .root("/")
-            .build()
-            .context(error::BuildBackendSnafu)?;
-        let object_store = ObjectStore::new(accessor).finish();
+        let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
 
-        let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
+        let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
         // TODO(jiachun):
         // For now, COPY is implemented synchronously.
        // When copying large table, it will be blocked for a long time.
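Note: the build_backend helper added above routes COPY output by URL scheme: an absolute URL such as s3://bucket/path selects the S3 backend, while a plain path fails Url::parse with RelativeUrlWithoutBase and falls back to the local filesystem. A rough standalone sketch of that dispatch using the url crate; pick_backend and its return strings are hypothetical, not the actual GreptimeDB API:

use url::{ParseError, Url};

// Decide which storage backend a COPY destination refers to, based on its scheme.
fn pick_backend(location: &str) -> Result<String, String> {
    match Url::parse(location) {
        Ok(url) => match url.scheme().to_uppercase().as_str() {
            "S3" => Ok(format!(
                "s3 backend, bucket={:?}, path={}",
                url.host_str(),
                url.path()
            )),
            other => Err(format!("unsupported protocol: {other}")),
        },
        // A bare path like "/tmp/out.parquet" has no scheme, so parsing fails this way.
        Err(ParseError::RelativeUrlWithoutBase) => Ok(format!("fs backend, path={location}")),
        Err(e) => Err(format!("invalid url {location}: {e}")),
    }
}

fn main() {
    println!("{:?}", pick_backend("s3://my-bucket/data/out.parquet"));
    println!("{:?}", pick_backend("/tmp/out.parquet"));
}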
@@ -34,7 +34,7 @@ use url::{ParseError, Url};
 use crate::error::{self, Result};
 use crate::sql::SqlHandler;
 
-const S3_SCHEMA: &str = "S3";
+pub const S3_SCHEMA: &str = "S3";
 const ENDPOINT_URL: &str = "ENDPOINT_URL";
 const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
 const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
@@ -165,13 +165,10 @@ impl DataSource {
             Source::Dir
         };
 
-        let accessor = Fs::default()
-            .root(&path)
-            .build()
-            .context(error::BuildBackendSnafu)?;
+        let object_store = build_fs_backend(&path)?;
 
         Ok(DataSource {
-            object_store: ObjectStore::new(accessor).finish(),
+            object_store,
             source,
             path,
             regex,
@@ -184,59 +181,6 @@ impl DataSource {
         }
     }
 
-    fn build_s3_backend(
-        host: Option<&str>,
-        path: &str,
-        connection: HashMap<String, String>,
-    ) -> Result<ObjectStore> {
-        let mut builder = S3::default();
-
-        builder.root(path);
-
-        if let Some(bucket) = host {
-            builder.bucket(bucket);
-        }
-
-        if let Some(endpoint) = connection.get(ENDPOINT_URL) {
-            builder.endpoint(endpoint);
-        }
-
-        if let Some(region) = connection.get(REGION) {
-            builder.region(region);
-        }
-
-        if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
-            builder.access_key_id(key_id);
-        }
-
-        if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
-            builder.secret_access_key(key);
-        }
-
-        if let Some(session_token) = connection.get(SESSION_TOKEN) {
-            builder.security_token(session_token);
-        }
-
-        if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
-            let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
-                error::InvalidConnectionSnafu {
-                    msg: format!(
-                        "failed to parse the option {}={}, {}",
-                        ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
-                    ),
-                }
-                .build()
-            })?;
-            if enable {
-                builder.enable_virtual_host_style();
-            }
-        }
-
-        let accessor = builder.build().context(error::BuildBackendSnafu)?;
-
-        Ok(ObjectStore::new(accessor).finish())
-    }
-
     fn from_url(
         url: Url,
         regex: Option<Regex>,
@@ -257,7 +201,7 @@ impl DataSource {
         };
 
         let object_store = match schema.to_uppercase().as_str() {
-            S3_SCHEMA => DataSource::build_s3_backend(host, &dir, connection)?,
+            S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
             _ => {
                 return error::UnsupportedBackendProtocolSnafu {
                     protocol: schema.to_string(),
@@ -348,6 +292,68 @@ impl DataSource {
     }
 }
 
+pub fn build_s3_backend(
+    host: Option<&str>,
+    path: &str,
+    connection: HashMap<String, String>,
+) -> Result<ObjectStore> {
+    let mut builder = S3::default();
+
+    builder.root(path);
+
+    if let Some(bucket) = host {
+        builder.bucket(bucket);
+    }
+
+    if let Some(endpoint) = connection.get(ENDPOINT_URL) {
+        builder.endpoint(endpoint);
+    }
+
+    if let Some(region) = connection.get(REGION) {
+        builder.region(region);
+    }
+
+    if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
+        builder.access_key_id(key_id);
+    }
+
+    if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
+        builder.secret_access_key(key);
+    }
+
+    if let Some(session_token) = connection.get(SESSION_TOKEN) {
+        builder.security_token(session_token);
+    }
+
+    if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
+        let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
+            error::InvalidConnectionSnafu {
+                msg: format!(
+                    "failed to parse the option {}={}, {}",
+                    ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
+                ),
+            }
+            .build()
+        })?;
+        if enable {
+            builder.enable_virtual_host_style();
+        }
+    }
+
+    let accessor = builder.build().context(error::BuildBackendSnafu)?;
+
+    Ok(ObjectStore::new(accessor).finish())
+}
+
+pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
+    let accessor = Fs::default()
+        .root(root)
+        .build()
+        .context(error::BuildBackendSnafu)?;
+
+    Ok(ObjectStore::new(accessor).finish())
+}
+
 #[cfg(test)]
 mod tests {
 
src/datanode/src/sql/flush_table.rs (new file, 78 lines)
@@ -0,0 +1,78 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_query::Output;
+use snafu::{OptionExt, ResultExt};
+use table::engine::TableReference;
+use table::requests::FlushTableRequest;
+
+use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
+use crate::sql::SqlHandler;
+
+impl SqlHandler {
+    pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
+        if let Some(table) = &req.table_name {
+            self.flush_table_inner(
+                &req.catalog_name,
+                &req.schema_name,
+                table,
+                req.region_number,
+            )
+            .await?;
+        } else {
+            let schema = self
+                .catalog_manager
+                .schema(&req.catalog_name, &req.schema_name)
+                .context(CatalogSnafu)?
+                .context(DatabaseNotFoundSnafu {
+                    catalog: &req.catalog_name,
+                    schema: &req.schema_name,
+                })?;
+
+            let all_table_names = schema.table_names().context(CatalogSnafu)?;
+            futures::future::join_all(all_table_names.iter().map(|table| {
+                self.flush_table_inner(
+                    &req.catalog_name,
+                    &req.schema_name,
+                    table,
+                    req.region_number,
+                )
+            }))
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>>>()?;
+        }
+        Ok(Output::AffectedRows(0))
+    }
+
+    async fn flush_table_inner(
+        &self,
+        catalog: &str,
+        schema: &str,
+        table: &str,
+        region: Option<u32>,
+    ) -> Result<()> {
+        let table_ref = TableReference {
+            catalog,
+            schema,
+            table,
+        };
+
+        let full_table_name = table_ref.to_string();
+        let table = self.get_table(&table_ref)?;
+        table.flush(region).await.context(error::FlushTableSnafu {
+            table_name: full_table_name,
+        })
+    }
+}
@@ -39,8 +39,8 @@ use table::TableRef;
 use crate::error::{
     CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
     ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
-    ExecuteSqlSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu, ParseSqlValueSnafu,
-    Result, TableNotFoundSnafu,
+    ExecuteLogicalPlanSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
+    ParseSqlValueSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
 };
 use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
 
@@ -236,18 +236,19 @@ impl SqlHandler {
 
         let logical_plan = self
             .query_engine
-            .statement_to_plan(
+            .planner()
+            .plan(
                 QueryStatement::Sql(Statement::Query(Box::new(query))),
                 query_ctx.clone(),
             )
             .await
-            .context(ExecuteSqlSnafu)?;
+            .context(PlanStatementSnafu)?;
 
         let output = self
             .query_engine
             .execute(&logical_plan)
             .await
-            .context(ExecuteSqlSnafu)?;
+            .context(ExecuteLogicalPlanSnafu)?;
 
         let stream: InsertRequestStream = match output {
             Output::RecordBatches(batches) => {
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// TODO(LFC): These tests should be moved to frontend crate. They are actually standalone instance tests.
 mod instance_test;
 mod promql_test;
 pub(crate) mod test_util;
@@ -12,16 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::env;
 use std::sync::Arc;
 
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_query::Output;
 use common_recordbatch::util;
+use common_telemetry::logging;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
+use query::parser::{QueryLanguageParser, QueryStatement};
 use session::context::QueryContext;
+use snafu::ResultExt;
+use sql::statements::statement::Statement;
 
-use crate::error::Error;
+use crate::error::{Error, ExecuteLogicalPlanSnafu, PlanStatementSnafu};
 use crate::tests::test_util::{self, check_output_stream, setup_test_instance, MockInstance};
 
 #[tokio::test(flavor = "multi_thread")]
@@ -414,7 +419,6 @@ pub async fn test_execute_create() {
 
 #[tokio::test]
 async fn test_rename_table() {
-    common_telemetry::init_default_ut_logging();
     let instance = MockInstance::new("test_rename_table_local").await;
 
     let output = execute_sql(&instance, "create database db").await;
@@ -795,6 +799,45 @@ async fn test_execute_copy_to() {
     assert!(matches!(output, Output::AffectedRows(2)));
 }
 
+#[tokio::test(flavor = "multi_thread")]
+async fn test_execute_copy_to_s3() {
+    logging::init_default_ut_logging();
+    if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+        if !bucket.is_empty() {
+            let instance = setup_test_instance("test_execute_copy_to_s3").await;
+
+            // setups
+            execute_sql(
+                &instance,
+                "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+            )
+            .await;
+
+            let output = execute_sql(
+                &instance,
+                r#"insert into demo(host, cpu, memory, ts) values
+                           ('host1', 66.6, 1024, 1655276557000),
+                           ('host2', 88.8, 333.3, 1655276558000)
+                           "#,
+            )
+            .await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+            let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
+            let key = env::var("GT_S3_ACCESS_KEY").unwrap();
+            let url =
+                env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
+
+            let root = uuid::Uuid::new_v4().to_string();
+
+            // exports
+            let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
+
+            let output = execute_sql(&instance, &copy_to_stmt).await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+        }
+    }
+}
+
 #[tokio::test(flavor = "multi_thread")]
 async fn test_execute_copy_from() {
     let instance = setup_test_instance("test_execute_copy_from").await;
@@ -880,6 +923,106 @@ async fn test_execute_copy_from() {
     }
 }
 
+#[tokio::test(flavor = "multi_thread")]
+async fn test_execute_copy_from_s3() {
+    logging::init_default_ut_logging();
+    if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+        if !bucket.is_empty() {
+            let instance = setup_test_instance("test_execute_copy_from_s3").await;
+
+            // setups
+            execute_sql(
+                &instance,
+                "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+            )
+            .await;
+
+            let output = execute_sql(
+                &instance,
+                r#"insert into demo(host, cpu, memory, ts) values
+                           ('host1', 66.6, 1024, 1655276557000),
+                           ('host2', 88.8, 333.3, 1655276558000)
+                           "#,
+            )
+            .await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+
+            // export
+            let root = uuid::Uuid::new_v4().to_string();
+            let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
+            let key = env::var("GT_S3_ACCESS_KEY").unwrap();
+            let url =
+                env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
+
+            let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
+            logging::info!("Copy table to s3: {}", copy_to_stmt);
+
+            let output = execute_sql(&instance, &copy_to_stmt).await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+
+            struct Test<'a> {
+                sql: &'a str,
+                table_name: &'a str,
+            }
+            let tests = [
+                Test {
+                    sql: &format!(
+                        "Copy with_filename FROM 's3://{}/{}/export/demo.parquet_1_2'",
+                        bucket, root
+                    ),
+                    table_name: "with_filename",
+                },
+                Test {
+                    sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root),
+                    table_name: "with_path",
+                },
+                Test {
+                    sql: &format!(
+                        "Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')",
+                        bucket, root
+                    ),
+                    table_name: "with_pattern",
+                },
+            ];
+
+            for test in tests {
+                // import
+                execute_sql(
+                    &instance,
+                    &format!(
+                        "create table {}(host string, cpu double, memory double, ts timestamp time index);",
+                        test.table_name
+                    ),
+                )
+                .await;
+                let sql = format!(
+                    "{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')",
+                    test.sql, key_id, key, url
+                );
+                logging::info!("Running sql: {}", sql);
+
+                let output = execute_sql(&instance, &sql).await;
+                assert!(matches!(output, Output::AffectedRows(2)));
+
+                let output = execute_sql(
+                    &instance,
+                    &format!("select * from {} order by ts", test.table_name),
+                )
+                .await;
+                let expected = "\
++-------+------+--------+---------------------+
+| host  | cpu  | memory | ts                  |
++-------+------+--------+---------------------+
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3  | 2022-06-15T07:02:38 |
++-------+------+--------+---------------------+"
+                .to_string();
+                check_output_stream(output, expected).await;
+            }
+        }
+    }
+}
+
 #[tokio::test(flavor = "multi_thread")]
 async fn test_create_by_procedure() {
     common_telemetry::init_default_ut_logging();
@@ -933,7 +1076,20 @@ async fn try_execute_sql_in_db(
     db: &str,
 ) -> Result<Output, crate::error::Error> {
     let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
-    instance.inner().execute_sql(sql, query_ctx).await
+
+    let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+    match stmt {
+        QueryStatement::Sql(Statement::Query(_)) => {
+            let engine = instance.inner().query_engine();
+            let plan = engine
+                .planner()
+                .plan(stmt, query_ctx)
+                .await
+                .context(PlanStatementSnafu)?;
+            engine.execute(&plan).await.context(ExecuteLogicalPlanSnafu)
+        }
+        _ => instance.inner().execute_stmt(stmt, query_ctx).await,
+    }
 }
 
 async fn execute_sql_in_db(instance: &MockInstance, sql: &str, db: &str) -> Output {
@@ -31,22 +31,14 @@ async fn create_insert_query_assert(
     expected: &str,
 ) {
     let instance = setup_test_instance("test_execute_insert").await;
-    let query_ctx = QueryContext::arc();
-    instance
-        .inner()
-        .execute_sql(create, query_ctx.clone())
-        .await
-        .unwrap();
 
-    instance
-        .inner()
-        .execute_sql(insert, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(create).await;
+
+    instance.execute_sql(insert).await;
 
     let query_output = instance
         .inner()
-        .execute_promql_statement(promql, start, end, interval, lookback, query_ctx)
+        .execute_promql_statement(promql, start, end, interval, lookback, QueryContext::arc())
         .await
         .unwrap();
     let expected = String::from(expected);
@@ -56,24 +48,12 @@ async fn create_insert_query_assert(
 #[allow(clippy::too_many_arguments)]
 async fn create_insert_tql_assert(create: &str, insert: &str, tql: &str, expected: &str) {
     let instance = setup_test_instance("test_execute_insert").await;
-    let query_ctx = QueryContext::arc();
-    instance
-        .inner()
-        .execute_sql(create, query_ctx.clone())
-        .await
-        .unwrap();
 
-    instance
-        .inner()
-        .execute_sql(insert, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(create).await;
 
-    let query_output = instance
-        .inner()
-        .execute_sql(tql, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(insert).await;
+
+    let query_output = instance.execute_sql(tql).await;
     let expected = String::from(expected);
     check_unordered_output_stream(query_output, expected).await;
 }
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 use std::sync::Arc;
+use std::time::Duration;
 
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
 use common_query::Output;
@@ -22,9 +23,13 @@ use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use mito::config::EngineConfig;
 use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
+use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
 use query::QueryEngineFactory;
 use servers::Mode;
+use session::context::QueryContext;
 use snafu::ResultExt;
+use sql::statements::statement::Statement;
+use sql::statements::tql::Tql;
 use table::engine::{EngineContext, TableEngineRef};
 use table::requests::{CreateTableRequest, TableOptions};
 
@@ -60,6 +65,8 @@ impl MockInstance {
             store: ObjectStoreConfig::File(FileConfig {
                 data_dir: procedure_dir.path().to_str().unwrap().to_string(),
             }),
+            max_retry_times: 3,
+            retry_delay: Duration::from_millis(500),
         });
 
         let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
@@ -72,6 +79,40 @@ impl MockInstance {
         }
     }
 
+    pub(crate) async fn execute_sql(&self, sql: &str) -> Output {
+        let engine = self.inner().query_engine();
+        let planner = engine.planner();
+
+        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+        match stmt {
+            QueryStatement::Sql(Statement::Query(_)) => {
+                let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
+                engine.execute(&plan).await.unwrap()
+            }
+            QueryStatement::Sql(Statement::Tql(tql)) => {
+                let plan = match tql {
+                    Tql::Eval(eval) => {
+                        let promql = PromQuery {
+                            start: eval.start,
+                            end: eval.end,
+                            step: eval.step,
+                            query: eval.query,
+                        };
+                        let stmt = QueryLanguageParser::parse_promql(&promql).unwrap();
+                        planner.plan(stmt, QueryContext::arc()).await.unwrap()
+                    }
+                    Tql::Explain(_) => unimplemented!(),
+                };
+                engine.execute(&plan).await.unwrap()
+            }
+            _ => self
+                .inner()
+                .execute_stmt(stmt, QueryContext::arc())
+                .await
+                .unwrap(),
+        }
+    }
+
     pub(crate) fn inner(&self) -> &Instance {
         &self.instance
     }
@@ -14,6 +14,7 @@ client = { path = "../client" }
 common-base = { path = "../common/base" }
 common-catalog = { path = "../common/catalog" }
 common-error = { path = "../common/error" }
+common-function = { path = "../common/function" }
 common-grpc = { path = "../common/grpc" }
 common-grpc-expr = { path = "../common/grpc-expr" }
 common-query = { path = "../common/query" }
@@ -44,6 +44,12 @@ pub enum Error {
         source: servers::error::Error,
     },
 
+    #[snafu(display("Failed to shutdown server, source: {}", source))]
+    ShutdownServer {
+        #[snafu(backtrace)]
+        source: servers::error::Error,
+    },
+
     #[snafu(display("Failed to parse address {}, source: {}", addr, source))]
     ParseAddr {
         addr: String,
@@ -241,6 +247,24 @@ pub enum Error {
         source: query::error::Error,
     },
 
+    #[snafu(display("Failed to plan statement, source: {}", source))]
+    PlanStatement {
+        #[snafu(backtrace)]
+        source: query::error::Error,
+    },
+
+    #[snafu(display("Failed to parse query, source: {}", source))]
+    ParseQuery {
+        #[snafu(backtrace)]
+        source: query::error::Error,
+    },
+
+    #[snafu(display("Failed to execute logical plan, source: {}", source))]
+    ExecLogicalPlan {
+        #[snafu(backtrace)]
+        source: query::error::Error,
+    },
+
     #[snafu(display("Failed to build DataFusion logical plan, source: {}", source))]
     BuildDfLogicalPlan {
         source: datafusion_common::DataFusionError,
|
|||||||
|
|
||||||
Error::SqlExecIntercepted { source, .. } => source.status_code(),
|
Error::SqlExecIntercepted { source, .. } => source.status_code(),
|
||||||
Error::StartServer { source, .. } => source.status_code(),
|
Error::StartServer { source, .. } => source.status_code(),
|
||||||
|
Error::ShutdownServer { source, .. } => source.status_code(),
|
||||||
|
|
||||||
Error::ParseSql { source } => source.status_code(),
|
Error::ParseSql { source } => source.status_code(),
|
||||||
|
|
||||||
@@ -419,9 +444,12 @@ impl ErrorExt for Error {
             | Error::ToTableInsertRequest { source }
             | Error::FindNewColumnsOnInsertion { source } => source.status_code(),
 
-            Error::ExecuteStatement { source, .. } | Error::DescribeStatement { source } => {
-                source.status_code()
-            }
+            Error::ExecuteStatement { source, .. }
+            | Error::PlanStatement { source }
+            | Error::ParseQuery { source }
+            | Error::ExecLogicalPlan { source }
+            | Error::DescribeStatement { source } => source.status_code(),
 
             Error::AlterExprToRequest { source, .. } => source.status_code(),
             Error::LeaderNotFound { .. } => StatusCode::StorageUnavailable,
             Error::TableAlreadyExist { .. } => StatusCode::TableAlreadyExists,
@@ -12,25 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
-
-use common_base::Plugins;
 use meta_client::MetaClientOptions;
 use serde::{Deserialize, Serialize};
 use servers::http::HttpOptions;
 use servers::Mode;
-use snafu::prelude::*;
 
-use crate::error::{self, Result};
 use crate::grpc::GrpcOptions;
 use crate::influxdb::InfluxdbOptions;
-use crate::instance::FrontendInstance;
 use crate::mysql::MysqlOptions;
 use crate::opentsdb::OpentsdbOptions;
 use crate::postgres::PostgresOptions;
 use crate::prom::PromOptions;
 use crate::prometheus::PrometheusOptions;
-use crate::server::Services;
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(default)]
@@ -64,40 +57,6 @@ impl Default for FrontendOptions {
     }
 }
 
-pub struct Frontend<T>
-where
-    T: FrontendInstance,
-{
-    opts: FrontendOptions,
-    instance: Option<T>,
-    plugins: Arc<Plugins>,
-}
-
-impl<T: FrontendInstance> Frontend<T> {
-    pub fn new(opts: FrontendOptions, instance: T, plugins: Arc<Plugins>) -> Self {
-        Self {
-            opts,
-            instance: Some(instance),
-            plugins,
-        }
-    }
-
-    pub async fn start(&mut self) -> Result<()> {
-        let mut instance = self
-            .instance
-            .take()
-            .context(error::IllegalFrontendStateSnafu {
-                err_msg: "Frontend instance not initialized",
-            })?;
-        instance.start().await?;
-
-        let instance = Arc::new(instance);
-
-        // TODO(sunng87): merge this into instance
-        Services::start(&self.opts, instance, self.plugins.clone()).await
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -36,22 +36,26 @@ use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
 use common_query::Output;
 use common_recordbatch::RecordBatches;
 use common_telemetry::logging::{debug, info};
+use common_telemetry::timer;
 use datafusion::sql::sqlparser::ast::ObjectName;
 use datanode::instance::sql::table_idents_to_full_name;
 use datanode::instance::InstanceRef as DnInstanceRef;
+use datanode::metric;
 use datatypes::schema::Schema;
 use distributed::DistInstance;
 use meta_client::client::{MetaClient, MetaClientBuilder};
 use meta_client::MetaClientOptions;
 use partition::manager::PartitionRuleManager;
 use partition::route::TableRoutes;
-use query::parser::PromQuery;
+use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
 use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
+use query::query_engine::StatementHandlerRef;
+use query::{QueryEngineFactory, QueryEngineRef};
 use servers::error as server_error;
 use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
 use servers::prom::{PromHandler, PromHandlerRef};
 use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
-use servers::query_handler::sql::{SqlQueryHandler, SqlQueryHandlerRef};
+use servers::query_handler::sql::SqlQueryHandler;
 use servers::query_handler::{
     InfluxdbLineProtocolHandler, OpentsdbProtocolHandler, PrometheusProtocolHandler, ScriptHandler,
     ScriptHandlerRef,
@@ -62,16 +66,19 @@ use sql::dialect::GenericDialect;
 use sql::parser::ParserContext;
 use sql::statements::copy::CopyTable;
 use sql::statements::statement::Statement;
+use sql::statements::tql::Tql;
 
 use crate::catalog::FrontendCatalogManager;
 use crate::datanode::DatanodeClients;
 use crate::error::{
-    self, Error, ExecutePromqlSnafu, ExternalSnafu, MissingMetasrvOptsSnafu, NotSupportedSnafu,
-    ParseSqlSnafu, Result, SqlExecInterceptedSnafu,
+    self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExecuteStatementSnafu, ExternalSnafu,
+    InvalidInsertRequestSnafu, MissingMetasrvOptsSnafu, NotSupportedSnafu, ParseQuerySnafu,
+    ParseSqlSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
 };
 use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
 use crate::frontend::FrontendOptions;
-use crate::instance::standalone::{StandaloneGrpcQueryHandler, StandaloneSqlQueryHandler};
+use crate::instance::standalone::StandaloneGrpcQueryHandler;
+use crate::server::{start_server, ServerHandlers, Services};
 
 #[async_trait]
 pub trait FrontendInstance:
@@ -97,7 +104,8 @@ pub struct Instance {
 
     /// Script handler is None in distributed mode, only works on standalone mode.
     script_handler: Option<ScriptHandlerRef>,
-    sql_handler: SqlQueryHandlerRef<Error>,
+    statement_handler: StatementHandlerRef,
+    query_engine: QueryEngineRef,
     grpc_query_handler: GrpcQueryHandlerRef<Error>,
     promql_handler: Option<PromHandlerRef>,
 
@@ -106,6 +114,8 @@ pub struct Instance {
     /// plugins: this map holds extensions to customize query or auth
     /// behaviours.
     plugins: Arc<Plugins>,
+
+    servers: Arc<ServerHandlers>,
 }
 
 impl Instance {
@@ -128,22 +138,24 @@ impl Instance {
             datanode_clients.clone(),
         ));
 
-        let dist_instance = DistInstance::new(
-            meta_client,
-            catalog_manager.clone(),
-            datanode_clients,
-            plugins.clone(),
-        );
+        let dist_instance =
+            DistInstance::new(meta_client, catalog_manager.clone(), datanode_clients);
         let dist_instance = Arc::new(dist_instance);
 
+        let query_engine =
+            QueryEngineFactory::new_with_plugins(catalog_manager.clone(), plugins.clone())
+                .query_engine();
+
         Ok(Instance {
             catalog_manager,
             script_handler: None,
             create_expr_factory: Arc::new(DefaultCreateExprFactory),
-            sql_handler: dist_instance.clone(),
+            statement_handler: dist_instance.clone(),
+            query_engine,
             grpc_query_handler: dist_instance,
             promql_handler: None,
-            plugins,
+            plugins: plugins.clone(),
+            servers: Arc::new(HashMap::new()),
         })
     }
 
@@ -182,23 +194,40 @@ impl Instance {
             catalog_manager: dn_instance.catalog_manager().clone(),
             script_handler: None,
             create_expr_factory: Arc::new(DefaultCreateExprFactory),
-            sql_handler: StandaloneSqlQueryHandler::arc(dn_instance.clone()),
+            statement_handler: dn_instance.clone(),
+            query_engine: dn_instance.query_engine(),
             grpc_query_handler: StandaloneGrpcQueryHandler::arc(dn_instance.clone()),
             promql_handler: Some(dn_instance.clone()),
             plugins: Default::default(),
+            servers: Arc::new(HashMap::new()),
         }
     }
 
+    pub async fn build_servers(
+        &mut self,
+        opts: &FrontendOptions,
+        plugins: Arc<Plugins>,
+    ) -> Result<()> {
+        let servers = Services::build(opts, Arc::new(self.clone()), plugins).await?;
+        self.servers = Arc::new(servers);
+
+        Ok(())
+    }
+
     #[cfg(test)]
     pub(crate) fn new_distributed(dist_instance: Arc<DistInstance>) -> Self {
+        let catalog_manager = dist_instance.catalog_manager();
+        let query_engine = QueryEngineFactory::new(catalog_manager.clone()).query_engine();
         Instance {
-            catalog_manager: dist_instance.catalog_manager(),
+            catalog_manager,
            script_handler: None,
+            statement_handler: dist_instance.clone(),
+            query_engine,
             create_expr_factory: Arc::new(DefaultCreateExprFactory),
-            sql_handler: dist_instance.clone(),
             grpc_query_handler: dist_instance,
             promql_handler: None,
             plugins: Default::default(),
+            servers: Arc::new(HashMap::new()),
         }
     }
 
@@ -231,7 +260,7 @@ impl Instance {
     }
 
     async fn handle_insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<Output> {
-        self.create_or_alter_table_on_demand(ctx.clone(), &request.table_name, &request.columns)
+        self.create_or_alter_table_on_demand(ctx.clone(), &request)
             .await?;
 
         let query = Request::Insert(request);
@@ -244,11 +273,12 @@ impl Instance {
     async fn create_or_alter_table_on_demand(
         &self,
         ctx: QueryContextRef,
-        table_name: &str,
-        columns: &[Column],
+        request: &InsertRequest,
     ) -> Result<()> {
         let catalog_name = &ctx.current_catalog();
         let schema_name = &ctx.current_schema();
+        let table_name = &request.table_name;
+        let columns = &request.columns;
+
         let table = self
             .catalog_manager
@@ -271,6 +301,8 @@ impl Instance {
             Some(table) => {
                 let schema = table.schema();
 
+                validate_insert_request(schema.as_ref(), request)?;
+
                 if let Some(add_columns) = common_grpc_expr::find_new_columns(&schema, columns)
                     .context(error::FindNewColumnsOnInsertionSnafu)?
                 {
@@ -370,13 +402,24 @@ impl Instance {
     pub fn plugins(&self) -> Arc<Plugins> {
         self.plugins.clone()
     }
+
+    pub async fn shutdown(&self) -> Result<()> {
+        futures::future::try_join_all(self.servers.values().map(|server| server.0.shutdown()))
+            .await
+            .context(error::ShutdownServerSnafu)
+            .map(|_| ())
+    }
 }
 
 #[async_trait]
 impl FrontendInstance for Instance {
     async fn start(&mut self) -> Result<()> {
         // TODO(hl): Frontend init should move to here
-        Ok(())
+        futures::future::try_join_all(self.servers.values().map(start_server))
+            .await
+            .context(error::StartServerSnafu)
+            .map(|_| ())
     }
 }
 
@@ -387,20 +430,57 @@ fn parse_stmt(sql: &str) -> Result<Vec<Statement>> {
 impl Instance {
     async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
         check_permission(self.plugins.clone(), &stmt, &query_ctx)?;
+
+        let planner = self.query_engine.planner();
+
         match stmt {
+            Statement::Query(_) | Statement::Explain(_) => {
+                let plan = planner
+                    .plan(QueryStatement::Sql(stmt), query_ctx)
+                    .await
+                    .context(PlanStatementSnafu)?;
+                self.query_engine
+                    .execute(&plan)
+                    .await
+                    .context(ExecLogicalPlanSnafu)
+            }
+            Statement::Tql(tql) => {
+                let plan = match tql {
+                    Tql::Eval(eval) => {
+                        let promql = PromQuery {
+                            start: eval.start,
+                            end: eval.end,
+                            step: eval.step,
+                            query: eval.query,
+                        };
+                        let stmt =
+                            QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
+                        planner
+                            .plan(stmt, query_ctx)
+                            .await
+                            .context(PlanStatementSnafu)?
+                    }
+                    Tql::Explain(_) => unimplemented!(),
+                };
+                self.query_engine
+                    .execute(&plan)
+                    .await
+                    .context(ExecLogicalPlanSnafu)
+            }
             Statement::CreateDatabase(_)
             | Statement::ShowDatabases(_)
             | Statement::CreateTable(_)
             | Statement::ShowTables(_)
             | Statement::DescribeTable(_)
-            | Statement::Explain(_)
-            | Statement::Query(_)
             | Statement::Insert(_)
             | Statement::Delete(_)
             | Statement::Alter(_)
             | Statement::DropTable(_)
-            | Statement::Tql(_)
-            | Statement::Copy(_) => self.sql_handler.do_statement_query(stmt, query_ctx).await,
+            | Statement::Copy(_) => self
+                .statement_handler
+                .handle_statement(QueryStatement::Sql(stmt), query_ctx)
+                .await
+                .context(ExecuteStatementSnafu),
             Statement::Use(db) => self.handle_use(db, query_ctx),
             Statement::ShowCreateTable(_) => NotSupportedSnafu {
                 feat: format!("{stmt:?}"),
@@ -415,6 +495,8 @@ impl SqlQueryHandler for Instance {
     type Error = Error;
 
     async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
+        let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
+
         let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
         let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
             Ok(q) => q,
@@ -471,28 +553,26 @@ impl SqlQueryHandler for Instance {
         }
     }
 
-    async fn do_statement_query(
-        &self,
-        stmt: Statement,
-        query_ctx: QueryContextRef,
-    ) -> Result<Output> {
-        let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
-
-        // TODO(sunng87): figure out at which stage we can call
-        // this hook after ArrowFlight adoption. We need to provide
-        // LogicalPlan as to this hook.
-        query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
-        self.query_statement(stmt, query_ctx.clone())
-            .await
-            .and_then(|output| query_interceptor.post_execute(output, query_ctx.clone()))
-    }
-
     async fn do_describe(
         &self,
         stmt: Statement,
         query_ctx: QueryContextRef,
     ) -> Result<Option<Schema>> {
-        self.sql_handler.do_describe(stmt, query_ctx).await
+        if let Statement::Query(_) = stmt {
+            let plan = self
+                .query_engine
+                .planner()
+                .plan(QueryStatement::Sql(stmt), query_ctx)
+                .await
+                .context(PlanStatementSnafu)?;
+            self.query_engine
+                .describe(plan)
+                .await
+                .map(Some)
+                .context(error::DescribeStatementSnafu)
+        } else {
+            Ok(None)
+        }
     }
 
     fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
@@ -597,7 +677,7 @@ pub fn check_permission(
             validate_param(delete.table_name(), query_ctx)?;
         }
         Statement::Copy(stmd) => match stmd {
-            CopyTable::To(copy_table_to) => validate_param(copy_table_to.table_name(), query_ctx)?,
+            CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
             CopyTable::From(copy_table_from) => {
                 validate_param(&copy_table_from.table_name, query_ctx)?
             }
@@ -616,13 +696,39 @@ fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()>
         .context(SqlExecInterceptedSnafu)
 }
 
+fn validate_insert_request(schema: &Schema, request: &InsertRequest) -> Result<()> {
+    for column_schema in schema.column_schemas() {
+        if column_schema.is_nullable() || column_schema.default_constraint().is_some() {
+            continue;
+        }
+        let not_null = request
+            .columns
+            .iter()
+            .find(|x| x.column_name == column_schema.name)
+            .map(|column| column.null_mask.is_empty() || column.null_mask.iter().all(|x| *x == 0));
+        ensure!(
+            not_null == Some(true),
+            InvalidInsertRequestSnafu {
+                reason: format!(
+                    "Expecting insert data to be presented on a not null or no default value column '{}'.",
+                    &column_schema.name
+                )
+            }
+        );
+    }
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use std::borrow::Cow;
     use std::collections::HashMap;
     use std::sync::atomic::AtomicU32;
 
+    use api::v1::column::Values;
     use catalog::helper::{TableGlobalKey, TableGlobalValue};
+    use datatypes::prelude::{ConcreteDataType, Value};
+    use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
     use query::query_engine::options::QueryOptions;
     use session::context::QueryContext;
     use strfmt::Format;
@@ -632,6 +738,71 @@ mod tests {
     use crate::tests;
     use crate::tests::MockDistributedInstance;
 
+    #[test]
+    fn test_validate_insert_request() {
+        let schema = Schema::new(vec![
+            ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true)
+                .with_default_constraint(None)
+                .unwrap(),
+            ColumnSchema::new("b", ConcreteDataType::int32_datatype(), true)
+                .with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Int32(100))))
+                .unwrap(),
+        ]);
+        let request = InsertRequest {
+            columns: vec![Column {
+                column_name: "c".to_string(),
+                values: Some(Values {
+                    i32_values: vec![1],
+                    ..Default::default()
+                }),
+                null_mask: vec![0],
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+        // If nullable is true, it doesn't matter whether the insert request has the column.
+        assert!(validate_insert_request(&schema, &request).is_ok());
+
+        let schema = Schema::new(vec![
+            ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false)
+                .with_default_constraint(None)
+                .unwrap(),
+            ColumnSchema::new("b", ConcreteDataType::int32_datatype(), false)
+                .with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Int32(-100))))
+                .unwrap(),
+        ]);
+        let request = InsertRequest {
+            columns: vec![Column {
+                column_name: "a".to_string(),
+                values: Some(Values {
+                    i32_values: vec![1],
+                    ..Default::default()
+                }),
+                null_mask: vec![0],
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+        // If nullable is false, but the column is defined with default value,
+        // it also doesn't matter whether the insert request has the column.
+        assert!(validate_insert_request(&schema, &request).is_ok());
+
+        let request = InsertRequest {
+            columns: vec![Column {
+                column_name: "b".to_string(),
+                values: Some(Values {
+                    i32_values: vec![1],
+                    ..Default::default()
+                }),
+                null_mask: vec![0],
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+        // Neither of the above cases.
+        assert!(validate_insert_request(&schema, &request).is_err());
+    }
+
     #[test]
     fn test_exec_validation() {
         let query_ctx = Arc::new(QueryContext::new());
@@ -906,12 +1077,16 @@ mod tests {
             .collect::<HashMap<u32, u64>>();
         assert_eq!(region_to_dn_map.len(), expected_distribution.len());
 
+        let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
         for (region, dn) in region_to_dn_map.iter() {
             let dn = instance.datanodes.get(dn).unwrap();
-            let output = dn
-                .execute_sql("SELECT ts, host FROM demo ORDER BY ts", QueryContext::arc())
+            let engine = dn.query_engine();
+            let plan = engine
+                .planner()
+                .plan(stmt.clone(), QueryContext::arc())
                 .await
                 .unwrap();
+            let output = engine.execute(&plan).await.unwrap();
             let Output::Stream(stream) = output else { unreachable!() };
             let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
             let actual = recordbatches.pretty_print().unwrap();
@@ -19,15 +19,14 @@ use std::sync::Arc;
 
 use api::helper::ColumnDataTypeWrapper;
 use api::v1::{
-    column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, InsertRequest,
-    TableId,
+    column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, FlushTableExpr,
+    InsertRequest, TableId,
 };
 use async_trait::async_trait;
 use catalog::helper::{SchemaKey, SchemaValue};
 use catalog::{CatalogManager, DeregisterTableRequest, RegisterTableRequest};
 use chrono::DateTime;
 use client::Database;
-use common_base::Plugins;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_catalog::format_full_table_name;
 use common_error::prelude::BoxedError;
@@ -35,18 +34,18 @@ use common_query::Output;
 use common_telemetry::{debug, info};
 use datanode::instance::sql::table_idents_to_full_name;
 use datatypes::prelude::ConcreteDataType;
-use datatypes::schema::{RawSchema, Schema};
+use datatypes::schema::RawSchema;
 use meta_client::client::MetaClient;
 use meta_client::rpc::router::DeleteRequest as MetaDeleteRequest;
 use meta_client::rpc::{
     CompareAndPutRequest, CreateRequest as MetaCreateRequest, Partition as MetaPartition,
-    RouteResponse, TableName,
+    RouteRequest, RouteResponse, TableName,
 };
 use partition::partition::{PartitionBound, PartitionDef};
-use query::parser::{PromQuery, QueryStatement};
-use query::sql::{describe_table, explain, show_databases, show_tables};
-use query::{QueryEngineFactory, QueryEngineRef};
-use servers::query_handler::sql::SqlQueryHandler;
+use query::error::QueryExecutionSnafu;
+use query::parser::QueryStatement;
+use query::query_engine::StatementHandler;
+use query::sql::{describe_table, show_databases, show_tables};
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
 use sql::ast::Value as SqlValue;
@@ -61,12 +60,12 @@ use crate::catalog::FrontendCatalogManager;
 use crate::datanode::DatanodeClients;
 use crate::error::{
     self, AlterExprToRequestSnafu, CatalogEntrySerdeSnafu, CatalogSnafu, ColumnDataTypeSnafu,
-    DeserializePartitionSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu, RequestDatanodeSnafu,
-    RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu, TableAlreadyExistSnafu,
-    TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
+    DeserializePartitionSnafu, NotSupportedSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
+    RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu,
+    TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu,
+    UnrecognizedTableOptionSnafu,
 };
 use crate::expr_factory;
-use crate::instance::parse_stmt;
 use crate::sql::insert_to_request;
 use crate::table::DistTable;
 
@@ -75,7 +74,6 @@ pub(crate) struct DistInstance {
     meta_client: Arc<MetaClient>,
     catalog_manager: Arc<FrontendCatalogManager>,
     datanode_clients: Arc<DatanodeClients>,
-    query_engine: QueryEngineRef,
 }
 
 impl DistInstance {
@@ -83,16 +81,11 @@ impl DistInstance {
         meta_client: Arc<MetaClient>,
         catalog_manager: Arc<FrontendCatalogManager>,
         datanode_clients: Arc<DatanodeClients>,
-        plugins: Arc<Plugins>,
     ) -> Self {
-        let query_engine =
-            QueryEngineFactory::new_with_plugins(catalog_manager.clone(), plugins.clone())
-                .query_engine();
         Self {
             meta_client,
             catalog_manager,
             datanode_clients,
-            query_engine,
         }
     }
 
@@ -266,20 +259,67 @@ impl DistInstance {
         Ok(Output::AffectedRows(1))
     }
 
+    async fn flush_table(&self, table_name: TableName, region_id: Option<u32>) -> Result<Output> {
+        let _ = self
+            .catalog_manager
+            .table(
+                &table_name.catalog_name,
+                &table_name.schema_name,
+                &table_name.table_name,
+            )
+            .await
+            .context(CatalogSnafu)?
+            .with_context(|| TableNotFoundSnafu {
+                table_name: table_name.to_string(),
+            })?;
+
+        let route_response = self
+            .meta_client
+            .route(RouteRequest {
+                table_names: vec![table_name.clone()],
+            })
+            .await
+            .context(RequestMetaSnafu)?;
+
+        let expr = FlushTableExpr {
+            catalog_name: table_name.catalog_name.clone(),
+            schema_name: table_name.schema_name.clone(),
+            table_name: table_name.table_name.clone(),
+            region_id,
+        };
+
+        for table_route in &route_response.table_routes {
+            let should_send_rpc = table_route.region_routes.iter().any(|route| {
+                if let Some(region_id) = region_id {
+                    region_id == route.region.id as u32
+                } else {
+                    true
+                }
+            });
+
+            if !should_send_rpc {
+                continue;
+            }
+            for datanode in table_route.find_leaders() {
+                debug!("Flushing table {table_name} on Datanode {datanode:?}");
+
+                let client = self.datanode_clients.get_client(&datanode).await;
+                let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
+                client
+                    .flush_table(expr.clone())
+                    .await
+                    .context(RequestDatanodeSnafu)?;
+            }
+        }
+        Ok(Output::AffectedRows(0))
+    }
+
     async fn handle_statement(
         &self,
         stmt: Statement,
         query_ctx: QueryContextRef,
     ) -> Result<Output> {
         match stmt {
-            Statement::Query(_) => {
-                let plan = self
-                    .query_engine
-                    .statement_to_plan(QueryStatement::Sql(stmt), query_ctx)
-                    .await
-                    .context(error::ExecuteStatementSnafu {})?;
-                self.query_engine.execute(&plan).await
-            }
             Statement::CreateDatabase(stmt) => {
                 let expr = CreateDatabaseExpr {
                     database_name: stmt.name.to_string(),
@@ -321,9 +361,6 @@ impl DistInstance {
                 })?;
                 describe_table(table)
             }
-            Statement::Explain(stmt) => {
-                explain(Box::new(stmt), self.query_engine.clone(), query_ctx).await
-            }
             Statement::Insert(insert) => {
                 let (catalog, schema, table) =
                     table_idents_to_full_name(insert.table_name(), query_ctx.clone())
@@ -353,29 +390,6 @@ impl DistInstance {
                 .context(error::ExecuteStatementSnafu)
             }
 
-    async fn handle_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
-        let stmts = parse_stmt(sql);
-        match stmts {
-            Ok(stmts) => {
-                let mut results = Vec::with_capacity(stmts.len());
-
-                for stmt in stmts {
-                    let result = self.handle_statement(stmt, query_ctx.clone()).await;
-                    let is_err = result.is_err();
-
-                    results.push(result);
-
-                    if is_err {
-                        break;
-                    }
-                }
-
-                results
-            }
-            Err(e) => vec![Err(e)],
-        }
-    }
-
     /// Handles distributed database creation
     async fn handle_create_database(
         &self,
@@ -519,50 +533,21 @@ impl DistInstance {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl SqlQueryHandler for DistInstance {
|
impl StatementHandler for DistInstance {
|
||||||
type Error = error::Error;
|
async fn handle_statement(
|
||||||
|
|
||||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
|
||||||
self.handle_sql(query, query_ctx).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_promql_query(
|
|
||||||
&self,
|
&self,
|
||||||
_: &PromQuery,
|
stmt: QueryStatement,
|
||||||
_: QueryContextRef,
|
|
||||||
) -> Vec<std::result::Result<Output, Self::Error>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_statement_query(
|
|
||||||
&self,
|
|
||||||
stmt: Statement,
|
|
||||||
query_ctx: QueryContextRef,
|
query_ctx: QueryContextRef,
|
||||||
) -> Result<Output> {
|
) -> query::error::Result<Output> {
|
||||||
self.handle_statement(stmt, query_ctx).await
|
match stmt {
|
||||||
}
|
QueryStatement::Sql(stmt) => self.handle_statement(stmt, query_ctx).await,
|
||||||
|
QueryStatement::Promql(_) => NotSupportedSnafu {
|
||||||
async fn do_describe(
|
feat: "distributed execute promql".to_string(),
|
||||||
&self,
|
}
|
||||||
stmt: Statement,
|
.fail(),
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
) -> Result<Option<Schema>> {
|
|
||||||
if let Statement::Query(_) = stmt {
|
|
||||||
self.query_engine
|
|
||||||
.describe(QueryStatement::Sql(stmt), query_ctx)
|
|
||||||
.await
|
|
||||||
.map(Some)
|
|
||||||
.context(error::DescribeStatementSnafu)
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
}
|
||||||
}
|
.map_err(BoxedError::new)
|
||||||
|
.context(QueryExecutionSnafu)
|
||||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
|
||||||
self.catalog_manager
|
|
||||||
.schema(catalog, schema)
|
|
||||||
.map(|s| s.is_some())
|
|
||||||
.context(CatalogSnafu)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
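A minimal sketch, assuming only what the test changes below show, of how the new `StatementHandler` entry point is driven: SQL is parsed into a `QueryStatement` by `QueryLanguageParser` and then dispatched through `handle_statement`. The helper name `run_sql` is hypothetical.

    use common_query::Output;
    use query::parser::QueryLanguageParser;
    use query::query_engine::StatementHandlerRef;
    use session::context::QueryContext;

    // Hypothetical helper: run a single SQL statement through any StatementHandler.
    async fn run_sql(handler: &StatementHandlerRef, sql: &str) -> query::error::Result<Output> {
        let stmt = QueryLanguageParser::parse_sql(sql).expect("valid SQL");
        handler.handle_statement(stmt, QueryContext::arc()).await
    }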
@@ -721,14 +706,15 @@ fn find_partition_columns(
 #[cfg(test)]
 mod test {
     use itertools::Itertools;
-    use servers::query_handler::sql::SqlQueryHandlerRef;
+    use query::parser::QueryLanguageParser;
+    use query::query_engine::StatementHandlerRef;
     use session::context::QueryContext;
     use sql::dialect::GenericDialect;
     use sql::parser::ParserContext;
     use sql::statements::statement::Statement;
 
     use super::*;
-    use crate::instance::standalone::StandaloneSqlQueryHandler;
+    use crate::instance::parse_stmt;
 
     #[tokio::test]
     async fn test_parse_partitions() {
@@ -771,28 +757,28 @@ ENGINE=mito",
         }
     }
 
+    async fn handle_sql(instance: &Arc<DistInstance>, sql: &str) -> Output {
+        let stmt = parse_stmt(sql).unwrap().remove(0);
+        instance
+            .handle_statement(stmt, QueryContext::arc())
+            .await
+            .unwrap()
+    }
+
     #[tokio::test(flavor = "multi_thread")]
     async fn test_show_databases() {
         let instance = crate::tests::create_distributed_instance("test_show_databases").await;
         let dist_instance = &instance.dist_instance;
 
         let sql = "create database test_show_databases";
-        let output = dist_instance
-            .handle_sql(sql, QueryContext::arc())
-            .await
-            .remove(0)
-            .unwrap();
+        let output = handle_sql(dist_instance, sql).await;
         match output {
             Output::AffectedRows(rows) => assert_eq!(rows, 1),
             _ => unreachable!(),
         }
 
         let sql = "show databases";
-        let output = dist_instance
-            .handle_sql(sql, QueryContext::arc())
-            .await
-            .remove(0)
-            .unwrap();
+        let output = handle_sql(dist_instance, sql).await;
         match output {
             Output::RecordBatches(r) => {
                 let expected1 = vec![
@@ -829,11 +815,7 @@ ENGINE=mito",
         let datanode_instances = instance.datanodes;
 
         let sql = "create database test_show_tables";
-        dist_instance
-            .handle_sql(sql, QueryContext::arc())
-            .await
-            .remove(0)
-            .unwrap();
+        handle_sql(dist_instance, sql).await;
 
         let sql = "
CREATE TABLE greptime.test_show_tables.dist_numbers (
@@ -848,18 +830,14 @@ ENGINE=mito",
                 PARTITION r3 VALUES LESS THAN (MAXVALUE),
             )
             ENGINE=mito";
-        dist_instance
-            .handle_sql(sql, QueryContext::arc())
-            .await
-            .remove(0)
-            .unwrap();
+        handle_sql(dist_instance, sql).await;
 
-        async fn assert_show_tables(instance: SqlQueryHandlerRef<error::Error>) {
+        async fn assert_show_tables(handler: StatementHandlerRef) {
             let sql = "show tables in test_show_tables";
-            let output = instance
-                .do_query(sql, QueryContext::arc())
+            let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+            let output = handler
+                .handle_statement(stmt, QueryContext::arc())
                 .await
-                .remove(0)
                 .unwrap();
             match output {
                 Output::RecordBatches(r) => {
@@ -878,7 +856,7 @@ ENGINE=mito",
 
         // Asserts that new table is created in Datanode as well.
         for x in datanode_instances.values() {
-            assert_show_tables(StandaloneSqlQueryHandler::arc(x.clone())).await
+            assert_show_tables(x.clone()).await
         }
     }
 }
@@ -57,7 +57,11 @@ impl GrpcQueryHandler for DistInstance {
                     TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
                 self.drop_table(table_name).await
             }
-            DdlExpr::FlushTable(_) => todo!(),
+            DdlExpr::FlushTable(expr) => {
+                let table_name =
+                    TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
+                self.flush_table(table_name, expr.region_id).await
+            }
         }
     }
 }
@@ -16,6 +16,7 @@ use api::v1::greptime_request::Request;
 use api::v1::query_request::Query;
 use async_trait::async_trait;
 use common_query::Output;
+use query::parser::PromQuery;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use servers::query_handler::sql::SqlQueryHandler;
 use session::context::QueryContextRef;
@@ -54,6 +55,23 @@ impl GrpcQueryHandler for Instance {
                     }
                     .fail();
                 }
+                Query::PromRangeQuery(promql) => {
+                    let prom_query = PromQuery {
+                        query: promql.query,
+                        start: promql.start,
+                        end: promql.end,
+                        step: promql.step,
+                    };
+                    let mut result =
+                        SqlQueryHandler::do_promql_query(self, &prom_query, ctx).await;
+                    ensure!(
+                        result.len() == 1,
+                        error::NotSupportedSnafu {
+                            feat: "execute multiple statements in PromQL query string through GRPC interface"
+                        }
+                    );
+                    result.remove(0)?
+                }
             }
         }
         Request::Ddl(request) => {
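For context, a sketch of the range-query request this new arm accepts, mirroring the `test_promql_query` case further down; the helper name and literal values are illustrative.

    use api::v1::greptime_request::Request;
    use api::v1::query_request::Query;
    use api::v1::{PromRangeQuery, QueryRequest};

    // Illustrative only: start/end are Unix-second strings and step is a PromQL duration.
    fn prom_range_request() -> Request {
        Request::Query(QueryRequest {
            query: Some(Query::PromRangeQuery(PromRangeQuery {
                query: "my_table".to_string(),
                start: "1672557973".to_string(),
                end: "1672557978".to_string(),
                step: "1s".to_string(),
            })),
        })
    }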
@@ -73,13 +91,15 @@ mod test {
 use api::v1::ddl_request::Expr as DdlExpr;
 use api::v1::{
     alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
-    CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, InsertRequest,
-    QueryRequest,
+    CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
+    InsertRequest, QueryRequest,
 };
 use catalog::helper::{TableGlobalKey, TableGlobalValue};
 use common_query::Output;
 use common_recordbatch::RecordBatches;
+use query::parser::QueryLanguageParser;
 use session::context::QueryContext;
+use tests::{has_parquet_file, test_region_dir};
 
 use super::*;
 use crate::table::DistTable;
@@ -333,6 +353,108 @@ CREATE TABLE {table_name} (
|
|||||||
test_insert_and_query_on_auto_created_table(instance).await
|
test_insert_and_query_on_auto_created_table(instance).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread")]
|
||||||
|
async fn test_distributed_flush_table() {
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
|
||||||
|
let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
|
||||||
|
let data_tmp_dirs = instance.data_tmp_dirs();
|
||||||
|
let frontend = instance.frontend.as_ref();
|
||||||
|
|
||||||
|
let table_name = "my_dist_table";
|
||||||
|
let sql = format!(
|
||||||
|
r"
|
||||||
|
CREATE TABLE {table_name} (
|
||||||
|
a INT,
|
||||||
|
ts TIMESTAMP,
|
||||||
|
TIME INDEX (ts)
|
||||||
|
) PARTITION BY RANGE COLUMNS(a) (
|
||||||
|
PARTITION r0 VALUES LESS THAN (10),
|
||||||
|
PARTITION r1 VALUES LESS THAN (20),
|
||||||
|
PARTITION r2 VALUES LESS THAN (50),
|
||||||
|
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||||
|
)"
|
||||||
|
);
|
||||||
|
create_table(frontend, sql).await;
|
||||||
|
|
||||||
|
test_insert_and_query_on_existing_table(frontend, table_name).await;
|
||||||
|
|
||||||
|
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||||
|
// Wait for previous task finished
|
||||||
|
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||||
|
|
||||||
|
let table_id = 1024;
|
||||||
|
|
||||||
|
let table = instance
|
||||||
|
.frontend
|
||||||
|
.catalog_manager()
|
||||||
|
.table("greptime", "public", table_name)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
|
||||||
|
|
||||||
|
let TableGlobalValue { regions_id_map, .. } = table
|
||||||
|
.table_global_value(&TableGlobalKey {
|
||||||
|
catalog_name: "greptime".to_string(),
|
||||||
|
schema_name: "public".to_string(),
|
||||||
|
table_name: table_name.to_string(),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
let region_to_dn_map = regions_id_map
|
||||||
|
.iter()
|
||||||
|
.map(|(k, v)| (v[0], *k))
|
||||||
|
.collect::<HashMap<u32, u64>>();
|
||||||
|
|
||||||
|
for (region, dn) in region_to_dn_map.iter() {
|
||||||
|
// data_tmp_dirs -> dn: 1..4
|
||||||
|
let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
|
||||||
|
let region_dir = test_region_dir(
|
||||||
|
data_tmp_dir.path().to_str().unwrap(),
|
||||||
|
"greptime",
|
||||||
|
"public",
|
||||||
|
table_id,
|
||||||
|
*region,
|
||||||
|
);
|
||||||
|
has_parquet_file(®ion_dir);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread")]
|
||||||
|
async fn test_standalone_flush_table() {
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
|
||||||
|
let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
|
||||||
|
let instance = &standalone.instance;
|
||||||
|
let data_tmp_dir = standalone.data_tmp_dir();
|
||||||
|
|
||||||
|
let table_name = "my_table";
|
||||||
|
let sql = format!("CREATE TABLE {table_name} (a INT, ts TIMESTAMP, TIME INDEX (ts))");
|
||||||
|
|
||||||
|
create_table(instance, sql).await;
|
||||||
|
|
||||||
|
test_insert_and_query_on_existing_table(instance, table_name).await;
|
||||||
|
|
||||||
|
let table_id = 1024;
|
||||||
|
let region_id = 0;
|
||||||
|
let region_dir = test_region_dir(
|
||||||
|
data_tmp_dir.path().to_str().unwrap(),
|
||||||
|
"greptime",
|
||||||
|
"public",
|
||||||
|
table_id,
|
||||||
|
region_id,
|
||||||
|
);
|
||||||
|
assert!(!has_parquet_file(®ion_dir));
|
||||||
|
|
||||||
|
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||||
|
// Wait for previous task finished
|
||||||
|
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||||
|
|
||||||
|
assert!(has_parquet_file(®ion_dir));
|
||||||
|
}
|
||||||
|
|
||||||
 async fn create_table(frontend: &Instance, sql: String) {
     let request = Request::Query(QueryRequest {
         query: Some(Query::Sql(sql)),
@@ -341,6 +463,26 @@ CREATE TABLE {table_name} (
     assert!(matches!(output, Output::AffectedRows(0)));
 }
 
+async fn flush_table(
+    frontend: &Instance,
+    catalog_name: &str,
+    schema_name: &str,
+    table_name: &str,
+    region_id: Option<u32>,
+) {
+    let request = Request::Ddl(DdlRequest {
+        expr: Some(DdlExpr::FlushTable(FlushTableExpr {
+            catalog_name: catalog_name.to_string(),
+            schema_name: schema_name.to_string(),
+            table_name: table_name.to_string(),
+            region_id,
+        })),
+    });
+
+    let output = query(frontend, request).await;
+    assert!(matches!(output, Output::AffectedRows(0)));
+}
+
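The flush tests around this helper call it twice with a "Wait for previous task finished" comment, apparently using the second call as a barrier for the first background flush before asserting on SST files. Usage as in those tests:

    // Flush all regions of greptime.public.my_table, then call again so the
    // first flush has finished before checking for parquet files on disk.
    flush_table(frontend, "greptime", "public", "my_table", None).await;
    flush_table(frontend, "greptime", "public", "my_table", None).await;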
async fn test_insert_and_query_on_existing_table(instance: &Instance, table_name: &str) {
|
async fn test_insert_and_query_on_existing_table(instance: &Instance, table_name: &str) {
|
||||||
let insert = InsertRequest {
|
let insert = InsertRequest {
|
||||||
table_name: table_name.to_string(),
|
table_name: table_name.to_string(),
|
||||||
@@ -437,14 +579,18 @@ CREATE TABLE {table_name} (
|
|||||||
assert_eq!(region_to_dn_map.len(), expected_distribution.len());
|
assert_eq!(region_to_dn_map.len(), expected_distribution.len());
|
||||||
|
|
||||||
for (region, dn) in region_to_dn_map.iter() {
|
for (region, dn) in region_to_dn_map.iter() {
|
||||||
|
let stmt = QueryLanguageParser::parse_sql(&format!(
|
||||||
|
"SELECT ts, a FROM {table_name} ORDER BY ts"
|
||||||
|
))
|
||||||
|
.unwrap();
|
||||||
let dn = instance.datanodes.get(dn).unwrap();
|
let dn = instance.datanodes.get(dn).unwrap();
|
||||||
let output = dn
|
let engine = dn.query_engine();
|
||||||
.execute_sql(
|
let plan = engine
|
||||||
&format!("SELECT ts, a FROM {table_name} ORDER BY ts"),
|
.planner()
|
||||||
QueryContext::arc(),
|
.plan(stmt, QueryContext::arc())
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
let output = engine.execute(&plan).await.unwrap();
|
||||||
let Output::Stream(stream) = output else { unreachable!() };
|
let Output::Stream(stream) = output else { unreachable!() };
|
||||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||||
let actual = recordbatches.pretty_print().unwrap();
|
let actual = recordbatches.pretty_print().unwrap();
|
||||||
@@ -542,4 +688,100 @@ CREATE TABLE {table_name} (
|
|||||||
+---------------------+---+---+";
|
+---------------------+---+---+";
|
||||||
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread")]
|
||||||
|
async fn test_promql_query() {
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
|
||||||
|
let standalone = tests::create_standalone_instance("test_standalone_promql_query").await;
|
||||||
|
let instance = &standalone.instance;
|
||||||
|
|
||||||
|
let table_name = "my_table";
|
||||||
|
let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))");
|
||||||
|
create_table(instance, sql).await;
|
||||||
|
|
||||||
|
let insert = InsertRequest {
|
||||||
|
table_name: table_name.to_string(),
|
||||||
|
columns: vec![
|
||||||
|
Column {
|
||||||
|
column_name: "h".to_string(),
|
||||||
|
values: Some(Values {
|
||||||
|
string_values: vec![
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
"t".to_string(),
|
||||||
|
],
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
semantic_type: SemanticType::Tag as i32,
|
||||||
|
datatype: ColumnDataType::String as i32,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
Column {
|
||||||
|
column_name: "a".to_string(),
|
||||||
|
values: Some(Values {
|
||||||
|
f64_values: vec![1f64, 11f64, 20f64, 22f64, 50f64, 55f64, 99f64],
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
null_mask: vec![4],
|
||||||
|
semantic_type: SemanticType::Field as i32,
|
||||||
|
datatype: ColumnDataType::Float64 as i32,
|
||||||
|
},
|
||||||
|
Column {
|
||||||
|
column_name: "ts".to_string(),
|
||||||
|
values: Some(Values {
|
||||||
|
ts_millisecond_values: vec![
|
||||||
|
1672557972000,
|
||||||
|
1672557973000,
|
||||||
|
1672557974000,
|
||||||
|
1672557975000,
|
||||||
|
1672557976000,
|
||||||
|
1672557977000,
|
||||||
|
1672557978000,
|
||||||
|
1672557979000,
|
||||||
|
],
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
semantic_type: SemanticType::Timestamp as i32,
|
||||||
|
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
],
|
||||||
|
row_count: 8,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let request = Request::Insert(insert);
|
||||||
|
let output = query(instance, request).await;
|
||||||
|
assert!(matches!(output, Output::AffectedRows(8)));
|
||||||
|
|
||||||
|
let request = Request::Query(QueryRequest {
|
||||||
|
query: Some(Query::PromRangeQuery(api::v1::PromRangeQuery {
|
||||||
|
query: "my_table".to_owned(),
|
||||||
|
start: "1672557973".to_owned(),
|
||||||
|
end: "1672557978".to_owned(),
|
||||||
|
step: "1s".to_owned(),
|
||||||
|
})),
|
||||||
|
});
|
||||||
|
let output = query(instance, request).await;
|
||||||
|
let Output::Stream(stream) = output else { unreachable!() };
|
||||||
|
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||||
|
let expected = "\
|
||||||
|
+---+------+---------------------+
|
||||||
|
| h | a | ts |
|
||||||
|
+---+------+---------------------+
|
||||||
|
| t | 11.0 | 2023-01-01T07:26:13 |
|
||||||
|
| t | | 2023-01-01T07:26:14 |
|
||||||
|
| t | 20.0 | 2023-01-01T07:26:15 |
|
||||||
|
| t | 22.0 | 2023-01-01T07:26:16 |
|
||||||
|
| t | 50.0 | 2023-01-01T07:26:17 |
|
||||||
|
| t | 55.0 | 2023-01-01T07:26:18 |
|
||||||
|
+---+------+---------------------+";
|
||||||
|
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,74 +18,12 @@ use api::v1::greptime_request::Request as GreptimeRequest;
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use common_query::Output;
|
use common_query::Output;
|
||||||
use datanode::error::Error as DatanodeError;
|
use datanode::error::Error as DatanodeError;
|
||||||
use datatypes::schema::Schema;
|
|
||||||
use query::parser::PromQuery;
|
|
||||||
use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
|
use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
|
||||||
use servers::query_handler::sql::{SqlQueryHandler, SqlQueryHandlerRef};
|
|
||||||
use session::context::QueryContextRef;
|
use session::context::QueryContextRef;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use sql::statements::statement::Statement;
|
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
|
|
||||||
pub(crate) struct StandaloneSqlQueryHandler(SqlQueryHandlerRef<DatanodeError>);
|
|
||||||
|
|
||||||
impl StandaloneSqlQueryHandler {
|
|
||||||
pub(crate) fn arc(handler: SqlQueryHandlerRef<DatanodeError>) -> Arc<Self> {
|
|
||||||
Arc::new(Self(handler))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl SqlQueryHandler for StandaloneSqlQueryHandler {
|
|
||||||
type Error = error::Error;
|
|
||||||
|
|
||||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
|
||||||
self.0
|
|
||||||
.do_query(query, query_ctx)
|
|
||||||
.await
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| x.context(error::InvokeDatanodeSnafu))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_promql_query(
|
|
||||||
&self,
|
|
||||||
_: &PromQuery,
|
|
||||||
_: QueryContextRef,
|
|
||||||
) -> Vec<std::result::Result<Output, Self::Error>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_statement_query(
|
|
||||||
&self,
|
|
||||||
stmt: Statement,
|
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
) -> Result<Output> {
|
|
||||||
self.0
|
|
||||||
.do_statement_query(stmt, query_ctx)
|
|
||||||
.await
|
|
||||||
.context(error::InvokeDatanodeSnafu)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_describe(
|
|
||||||
&self,
|
|
||||||
stmt: Statement,
|
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
) -> Result<Option<Schema>> {
|
|
||||||
self.0
|
|
||||||
.do_describe(stmt, query_ctx)
|
|
||||||
.await
|
|
||||||
.context(error::InvokeDatanodeSnafu)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
|
||||||
self.0
|
|
||||||
.is_valid_schema(catalog, schema)
|
|
||||||
.context(error::InvokeDatanodeSnafu)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) struct StandaloneGrpcQueryHandler(GrpcQueryHandlerRef<DatanodeError>);
|
pub(crate) struct StandaloneGrpcQueryHandler(GrpcQueryHandlerRef<DatanodeError>);
|
||||||
|
|
||||||
impl StandaloneGrpcQueryHandler {
|
impl StandaloneGrpcQueryHandler {
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
@@ -30,7 +31,6 @@ use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
|||||||
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
||||||
use servers::server::Server;
|
use servers::server::Server;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use tokio::try_join;
|
|
||||||
|
|
||||||
use crate::error::Error::StartServer;
|
use crate::error::Error::StartServer;
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
@@ -41,19 +41,23 @@ use crate::prometheus::PrometheusOptions;
|
|||||||
|
|
||||||
pub(crate) struct Services;
|
pub(crate) struct Services;
|
||||||
|
|
||||||
|
pub type ServerHandlers = HashMap<String, ServerHandler>;
|
||||||
|
|
||||||
|
pub type ServerHandler = (Box<dyn Server>, SocketAddr);
|
||||||
|
|
||||||
impl Services {
|
impl Services {
|
||||||
pub(crate) async fn start<T>(
|
pub(crate) async fn build<T>(
|
||||||
opts: &FrontendOptions,
|
opts: &FrontendOptions,
|
||||||
instance: Arc<T>,
|
instance: Arc<T>,
|
||||||
plugins: Arc<Plugins>,
|
plugins: Arc<Plugins>,
|
||||||
) -> Result<()>
|
) -> Result<ServerHandlers>
|
||||||
where
|
where
|
||||||
T: FrontendInstance,
|
T: FrontendInstance,
|
||||||
{
|
{
|
||||||
info!("Starting frontend servers");
|
let mut result = Vec::<ServerHandler>::with_capacity(plugins.len());
|
||||||
let user_provider = plugins.get::<UserProviderRef>().cloned();
|
let user_provider = plugins.get::<UserProviderRef>().cloned();
|
||||||
|
|
||||||
let grpc_server_and_addr = if let Some(opts) = &opts.grpc_options {
|
if let Some(opts) = &opts.grpc_options {
|
||||||
let grpc_addr = parse_addr(&opts.addr)?;
|
let grpc_addr = parse_addr(&opts.addr)?;
|
||||||
|
|
||||||
let grpc_runtime = Arc::new(
|
let grpc_runtime = Arc::new(
|
||||||
@@ -70,12 +74,10 @@ impl Services {
|
|||||||
grpc_runtime,
|
grpc_runtime,
|
||||||
);
|
);
|
||||||
|
|
||||||
Some((Box::new(grpc_server) as _, grpc_addr))
|
result.push((Box::new(grpc_server), grpc_addr));
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let mysql_server_and_addr = if let Some(opts) = &opts.mysql_options {
|
if let Some(opts) = &opts.mysql_options {
|
||||||
let mysql_addr = parse_addr(&opts.addr)?;
|
let mysql_addr = parse_addr(&opts.addr)?;
|
||||||
|
|
||||||
let mysql_io_runtime = Arc::new(
|
let mysql_io_runtime = Arc::new(
|
||||||
@@ -102,13 +104,10 @@ impl Services {
|
|||||||
opts.reject_no_database.unwrap_or(false),
|
opts.reject_no_database.unwrap_or(false),
|
||||||
)),
|
)),
|
||||||
);
|
);
|
||||||
|
result.push((mysql_server, mysql_addr));
|
||||||
|
}
|
||||||
|
|
||||||
Some((mysql_server, mysql_addr))
|
if let Some(opts) = &opts.postgres_options {
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let postgres_server_and_addr = if let Some(opts) = &opts.postgres_options {
|
|
||||||
let pg_addr = parse_addr(&opts.addr)?;
|
let pg_addr = parse_addr(&opts.addr)?;
|
||||||
|
|
||||||
let pg_io_runtime = Arc::new(
|
let pg_io_runtime = Arc::new(
|
||||||
@@ -126,12 +125,12 @@ impl Services {
|
|||||||
user_provider.clone(),
|
user_provider.clone(),
|
||||||
)) as Box<dyn Server>;
|
)) as Box<dyn Server>;
|
||||||
|
|
||||||
Some((pg_server, pg_addr))
|
result.push((pg_server, pg_addr));
|
||||||
} else {
|
}
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let opentsdb_server_and_addr = if let Some(opts) = &opts.opentsdb_options {
|
let mut set_opentsdb_handler = false;
|
||||||
|
|
||||||
|
if let Some(opts) = &opts.opentsdb_options {
|
||||||
let addr = parse_addr(&opts.addr)?;
|
let addr = parse_addr(&opts.addr)?;
|
||||||
|
|
||||||
let io_runtime = Arc::new(
|
let io_runtime = Arc::new(
|
||||||
@@ -144,23 +143,23 @@ impl Services {
|
|||||||
|
|
||||||
let server = OpentsdbServer::create_server(instance.clone(), io_runtime);
|
let server = OpentsdbServer::create_server(instance.clone(), io_runtime);
|
||||||
|
|
||||||
Some((server, addr))
|
result.push((server, addr));
|
||||||
} else {
|
set_opentsdb_handler = true;
|
||||||
None
|
}
|
||||||
};
|
|
||||||
|
|
||||||
let http_server_and_addr = if let Some(http_options) = &opts.http_options {
|
if let Some(http_options) = &opts.http_options {
|
||||||
let http_addr = parse_addr(&http_options.addr)?;
|
let http_addr = parse_addr(&http_options.addr)?;
|
||||||
|
|
||||||
let mut http_server = HttpServer::new(
|
let mut http_server = HttpServer::new(
|
||||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
||||||
|
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
|
||||||
http_options.clone(),
|
http_options.clone(),
|
||||||
);
|
);
|
||||||
if let Some(user_provider) = user_provider.clone() {
|
if let Some(user_provider) = user_provider.clone() {
|
||||||
http_server.set_user_provider(user_provider);
|
http_server.set_user_provider(user_provider);
|
||||||
}
|
}
|
||||||
|
|
||||||
if opentsdb_server_and_addr.is_some() {
|
if set_opentsdb_handler {
|
||||||
http_server.set_opentsdb_handler(instance.clone());
|
http_server.set_opentsdb_handler(instance.clone());
|
||||||
}
|
}
|
||||||
if matches!(
|
if matches!(
|
||||||
@@ -178,34 +177,24 @@ impl Services {
|
|||||||
}
|
}
|
||||||
http_server.set_script_handler(instance.clone());
|
http_server.set_script_handler(instance.clone());
|
||||||
|
|
||||||
Some((Box::new(http_server) as _, http_addr))
|
result.push((Box::new(http_server), http_addr));
|
||||||
} else {
|
}
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let prom_server_and_addr = if let Some(prom_options) = &opts.prom_options {
|
if let Some(prom_options) = &opts.prom_options {
|
||||||
let prom_addr = parse_addr(&prom_options.addr)?;
|
let prom_addr = parse_addr(&prom_options.addr)?;
|
||||||
|
|
||||||
let mut prom_server = PromServer::create_server(instance.clone());
|
let mut prom_server = PromServer::create_server(instance);
|
||||||
if let Some(user_provider) = user_provider {
|
if let Some(user_provider) = user_provider {
|
||||||
prom_server.set_user_provider(user_provider);
|
prom_server.set_user_provider(user_provider);
|
||||||
}
|
}
|
||||||
|
|
||||||
Some((prom_server as _, prom_addr))
|
result.push((prom_server, prom_addr));
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
};
|
||||||
|
|
||||||
try_join!(
|
Ok(result
|
||||||
start_server(http_server_and_addr),
|
.into_iter()
|
||||||
start_server(grpc_server_and_addr),
|
.map(|(server, addr)| (server.name().to_string(), (server, addr)))
|
||||||
start_server(mysql_server_and_addr),
|
.collect())
|
||||||
start_server(postgres_server_and_addr),
|
|
||||||
start_server(opentsdb_server_and_addr),
|
|
||||||
start_server(prom_server_and_addr),
|
|
||||||
)
|
|
||||||
.context(error::StartServerSnafu)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -213,13 +202,10 @@ fn parse_addr(addr: &str) -> Result<SocketAddr> {
|
|||||||
addr.parse().context(error::ParseAddrSnafu { addr })
|
addr.parse().context(error::ParseAddrSnafu { addr })
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start_server(
|
pub async fn start_server(
|
||||||
server_and_addr: Option<(Box<dyn Server>, SocketAddr)>,
|
server_and_addr: &(Box<dyn Server>, SocketAddr),
|
||||||
) -> servers::error::Result<Option<SocketAddr>> {
|
) -> servers::error::Result<Option<SocketAddr>> {
|
||||||
if let Some((server, addr)) = server_and_addr {
|
let (server, addr) = server_and_addr;
|
||||||
info!("Starting server at {}", addr);
|
info!("Starting {} at {}", server.name(), addr);
|
||||||
server.start(addr).await.map(Some)
|
server.start(*addr).await.map(Some)
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -140,8 +140,11 @@ impl Table for DistTable {
         Ok(Arc::new(dist_scan))
     }
 
-    fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
-        Ok(FilterPushDownType::Inexact)
+    fn supports_filters_pushdown(
+        &self,
+        filters: &[&Expr],
+    ) -> table::Result<Vec<FilterPushDownType>> {
+        Ok(vec![FilterPushDownType::Inexact; filters.len()])
     }
 
     async fn alter(&self, context: AlterContext, request: &AlterTableRequest) -> table::Result<()> {
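For background on the signature change: the engine now asks about all pushed-down filters in one call and expects one answer per filter, in the same order. A minimal sketch of an implementation under that contract (this is effectively what both tables in this patch do; `Exact` would instead promise the scan fully applies the filter, while `Inexact` makes the query engine re-apply it afterwards):

    fn supports_filters_pushdown(&self, filters: &[&Expr]) -> table::Result<Vec<FilterPushDownType>> {
        // One entry per incoming filter, in the same order as the input slice.
        Ok(vec![FilterPushDownType::Inexact; filters.len()])
    }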
@@ -34,6 +34,7 @@ use partition::route::TableRoutes;
 use servers::grpc::GrpcServer;
 use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
 use servers::Mode;
+use table::engine::{region_name, table_dir};
 use tonic::transport::Server;
 use tower::service_fn;
 
@@ -56,11 +57,23 @@ pub(crate) struct MockDistributedInstance {
     _guards: Vec<TestGuard>,
 }
 
+impl MockDistributedInstance {
+    pub fn data_tmp_dirs(&self) -> Vec<&TempDir> {
+        self._guards.iter().map(|g| &g._data_tmp_dir).collect()
+    }
+}
+
 pub(crate) struct MockStandaloneInstance {
     pub(crate) instance: Arc<Instance>,
     _guard: TestGuard,
 }
 
+impl MockStandaloneInstance {
+    pub fn data_tmp_dir(&self) -> &TempDir {
+        &self._guard._data_tmp_dir
+    }
+}
+
 pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandaloneInstance {
     let (opts, guard) = create_tmp_dir_and_datanode_opts(test_name);
     let datanode_instance = DatanodeInstance::new(&opts).await.unwrap();
@@ -258,7 +271,6 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
         meta_client.clone(),
         catalog_manager,
         datanode_clients.clone(),
-        Default::default(),
     );
     let dist_instance = Arc::new(dist_instance);
     let frontend = Instance::new_distributed(dist_instance.clone());
@@ -270,3 +282,29 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
         _guards: test_guards,
     }
 }
+
+pub fn test_region_dir(
+    dir: &str,
+    catalog_name: &str,
+    schema_name: &str,
+    table_id: u32,
+    region_id: u32,
+) -> String {
+    let table_dir = table_dir(catalog_name, schema_name, table_id);
+    let region_name = region_name(table_id, region_id);
+
+    format!("{}/{}/{}", dir, table_dir, region_name)
+}
+
+pub fn has_parquet_file(sst_dir: &str) -> bool {
+    for entry in std::fs::read_dir(sst_dir).unwrap() {
+        let entry = entry.unwrap();
+        let path = entry.path();
+        if !path.is_dir() {
+            assert_eq!("parquet", path.extension().unwrap());
+            return true;
+        }
+    }
+
+    false
+}
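To make the helpers above concrete, a small worked example using the formats of `region_name` and `region_id` (their definitions appear in the mito engine hunk further down, now imported from `table::engine`); the table id is illustrative.

    // Illustrative: table id 1024, region number 0.
    let table_id: u32 = 1024;
    let region_number: u32 = 0;

    // region_name zero-pads the region number to ten digits.
    assert_eq!(format!("{table_id}_{region_number:010}"), "1024_0000000000");

    // region_id packs the table id into the high 32 bits and the region number
    // into the low 32 bits, so table 1024 / region 0 becomes 1024 * 2^32.
    let region_id: u64 = (u64::from(table_id) << 32) | u64::from(region_number);
    assert_eq!(region_id, 4_398_046_511_104);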
@@ -39,18 +39,45 @@ use crate::service::store::kv::ResettableKvStoreRef;
 use crate::service::store::memory::MemStore;
 use crate::{error, Result};
 
-// Bootstrap the rpc server to serve incoming request
-pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
-    let meta_srv = make_meta_srv(opts.clone()).await?;
-    bootstrap_meta_srv_with_router(opts, router(meta_srv)).await
+#[derive(Clone)]
+pub struct MetaSrvInstance {
+    meta_srv: MetaSrv,
+
+    opts: MetaSrvOptions,
 }
 
-pub async fn bootstrap_meta_srv_with_router(opts: MetaSrvOptions, router: Router) -> Result<()> {
-    let listener = TcpListener::bind(&opts.bind_addr)
+impl MetaSrvInstance {
+    pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
+        let meta_srv = build_meta_srv(&opts).await?;
+
+        Ok(MetaSrvInstance { meta_srv, opts })
+    }
+
+    pub async fn start(&self) -> Result<()> {
+        self.meta_srv.start().await;
+        bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
+
+        Ok(())
+    }
+
+    pub async fn close(&self) -> Result<()> {
+        // TODO: shutdown the router
+        self.meta_srv.shutdown();
+
+        Ok(())
+    }
+}
+
+// Bootstrap the rpc server to serve incoming request
+pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
+    let meta_srv = make_meta_srv(&opts).await?;
+    bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
+}
+
+pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
+    let listener = TcpListener::bind(bind_addr)
         .await
-        .context(error::TcpBindSnafu {
-            addr: &opts.bind_addr,
-        })?;
+        .context(error::TcpBindSnafu { addr: bind_addr })?;
     let listener = TcpListenerStream::new(listener);
 
     router
@@ -72,7 +99,7 @@ pub fn router(meta_srv: MetaSrv) -> Router {
         .add_service(admin::make_admin_service(meta_srv))
 }
 
-pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
+pub async fn build_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
     let (kv_store, election, lock) = if opts.use_memory_store {
         (Arc::new(MemStore::new()) as _, None, None)
     } else {
@@ -107,7 +134,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
     };
 
     let meta_srv = MetaSrvBuilder::new()
-        .options(opts)
+        .options(opts.clone())
         .kv_store(kv_store)
         .in_memory(in_memory)
         .selector(selector)
@@ -117,6 +144,12 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
         .build()
         .await;
 
+    Ok(meta_srv)
+}
+
+pub async fn make_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
+    let meta_srv = build_meta_srv(opts).await?;
+
     meta_srv.start().await;
 
     Ok(meta_srv)
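A short sketch of how the new instance type might be driven from a binary entry point; the function is hypothetical and only strings together the calls introduced above.

    // Hypothetical driver, not part of this patch.
    async fn run_meta_srv(opts: MetaSrvOptions) -> Result<()> {
        let instance = MetaSrvInstance::new(opts).await?;
        // start() first starts MetaSrv itself, then serves the gRPC router on opts.bind_addr.
        instance.start().await?;
        // ... on shutdown signal ...
        instance.close().await
    }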
@@ -31,13 +31,14 @@ use snafu::{ensure, OptionExt, ResultExt};
 use store_api::storage::{
     ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder, ColumnId,
     CreateOptions, EngineContext as StorageEngineContext, OpenOptions, Region,
-    RegionDescriptorBuilder, RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
+    RegionDescriptorBuilder, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
+};
+use table::engine::{
+    region_id, region_name, table_dir, EngineContext, TableEngine, TableEngineProcedure,
+    TableReference,
 };
-use table::engine::{EngineContext, TableEngine, TableEngineProcedure, TableReference};
 use table::error::TableOperationSnafu;
-use table::metadata::{
-    TableId, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion,
-};
+use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion};
 use table::requests::{
     AlterKind, AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
 };
@@ -59,22 +60,6 @@ pub const MITO_ENGINE: &str = "mito";
 pub const INIT_COLUMN_ID: ColumnId = 0;
 const INIT_TABLE_VERSION: TableVersion = 0;
 
-/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
-#[inline]
-fn region_name(table_id: TableId, n: u32) -> String {
-    format!("{table_id}_{n:010}")
-}
-
-#[inline]
-fn region_id(table_id: TableId, n: u32) -> RegionId {
-    (u64::from(table_id) << 32) | u64::from(n)
-}
-
-#[inline]
-fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String {
-    format!("{catalog_name}/{schema_name}/{table_id}/")
-}
-
 /// [TableEngine] implementation.
 ///
 /// About mito <https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo>.
@@ -165,6 +150,10 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
         .map_err(BoxedError::new)
         .context(table_error::TableOperationSnafu)
     }
+
+    async fn close(&self) -> TableResult<()> {
+        self.inner.close().await
+    }
 }
 
 impl<S: StorageEngine> TableEngineProcedure for MitoEngine<S> {
@@ -623,6 +612,19 @@ impl<S: StorageEngine> MitoEngineInner<S> {
             .remove(&table_reference.to_string())
             .is_some())
     }
+
+    async fn close(&self) -> TableResult<()> {
+        let _lock = self.table_mutex.lock().await;
+
+        let tables = self.tables.write().unwrap().clone();
+
+        futures::future::try_join_all(tables.values().map(|t| t.close()))
+            .await
+            .map_err(BoxedError::new)
+            .context(table_error::TableOperationSnafu)?;
+
+        Ok(())
+    }
 }
 
 impl<S: StorageEngine> MitoEngineInner<S> {
@@ -25,6 +25,7 @@ use store_api::storage::{
|
|||||||
ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
|
ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
|
||||||
StorageEngine,
|
StorageEngine,
|
||||||
};
|
};
|
||||||
|
use table::engine::{region_id, table_dir};
|
||||||
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
|
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
|
||||||
use table::requests::CreateTableRequest;
|
use table::requests::CreateTableRequest;
|
||||||
|
|
||||||
@@ -146,7 +147,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
|||||||
/// Creates regions for the table.
|
/// Creates regions for the table.
|
||||||
async fn on_create_regions(&mut self) -> Result<Status> {
|
async fn on_create_regions(&mut self) -> Result<Status> {
|
||||||
let engine_ctx = EngineContext::default();
|
let engine_ctx = EngineContext::default();
|
||||||
let table_dir = engine::table_dir(
|
let table_dir = table_dir(
|
||||||
&self.data.request.catalog_name,
|
&self.data.request.catalog_name,
|
||||||
&self.data.request.schema_name,
|
&self.data.request.schema_name,
|
||||||
self.data.request.id,
|
self.data.request.id,
|
||||||
@@ -203,7 +204,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We need to create that region.
|
// We need to create that region.
|
||||||
let region_id = engine::region_id(self.data.request.id, *number);
|
let region_id = region_id(self.data.request.id, *number);
|
||||||
let region_desc = RegionDescriptorBuilder::default()
|
let region_desc = RegionDescriptorBuilder::default()
|
||||||
.id(region_id)
|
.id(region_id)
|
||||||
.name(region_name.clone())
|
.name(region_name.clone())
|
||||||
@@ -234,7 +235,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
|||||||
|
|
||||||
/// Writes metadata to the table manifest.
|
/// Writes metadata to the table manifest.
|
||||||
async fn on_write_table_manifest(&mut self) -> Result<Status> {
|
async fn on_write_table_manifest(&mut self) -> Result<Status> {
|
||||||
let table_dir = engine::table_dir(
|
let table_dir = table_dir(
|
||||||
&self.data.request.catalog_name,
|
&self.data.request.catalog_name,
|
||||||
&self.data.request.schema_name,
|
&self.data.request.schema_name,
|
||||||
self.data.request.id,
|
self.data.request.id,
|
||||||
|
|||||||
@@ -31,14 +31,28 @@ use storage::region::RegionImpl;
|
|||||||
use storage::EngineImpl;
|
use storage::EngineImpl;
|
||||||
use store_api::manifest::Manifest;
|
use store_api::manifest::Manifest;
|
||||||
use store_api::storage::ReadContext;
|
use store_api::storage::ReadContext;
|
||||||
use table::requests::{AddColumnRequest, AlterKind, DeleteRequest, TableOptions};
|
use table::requests::{
|
||||||
|
AddColumnRequest, AlterKind, DeleteRequest, FlushTableRequest, TableOptions,
|
||||||
|
};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::table::test_util;
|
|
||||||
use crate::table::test_util::{
|
use crate::table::test_util::{
|
||||||
new_insert_request, schema_for_test, TestEngineComponents, TABLE_NAME,
|
self, new_insert_request, schema_for_test, setup_table, TestEngineComponents, TABLE_NAME,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub fn has_parquet_file(sst_dir: &str) -> bool {
|
||||||
|
for entry in std::fs::read_dir(sst_dir).unwrap() {
|
||||||
|
let entry = entry.unwrap();
|
||||||
|
let path = entry.path();
|
||||||
|
if !path.is_dir() {
|
||||||
|
assert_eq!("parquet", path.extension().unwrap());
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
async fn setup_table_with_column_default_constraint() -> (TempDir, String, TableRef) {
|
async fn setup_table_with_column_default_constraint() -> (TempDir, String, TableRef) {
|
||||||
let table_name = "test_default_constraint";
|
let table_name = "test_default_constraint";
|
||||||
let column_schemas = vec![
|
let column_schemas = vec![
|
||||||
@@ -752,3 +766,76 @@ async fn test_table_delete_rows() {
|
|||||||
+-------+-----+--------+-------------------------+"
|
+-------+-----+--------+-------------------------+"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_flush_table_all_regions() {
|
||||||
|
let TestEngineComponents {
|
||||||
|
table_ref: table,
|
||||||
|
dir,
|
||||||
|
..
|
||||||
|
} = test_util::setup_test_engine_and_table().await;
|
||||||
|
|
||||||
|
setup_table(table.clone()).await;
|
||||||
|
|
||||||
|
let table_id = 1u32;
|
||||||
|
let region_name = region_name(table_id, 0);
|
||||||
|
|
||||||
|
let table_info = table.table_info();
|
||||||
|
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||||
|
|
||||||
|
let region_dir = format!(
|
||||||
|
"{}/{}/{}",
|
||||||
|
dir.path().to_str().unwrap(),
|
||||||
|
table_dir,
|
||||||
|
region_name
|
||||||
|
);
|
||||||
|
|
||||||
|
assert!(!has_parquet_file(®ion_dir));
|
||||||
|
|
||||||
|
// Trigger flush all region
|
||||||
|
table.flush(None).await.unwrap();
|
||||||
|
|
||||||
|
// Trigger again, wait for the previous task finished
|
||||||
|
table.flush(None).await.unwrap();
|
||||||
|
|
||||||
|
assert!(has_parquet_file(®ion_dir));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_flush_table_with_region_id() {
|
||||||
|
let TestEngineComponents {
|
||||||
|
table_ref: table,
|
||||||
|
dir,
|
||||||
|
..
|
||||||
|
} = test_util::setup_test_engine_and_table().await;
|
||||||
|
|
||||||
|
setup_table(table.clone()).await;
|
||||||
|
|
||||||
|
let table_id = 1u32;
|
||||||
|
let region_name = region_name(table_id, 0);
|
||||||
|
|
||||||
|
let table_info = table.table_info();
|
||||||
|
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||||
|
|
||||||
|
let region_dir = format!(
|
||||||
|
"{}/{}/{}",
|
||||||
|
dir.path().to_str().unwrap(),
|
||||||
|
table_dir,
|
||||||
|
region_name
|
||||||
|
);
|
||||||
|
|
||||||
|
assert!(!has_parquet_file(®ion_dir));
|
||||||
|
|
||||||
|
let req = FlushTableRequest {
|
||||||
|
region_number: Some(0),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Trigger flush all region
|
||||||
|
table.flush(req.region_number).await.unwrap();
|
||||||
|
|
||||||
|
// Trigger again, wait for the previous task finished
|
||||||
|
table.flush(req.region_number).await.unwrap();
|
||||||
|
|
||||||
|
assert!(has_parquet_file(®ion_dir));
|
||||||
|
}
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ use table::requests::{
     AddColumnRequest, AlterKind, AlterTableRequest, DeleteRequest, InsertRequest,
 };
 use table::table::scan::SimpleTableScan;
-use table::table::{AlterContext, Table};
+use table::table::{AlterContext, RegionStat, Table};
 use tokio::sync::Mutex;
 
 use crate::error;
@@ -208,8 +208,8 @@ impl<R: Region> Table for MitoTable<R> {
         Ok(Arc::new(SimpleTableScan::new(stream)))
     }
 
-    fn supports_filter_pushdown(&self, _filter: &Expr) -> table::error::Result<FilterPushDownType> {
-        Ok(FilterPushDownType::Inexact)
+    fn supports_filters_pushdown(&self, filters: &[&Expr]) -> TableResult<Vec<FilterPushDownType>> {
+        Ok(vec![FilterPushDownType::Inexact; filters.len()])
     }
 
     /// Alter table changes the schemas of the table.
@@ -322,6 +322,45 @@ impl<R: Region> Table for MitoTable<R> {
         }
         Ok(rows_deleted)
     }
+
+    async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
+        if let Some(region_number) = region_number {
+            if let Some(region) = self.regions.get(&region_number) {
+                region
+                    .flush()
+                    .await
+                    .map_err(BoxedError::new)
+                    .context(table_error::TableOperationSnafu)?;
+            }
+        } else {
+            futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
+                .await
+                .map_err(BoxedError::new)
+                .context(table_error::TableOperationSnafu)?;
+        }
+
+        Ok(())
+    }
+
+    async fn close(&self) -> TableResult<()> {
+        futures::future::try_join_all(self.regions.values().map(|region| region.close()))
+            .await
+            .map_err(BoxedError::new)
+            .context(table_error::TableOperationSnafu)?;
+
+        Ok(())
+    }
+
+    fn region_stats(&self) -> TableResult<Vec<RegionStat>> {
+        Ok(self
+            .regions
+            .values()
+            .map(|region| RegionStat {
+                region_id: region.id(),
+                disk_usage_bytes: region.disk_usage_bytes(),
+            })
+            .collect())
+    }
 }
 
 struct ChunkStream {
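A usage sketch of the new `Table` surface added here; the helper is hypothetical and `table` can be any table backed by this engine.

    // Hypothetical helper, illustrative only.
    async fn flush_and_report(table: &dyn Table) -> table::Result<()> {
        // Flush only region 0; passing None flushes every region of the table.
        table.flush(Some(0)).await?;

        // Per-region disk usage as reported by the underlying storage regions.
        for stat in table.region_stats()? {
            println!("region {} uses {} bytes", stat.region_id, stat.disk_usage_bytes);
        }
        Ok(())
    }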
@@ -20,7 +20,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
|||||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||||
use datatypes::prelude::ConcreteDataType;
|
use datatypes::prelude::ConcreteDataType;
|
||||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
|
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
|
||||||
use datatypes::vectors::VectorRef;
|
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector, VectorRef};
|
||||||
use log_store::NoopLogStore;
|
use log_store::NoopLogStore;
|
||||||
use object_store::services::Fs as Builder;
|
use object_store::services::Fs as Builder;
|
||||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||||
@@ -30,7 +30,7 @@ use storage::EngineImpl;
|
|||||||
use table::engine::{EngineContext, TableEngine};
|
use table::engine::{EngineContext, TableEngine};
|
||||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
|
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
|
||||||
use table::requests::{CreateTableRequest, InsertRequest, TableOptions};
|
use table::requests::{CreateTableRequest, InsertRequest, TableOptions};
|
||||||
use table::TableRef;
|
use table::{Table, TableRef};
|
||||||
|
|
||||||
use crate::config::EngineConfig;
|
use crate::config::EngineConfig;
|
||||||
use crate::engine::{MitoEngine, MITO_ENGINE};
|
use crate::engine::{MitoEngine, MITO_ENGINE};
|
||||||
@@ -178,3 +178,19 @@ pub async fn setup_mock_engine_and_table(
|
|||||||
|
|
||||||
(mock_engine, table_engine, table, object_store, dir)
|
(mock_engine, table_engine, table, object_store, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn setup_table(table: Arc<dyn Table>) {
|
||||||
|
let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
|
||||||
|
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
|
||||||
|
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||||
|
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||||
|
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
|
||||||
|
|
||||||
|
columns_values.insert("host".to_string(), hosts.clone());
|
||||||
|
columns_values.insert("cpu".to_string(), cpus.clone());
|
||||||
|
columns_values.insert("memory".to_string(), memories.clone());
|
||||||
|
columns_values.insert("ts".to_string(), tss.clone());
|
||||||
|
|
||||||
|
let insert_req = new_insert_request("demo".to_string(), columns_values);
|
||||||
|
assert_eq!(4, table.insert(insert_req).await.unwrap());
|
||||||
|
}
|
||||||
|
|||||||
@@ -200,6 +200,10 @@ impl Region for MockRegion {
     fn disk_usage_bytes(&self) -> u64 {
         0
     }
+
+    async fn flush(&self) -> Result<()> {
+        unimplemented!()
+    }
 }
 
 impl MockRegionInner {
@@ -23,7 +23,7 @@ use datafusion::arrow::datatypes::{DataType, TimeUnit};
 use datafusion::common::{DFField, DFSchema, DFSchemaRef, Result as DataFusionResult, Statistics};
 use datafusion::error::DataFusionError;
 use datafusion::execution::context::TaskContext;
-use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
+use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNodeCore};
 use datafusion::physical_expr::PhysicalSortExpr;
 use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
 use datafusion::physical_plan::{
@@ -37,7 +37,7 @@ use futures::Stream;
 
 use crate::extension_plan::Millisecond;
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct EmptyMetric {
     start: Millisecond,
     end: Millisecond,
@@ -86,9 +86,9 @@ impl EmptyMetric {
     }
 }
 
-impl UserDefinedLogicalNode for EmptyMetric {
-    fn as_any(&self) -> &dyn Any {
-        self as _
+impl UserDefinedLogicalNodeCore for EmptyMetric {
+    fn name(&self) -> &str {
+        "EmptyMetric"
     }
 
     fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -111,12 +111,8 @@ impl UserDefinedLogicalNode for EmptyMetric {
         )
     }
 
-    fn from_template(
-        &self,
-        _exprs: &[datafusion::prelude::Expr],
-        _inputs: &[LogicalPlan],
-    ) -> Arc<dyn UserDefinedLogicalNode> {
-        Arc::new(self.clone())
+    fn from_template(&self, _expr: &[Expr], _inputs: &[LogicalPlan]) -> Self {
+        self.clone()
     }
 }
 
@@ -24,7 +24,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::DFSchemaRef;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::execution::context::TaskContext;
-use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
+use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -42,7 +42,7 @@ use crate::extension_plan::Millisecond;
/// This plan will try to align the input time series, for every timestamp between
/// `start` and `end` with step `interval`. Find in the `lookback` range if data
/// is missing at the given timestamp.
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
pub struct InstantManipulate {
    start: Millisecond,
    end: Millisecond,
@@ -52,9 +52,9 @@ pub struct InstantManipulate {
    input: LogicalPlan,
}

-impl UserDefinedLogicalNode for InstantManipulate {
-    fn as_any(&self) -> &dyn Any {
-        self as _
+impl UserDefinedLogicalNodeCore for InstantManipulate {
+    fn name(&self) -> &str {
+        "InstantManipulate"
    }

    fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -77,21 +77,17 @@ impl UserDefinedLogicalNode for InstantManipulate {
        )
    }

-    fn from_template(
-        &self,
-        _exprs: &[Expr],
-        inputs: &[LogicalPlan],
-    ) -> Arc<dyn UserDefinedLogicalNode> {
+    fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
        assert!(!inputs.is_empty());

-        Arc::new(Self {
+        Self {
            start: self.start,
            end: self.end,
            lookback_delta: self.lookback_delta,
            interval: self.interval,
            time_index_column: self.time_index_column.clone(),
            input: inputs[0].clone(),
-        })
+        }
    }
}
@@ -22,7 +22,7 @@ use datafusion::arrow::compute;
use datafusion::common::{DFSchemaRef, Result as DataFusionResult, Statistics};
use datafusion::error::DataFusionError;
use datafusion::execution::context::TaskContext;
-use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
+use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -43,7 +43,7 @@ use crate::extension_plan::Millisecond;
/// - bias sample's timestamp by offset
/// - sort the record batch based on timestamp column
/// - remove NaN values
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SeriesNormalize {
    offset: Millisecond,
    time_index_column_name: String,
@@ -51,9 +51,9 @@ pub struct SeriesNormalize {
    input: LogicalPlan,
}

-impl UserDefinedLogicalNode for SeriesNormalize {
-    fn as_any(&self) -> &dyn Any {
-        self as _
+impl UserDefinedLogicalNodeCore for SeriesNormalize {
+    fn name(&self) -> &str {
+        "SeriesNormalize"
    }

    fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -76,18 +76,14 @@ impl UserDefinedLogicalNode for SeriesNormalize {
        )
    }

-    fn from_template(
-        &self,
-        _exprs: &[datafusion::logical_expr::Expr],
-        inputs: &[LogicalPlan],
-    ) -> Arc<dyn UserDefinedLogicalNode> {
+    fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
        assert!(!inputs.is_empty());

-        Arc::new(Self {
+        Self {
            offset: self.offset,
            time_index_column_name: self.time_index_column_name.clone(),
            input: inputs[0].clone(),
-        })
+        }
    }
}
@@ -26,7 +26,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::{DFField, DFSchema, DFSchemaRef};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::execution::context::TaskContext;
-use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
+use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -42,7 +42,7 @@ use crate::range_array::RangeArray;
///
/// This plan will "fold" time index and value columns into [RangeArray]s, and truncate
/// other columns to the same length with the "folded" [RangeArray] column.
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
pub struct RangeManipulate {
    start: Millisecond,
    end: Millisecond,
@@ -137,9 +137,9 @@ impl RangeManipulate {
    }
}

-impl UserDefinedLogicalNode for RangeManipulate {
-    fn as_any(&self) -> &dyn Any {
-        self as _
+impl UserDefinedLogicalNodeCore for RangeManipulate {
+    fn name(&self) -> &str {
+        "RangeManipulate"
    }

    fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -162,14 +162,10 @@ impl UserDefinedLogicalNode for RangeManipulate {
        )
    }

-    fn from_template(
-        &self,
-        _exprs: &[Expr],
-        inputs: &[LogicalPlan],
-    ) -> Arc<dyn UserDefinedLogicalNode> {
+    fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
        assert!(!inputs.is_empty());

-        Arc::new(Self {
+        Self {
            start: self.start,
            end: self.end,
            interval: self.interval,
@@ -178,7 +174,7 @@ impl UserDefinedLogicalNode for RangeManipulate {
            value_columns: self.value_columns.clone(),
            input: inputs[0].clone(),
            output_schema: self.output_schema.clone(),
-        })
+        }
    }
}
@@ -23,7 +23,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::DFSchemaRef;
use datafusion::error::Result as DataFusionResult;
use datafusion::execution::context::TaskContext;
-use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
+use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -33,15 +33,15 @@ use datafusion::physical_plan::{
use datatypes::arrow::compute;
use futures::{ready, Stream, StreamExt};

-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SeriesDivide {
    tag_columns: Vec<String>,
    input: LogicalPlan,
}

-impl UserDefinedLogicalNode for SeriesDivide {
-    fn as_any(&self) -> &dyn Any {
-        self as _
+impl UserDefinedLogicalNodeCore for SeriesDivide {
+    fn name(&self) -> &str {
+        "SeriesDivide"
    }

    fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -60,17 +60,13 @@ impl UserDefinedLogicalNode for SeriesDivide {
        write!(f, "PromSeriesDivide: tags={:?}", self.tag_columns)
    }

-    fn from_template(
-        &self,
-        _exprs: &[Expr],
-        inputs: &[LogicalPlan],
-    ) -> Arc<dyn UserDefinedLogicalNode> {
+    fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
        assert!(!inputs.is_empty());

-        Arc::new(Self {
+        Self {
            tag_columns: self.tag_columns.clone(),
            input: inputs[0].clone(),
-        })
+        }
    }
}
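Note: the five extension plans above (EmptyMetric, InstantManipulate, SeriesNormalize, RangeManipulate, SeriesDivide) all receive the same mechanical migration. Below is a minimal sketch of that pattern on a hypothetical `MyPlan` node; it assumes the `UserDefinedLogicalNodeCore` method set of the DataFusion release these hunks target and is not code from this repository.

use datafusion::common::DFSchemaRef;
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};

// The core trait requires Eq + Hash, hence the extra derives in the hunks above.
#[derive(Debug, PartialEq, Eq, Hash)]
struct MyPlan {
    input: LogicalPlan,
}

impl UserDefinedLogicalNodeCore for MyPlan {
    // `as_any()` is gone; nodes are identified by name instead.
    fn name(&self) -> &str {
        "MyPlan"
    }

    fn inputs(&self) -> Vec<&LogicalPlan> {
        vec![&self.input]
    }

    fn schema(&self) -> &DFSchemaRef {
        self.input.schema()
    }

    fn expressions(&self) -> Vec<Expr> {
        vec![]
    }

    fn fmt_for_explain(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "MyPlan")
    }

    // `from_template` now returns `Self` rather than `Arc<dyn UserDefinedLogicalNode>`.
    fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
        assert!(!inputs.is_empty());
        Self {
            input: inputs[0].clone(),
        }
    }
}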
@@ -957,7 +957,12 @@ impl PromPlanner {
            .tag_columns
            .iter()
            .chain(self.ctx.time_index_column.iter())
-            .map(|col| Ok(DfExpr::Column(Column::from(col))));
+            .map(|col| {
+                Ok(DfExpr::Column(Column::new(
+                    self.ctx.table_name.clone(),
+                    col,
+                )))
+            });

        // build computation exprs
        let result_value_columns = self
@@ -1485,7 +1490,7 @@ mod test {
            .unwrap();

        let expected = String::from(
-            "Projection: lhs.tag_0, lhs.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
+            "Projection: some_metric.tag_0, some_metric.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
            \n  Inner Join: lhs.tag_0 = some_metric.tag_0, lhs.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
            \n    SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
            \n      PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
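Note on the planner hunk above: projection expressions switch from unqualified to table-qualified columns, which is why the expected plan in the test changes from `lhs.tag_0` to `some_metric.tag_0`. A sketch of the difference, assuming the planner context's `table_name` is an `Option<String>` (the helper function itself is hypothetical, not repository code):

use datafusion::common::Column;
use datafusion::prelude::Expr as DfExpr;

fn projection_expr(table_name: Option<String>, col: &str) -> DfExpr {
    // Before: DfExpr::Column(Column::from(col)) -> the column prints as `tag_0`.
    // After: carry the relation name so the plan prints `some_metric.tag_0`.
    DfExpr::Column(Column::new(table_name, col))
}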
@@ -21,9 +21,6 @@ mod planner;
use std::sync::Arc;

use async_trait::async_trait;
-use catalog::table_source::DfTableSourceProvider;
-use catalog::CatalogListRef;
-use common_base::Plugins;
use common_error::prelude::BoxedError;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::udf::create_udf;
@@ -36,115 +33,44 @@ use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
use common_telemetry::timer;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::ExecutionPlan;
-use datafusion_sql::planner::{ParserOptions, SqlToRel};
use datatypes::schema::Schema;
-use promql::planner::PromPlanner;
-use promql_parser::parser::EvalStmt;
-use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
-use sql::statements::statement::Statement;

pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
pub use crate::datafusion::planner::DfContextProviderAdapter;
-use crate::error::{
-    DataFusionSnafu, PlanSqlSnafu, QueryExecutionSnafu, QueryPlanSnafu, Result, SqlSnafu,
-};
+use crate::error::{DataFusionSnafu, QueryExecutionSnafu, Result};
use crate::executor::QueryExecutor;
use crate::logical_optimizer::LogicalOptimizer;
-use crate::parser::QueryStatement;
use crate::physical_optimizer::PhysicalOptimizer;
use crate::physical_planner::PhysicalPlanner;
use crate::plan::LogicalPlan;
+use crate::planner::{DfLogicalPlanner, LogicalPlanner};
use crate::query_engine::{QueryEngineContext, QueryEngineState};
use crate::{metric, QueryEngine};

pub struct DatafusionQueryEngine {
-    state: QueryEngineState,
+    state: Arc<QueryEngineState>,
}

impl DatafusionQueryEngine {
-    pub fn new(catalog_list: CatalogListRef, plugins: Arc<Plugins>) -> Self {
-        Self {
-            state: QueryEngineState::new(catalog_list.clone(), plugins),
-        }
-    }
-
-    async fn plan_sql_stmt(
-        &self,
-        stmt: Statement,
-        query_ctx: QueryContextRef,
-    ) -> Result<LogicalPlan> {
-        let session_state = self.state.session_state();
-
-        let df_stmt = (&stmt).try_into().context(SqlSnafu)?;
-
-        let config_options = session_state.config().config_options();
-        let parser_options = ParserOptions {
-            enable_ident_normalization: config_options.sql_parser.enable_ident_normalization,
-            parse_float_as_decimal: config_options.sql_parser.parse_float_as_decimal,
-        };
-
-        let context_provider = DfContextProviderAdapter::try_new(
-            self.state.clone(),
-            session_state,
-            &df_stmt,
-            query_ctx,
-        )
-        .await?;
-        let sql_to_rel = SqlToRel::new_with_options(&context_provider, parser_options);
-
-        let result = sql_to_rel.statement_to_plan(df_stmt).with_context(|_| {
-            let sql = if let Statement::Query(query) = stmt {
-                query.inner.to_string()
-            } else {
-                format!("{stmt:?}")
-            };
-            PlanSqlSnafu { sql }
-        })?;
-        Ok(LogicalPlan::DfPlan(result))
-    }
-
-    // TODO(ruihang): test this method once parser is ready.
-    async fn plan_promql_stmt(
-        &self,
-        stmt: EvalStmt,
-        query_ctx: QueryContextRef,
-    ) -> Result<LogicalPlan> {
-        let table_provider = DfTableSourceProvider::new(
-            self.state.catalog_list().clone(),
-            self.state.disallow_cross_schema_query(),
-            query_ctx.as_ref(),
-        );
-        PromPlanner::stmt_to_plan(table_provider, stmt)
-            .await
-            .map(LogicalPlan::DfPlan)
-            .map_err(BoxedError::new)
-            .context(QueryPlanSnafu)
+    pub fn new(state: Arc<QueryEngineState>) -> Self {
+        Self { state }
    }
}

-// TODO(LFC): Refactor consideration: extract a "Planner" that stores query context and execute queries inside.
#[async_trait]
impl QueryEngine for DatafusionQueryEngine {
+    fn planner(&self) -> Arc<dyn LogicalPlanner> {
+        Arc::new(DfLogicalPlanner::new(self.state.clone()))
+    }
+
    fn name(&self) -> &str {
        "datafusion"
    }

-    async fn statement_to_plan(
-        &self,
-        stmt: QueryStatement,
-        query_ctx: QueryContextRef,
-    ) -> Result<LogicalPlan> {
-        match stmt {
-            QueryStatement::Sql(stmt) => self.plan_sql_stmt(stmt, query_ctx).await,
-            QueryStatement::Promql(stmt) => self.plan_promql_stmt(stmt, query_ctx).await,
-        }
-    }
-
-    async fn describe(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<Schema> {
+    async fn describe(&self, plan: LogicalPlan) -> Result<Schema> {
        // TODO(sunng87): consider cache optmised logical plan between describe
        // and execute
-        let plan = self.statement_to_plan(stmt, query_ctx).await?;
        let optimised_plan = self.optimize(&plan)?;
        optimised_plan.schema()
    }
@@ -159,11 +85,6 @@ impl QueryEngine for DatafusionQueryEngine {
        Ok(Output::Stream(self.execute_stream(&ctx, &physical_plan)?))
    }

-    async fn execute_physical(&self, plan: &Arc<dyn PhysicalPlan>) -> Result<Output> {
-        let ctx = QueryEngineContext::new(self.state.session_state());
-        Ok(Output::Stream(self.execute_stream(&ctx, plan)?))
-    }
-
    fn register_udf(&self, udf: ScalarUdf) {
        self.state.register_udf(udf);
    }
@@ -348,7 +269,8 @@ mod tests {

        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
        let plan = engine
-            .statement_to_plan(stmt, Arc::new(QueryContext::new()))
+            .planner()
+            .plan(stmt, QueryContext::arc())
            .await
            .unwrap();

@@ -369,7 +291,8 @@ mod tests {

        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
        let plan = engine
-            .statement_to_plan(stmt, Arc::new(QueryContext::new()))
+            .planner()
+            .plan(stmt, Arc::new(QueryContext::new()))
            .await
            .unwrap();

@@ -406,11 +329,14 @@ mod tests {

        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();

-        let schema = engine
-            .describe(stmt, Arc::new(QueryContext::new()))
+        let plan = engine
+            .planner()
+            .plan(stmt, QueryContext::arc())
            .await
            .unwrap();

+        let schema = engine.describe(plan).await.unwrap();
+
        assert_eq!(
            schema.column_schemas()[0],
            ColumnSchema::new(
@@ -37,7 +37,7 @@ use crate::error::{CatalogSnafu, DataFusionSnafu, Result};
use crate::query_engine::QueryEngineState;

pub struct DfContextProviderAdapter {
-    engine_state: QueryEngineState,
+    engine_state: Arc<QueryEngineState>,
    session_state: SessionState,
    tables: HashMap<String, Arc<dyn TableSource>>,
    table_provider: DfTableSourceProvider,
@@ -45,7 +45,7 @@ pub struct DfContextProviderAdapter {

impl DfContextProviderAdapter {
    pub(crate) async fn try_new(
-        engine_state: QueryEngineState,
+        engine_state: Arc<QueryEngineState>,
        session_state: SessionState,
        df_stmt: &DfStatement,
        query_ctx: QueryContextRef,
@@ -34,6 +34,8 @@ use datatypes::arrow::datatypes::DataType;
pub struct TypeConversionRule;

impl OptimizerRule for TypeConversionRule {
+    // TODO(ruihang): fix this warning
+    #[allow(deprecated)]
    fn try_optimize(
        &self,
        plan: &LogicalPlan,
@@ -157,7 +157,7 @@ mod test {
            distinct: false, \
            top: None, \
            projection: \
-            [Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None })], \
+            [Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None, opt_replace: None })], \
            into: None, \
            from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: \"t1\", quote_style: None }]\
            ), \
@@ -12,12 +12,94 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::sync::Arc;
+
+use async_trait::async_trait;
+use catalog::table_source::DfTableSourceProvider;
+use common_error::prelude::BoxedError;
+use datafusion::execution::context::SessionState;
+use datafusion_sql::planner::{ParserOptions, SqlToRel};
+use promql::planner::PromPlanner;
+use promql_parser::parser::EvalStmt;
+use session::context::QueryContextRef;
+use snafu::ResultExt;
use sql::statements::statement::Statement;

-use crate::error::Result;
+use crate::error::{PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu};
+use crate::parser::QueryStatement;
use crate::plan::LogicalPlan;
+use crate::query_engine::QueryEngineState;
+use crate::DfContextProviderAdapter;

-/// SQL logical planner.
-pub trait Planner: Send + Sync {
-    fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan>;
+#[async_trait]
+pub trait LogicalPlanner: Send + Sync {
+    async fn plan(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<LogicalPlan>;
+}
+
+pub struct DfLogicalPlanner {
+    engine_state: Arc<QueryEngineState>,
+    session_state: SessionState,
+}
+
+impl DfLogicalPlanner {
+    pub fn new(engine_state: Arc<QueryEngineState>) -> Self {
+        let session_state = engine_state.session_state();
+        Self {
+            engine_state,
+            session_state,
+        }
+    }
+
+    async fn plan_sql(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
+        let df_stmt = (&stmt).try_into().context(SqlSnafu)?;
+
+        let context_provider = DfContextProviderAdapter::try_new(
+            self.engine_state.clone(),
+            self.session_state.clone(),
+            &df_stmt,
+            query_ctx,
+        )
+        .await?;
+
+        let config_options = self.session_state.config().config_options();
+        let parser_options = ParserOptions {
+            enable_ident_normalization: config_options.sql_parser.enable_ident_normalization,
+            parse_float_as_decimal: config_options.sql_parser.parse_float_as_decimal,
+        };
+
+        let sql_to_rel = SqlToRel::new_with_options(&context_provider, parser_options);
+
+        let result = sql_to_rel.statement_to_plan(df_stmt).with_context(|_| {
+            let sql = if let Statement::Query(query) = stmt {
+                query.inner.to_string()
+            } else {
+                format!("{stmt:?}")
+            };
+            PlanSqlSnafu { sql }
+        })?;
+        Ok(LogicalPlan::DfPlan(result))
+    }
+
+    async fn plan_pql(&self, stmt: EvalStmt, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
+        let table_provider = DfTableSourceProvider::new(
+            self.engine_state.catalog_list().clone(),
+            self.engine_state.disallow_cross_schema_query(),
+            query_ctx.as_ref(),
+        );
+        PromPlanner::stmt_to_plan(table_provider, stmt)
+            .await
+            .map(LogicalPlan::DfPlan)
+            .map_err(BoxedError::new)
+            .context(QueryPlanSnafu)
+    }
+}
+
+#[async_trait]
+impl LogicalPlanner for DfLogicalPlanner {
+    async fn plan(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<LogicalPlan> {
+        match stmt {
+            QueryStatement::Sql(stmt) => self.plan_sql(stmt, query_ctx).await,
+            QueryStatement::Promql(stmt) => self.plan_pql(stmt, query_ctx).await,
+        }
+    }
}
@@ -23,7 +23,6 @@ use catalog::CatalogListRef;
use common_base::Plugins;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::{FunctionRef, FUNCTION_REGISTRY};
-use common_query::physical_plan::PhysicalPlan;
use common_query::prelude::ScalarUdf;
use common_query::Output;
use datatypes::schema::Schema;
@@ -33,25 +32,32 @@ use crate::datafusion::DatafusionQueryEngine;
use crate::error::Result;
use crate::parser::QueryStatement;
use crate::plan::LogicalPlan;
+use crate::planner::LogicalPlanner;
pub use crate::query_engine::context::QueryEngineContext;
pub use crate::query_engine::state::QueryEngineState;

-#[async_trait]
-pub trait QueryEngine: Send + Sync {
-    fn name(&self) -> &str;
-
-    async fn statement_to_plan(
+pub type StatementHandlerRef = Arc<dyn StatementHandler>;
+
+// TODO(LFC): Gradually make more statements executed in the form of logical plan, and remove this trait. Tracked in #1010.
+#[async_trait]
+pub trait StatementHandler: Send + Sync {
+    async fn handle_statement(
        &self,
        stmt: QueryStatement,
        query_ctx: QueryContextRef,
-    ) -> Result<LogicalPlan>;
+    ) -> Result<Output>;
+}
+
+#[async_trait]
+pub trait QueryEngine: Send + Sync {
+    fn planner(&self) -> Arc<dyn LogicalPlanner>;
+
+    fn name(&self) -> &str;
+
+    async fn describe(&self, plan: LogicalPlan) -> Result<Schema>;

-    async fn describe(&self, stmt: QueryStatement, query_ctx: QueryContextRef) -> Result<Schema>;
-
    async fn execute(&self, plan: &LogicalPlan) -> Result<Output>;

-    async fn execute_physical(&self, plan: &Arc<dyn PhysicalPlan>) -> Result<Output>;
-
    fn register_udf(&self, udf: ScalarUdf);

    fn register_aggregate_function(&self, func: AggregateFunctionMetaRef);
@@ -65,13 +71,12 @@ pub struct QueryEngineFactory {

impl QueryEngineFactory {
    pub fn new(catalog_list: CatalogListRef) -> Self {
-        let query_engine = Arc::new(DatafusionQueryEngine::new(catalog_list, Default::default()));
-        register_functions(&query_engine);
-        Self { query_engine }
+        Self::new_with_plugins(catalog_list, Default::default())
    }

    pub fn new_with_plugins(catalog_list: CatalogListRef, plugins: Arc<Plugins>) -> Self {
-        let query_engine = Arc::new(DatafusionQueryEngine::new(catalog_list, plugins));
+        let state = Arc::new(QueryEngineState::new(catalog_list, plugins));
+        let query_engine = Arc::new(DatafusionQueryEngine::new(state));
        register_functions(&query_engine);
        Self { query_engine }
    }
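Note: with the trait split above, callers no longer hand statements to the engine; they plan through `planner()` and then execute. A sketch of the end-to-end flow, mirroring the test helper added further down (error handling collapsed to `unwrap`, as those tests do):

use common_query::Output;
use session::context::QueryContext;

use crate::parser::QueryLanguageParser;
use crate::QueryEngineRef;

async fn run_sql(engine: QueryEngineRef, sql: &str) -> Output {
    // Parse into a QueryStatement, plan it with the dedicated LogicalPlanner,
    // then execute the resulting logical plan.
    let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
    let plan = engine
        .planner()
        .plan(stmt, QueryContext::arc())
        .await
        .unwrap();
    engine.execute(&plan).await.unwrap()
}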
@@ -24,14 +24,10 @@ use datatypes::vectors::{Helper, StringVector};
use once_cell::sync::Lazy;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
-use sql::statements::explain::Explain;
use sql::statements::show::{ShowDatabases, ShowKind, ShowTables};
-use sql::statements::statement::Statement;
use table::TableRef;

use crate::error::{self, Result};
-use crate::parser::QueryStatement;
-use crate::QueryEngineRef;

const SCHEMAS_COLUMN: &str = "Schemas";
const TABLES_COLUMN: &str = "Tables";
@@ -156,17 +152,6 @@ pub fn show_tables(
    Ok(Output::RecordBatches(records))
}

-pub async fn explain(
-    stmt: Box<Explain>,
-    query_engine: QueryEngineRef,
-    query_ctx: QueryContextRef,
-) -> Result<Output> {
-    let plan = query_engine
-        .statement_to_plan(QueryStatement::Sql(Statement::Explain(*stmt)), query_ctx)
-        .await?;
-    query_engine.execute(&plan).await
-}
-
pub fn describe_table(table: TableRef) -> Result<Output> {
    let table_info = table.table_info();
    let columns_schemas = table_info.meta.schema.column_schemas();
@@ -12,6 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use common_query::Output;
+use common_recordbatch::{util, RecordBatch};
+use session::context::QueryContext;
+
+use crate::parser::QueryLanguageParser;
+use crate::QueryEngineRef;
+
mod argmax_test;
mod argmin_test;
mod mean_test;
@@ -25,3 +32,17 @@ mod time_range_filter_test;

mod function;
mod pow;
+
+async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec<RecordBatch> {
+    let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+    let plan = engine
+        .planner()
+        .plan(stmt, QueryContext::arc())
+        .await
+        .unwrap();
+    let Output::Stream(stream) = engine
+        .execute(&plan)
+        .await
+        .unwrap() else { unreachable!() };
+    util::collect(stream).await.unwrap()
+}
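Note: a sketch of how the new `exec_selection` helper is used by the test modules below; the SQL text is illustrative only, and `create_query_engine` comes from the `function` module shown further down.

use crate::tests::{exec_selection, function::create_query_engine};

#[tokio::test]
async fn selects_through_the_planner() {
    let engine = create_query_engine();
    let batches = exec_selection(engine, "select 1").await;
    assert!(!batches.is_empty());
}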
@@ -14,17 +14,12 @@

use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::WrapperType;
-use session::context::QueryContext;

use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
use crate::QueryEngine;

#[tokio::test]
@@ -52,9 +47,8 @@ async fn test_argmax_success<T>(
where
    T: WrapperType + PartialOrd,
{
-    let result = execute_argmax(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
+    let result = exec_selection(engine.clone(), &sql).await;
    let value = function::get_value_from_batches("argmax", result);

    let numbers =
@@ -77,23 +71,3 @@ where
    assert_eq!(value, expected_value);
    Ok(())
}
-
-async fn execute_argmax<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -14,17 +14,12 @@

use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::WrapperType;
-use session::context::QueryContext;

use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
use crate::QueryEngine;

#[tokio::test]
@@ -52,9 +47,8 @@ async fn test_argmin_success<T>(
where
    T: WrapperType + PartialOrd,
{
-    let result = execute_argmin(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!("select argmin({column_name}) as argmin from {table_name}");
+    let result = exec_selection(engine.clone(), &sql).await;
    let value = function::get_value_from_batches("argmin", result);

    let numbers =
@@ -77,23 +71,3 @@ where
    assert_eq!(value, expected_value);
    Ok(())
}
-
-async fn execute_argmin<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!("select argmin({column_name}) as argmin from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -17,18 +17,16 @@ use std::sync::Arc;
use catalog::local::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use catalog::{CatalogList, CatalogProvider, SchemaProvider};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_query::Output;
-use common_recordbatch::{util, RecordBatch};
+use common_recordbatch::RecordBatch;
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::types::WrapperType;
use datatypes::vectors::Helper;
use rand::Rng;
-use session::context::QueryContext;
use table::test_util::MemTable;

-use crate::parser::QueryLanguageParser;
+use crate::tests::exec_selection;
use crate::{QueryEngine, QueryEngineFactory};

pub fn create_query_engine() -> Arc<dyn QueryEngine> {
@@ -81,18 +79,7 @@ where
    T: WrapperType,
{
    let sql = format!("SELECT {column_name} FROM {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    let numbers = util::collect(recordbatch_stream).await.unwrap();
+    let numbers = exec_selection(engine, &sql).await;

    let column = numbers[0].column(0);
    let column: &<T as Scalar>::VectorType = unsafe { Helper::static_cast(column) };
@@ -14,20 +14,15 @@

use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::WrapperType;
use datatypes::value::OrderedFloat;
use format_num::NumberFormat;
use num_traits::AsPrimitive;
-use session::context::QueryContext;

use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
use crate::QueryEngine;

#[tokio::test]
@@ -55,9 +50,8 @@ async fn test_mean_success<T>(
where
    T: WrapperType + AsPrimitive<f64>,
{
-    let result = execute_mean(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!("select MEAN({column_name}) as mean from {table_name}");
+    let result = exec_selection(engine.clone(), &sql).await;
    let value = function::get_value_from_batches("mean", result);

    let numbers =
@@ -73,23 +67,3 @@ where
    }
    Ok(())
}
-
-async fn execute_mean<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!("select MEAN({column_name}) as mean from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -24,19 +24,17 @@ use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{CreateAccumulatorSnafu, Result as QueryResult};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
-use common_query::Output;
-use common_recordbatch::{util, RecordBatch};
+use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::types::{LogicalPrimitiveType, WrapperType};
use datatypes::vectors::Helper;
use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
-use session::context::QueryContext;
use table::test_util::MemTable;

use crate::error::Result;
-use crate::parser::QueryLanguageParser;
+use crate::tests::exec_selection;
use crate::QueryEngineFactory;

#[derive(Debug, Default)]
@@ -220,18 +218,8 @@ where
    )));

    let sql = format!("select MY_SUM({column_name}) as my_sum from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await?;
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    let batches = util::collect_batches(recordbatch_stream).await.unwrap();
-
+    let batches = exec_selection(engine, &sql).await;
+    let batches = RecordBatches::try_new(batches.first().unwrap().schema.clone(), batches).unwrap();
    let pretty_print = batches.pretty_print().unwrap();
    assert_eq!(expected, pretty_print);
@@ -17,21 +17,17 @@ use std::sync::Arc;
use catalog::local::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use catalog::{CatalogList, CatalogProvider, SchemaProvider};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
+use common_recordbatch::RecordBatch;
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::Int32Vector;
use function::{create_query_engine, get_numbers_from_table};
use num_traits::AsPrimitive;
-use session::context::QueryContext;
use table::test_util::MemTable;

use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
use crate::{QueryEngine, QueryEngineFactory};

#[tokio::test]
@@ -55,18 +51,7 @@ async fn test_percentile_aggregator() -> Result<()> {
async fn test_percentile_correctness() -> Result<()> {
    let engine = create_correctness_engine();
    let sql = String::from("select PERCENTILE(corr_number,88.0) as percentile from corr_numbers");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    let record_batch = util::collect(recordbatch_stream).await.unwrap();
+    let record_batch = exec_selection(engine, &sql).await;
    let column = record_batch[0].column(0);
    let value = column.get(0);
    assert_eq!(value, Value::from(9.280_000_000_000_001_f64));
@@ -81,9 +66,8 @@ async fn test_percentile_success<T>(
where
    T: WrapperType + AsPrimitive<f64>,
{
-    let result = execute_percentile(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
+    let result = exec_selection(engine.clone(), &sql).await;
    let value = function::get_value_from_batches("percentile", result);

    let numbers = get_numbers_from_table::<T>(column_name, table_name, engine.clone()).await;
@@ -95,26 +79,6 @@ where
    Ok(())
}
-
-async fn execute_percentile<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
-
fn create_correctness_engine() -> Arc<dyn QueryEngine> {
    // create engine
    let schema_provider = Arc::new(MemorySchemaProvider::new());
@@ -14,18 +14,13 @@

 use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
 use datatypes::for_all_primitive_types;
 use datatypes::prelude::*;
 use datatypes::types::WrapperType;
 use num_traits::AsPrimitive;
-use session::context::QueryContext;

 use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
 use crate::QueryEngine;

 #[tokio::test]
@@ -57,9 +52,8 @@ where
     PolyT::Native: std::ops::Mul<Output = PolyT::Native> + std::iter::Sum,
     i64: AsPrimitive<PolyT::Native>,
 {
-    let result = execute_polyval(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
+    let result = exec_selection(engine.clone(), &sql).await;
     let value = function::get_value_from_batches("polyval", result);

     let numbers =
@@ -74,23 +68,3 @@ where
     assert_eq!(value, PolyT::from_native(expected_native).into());
     Ok(())
 }
-
-async fn execute_polyval<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -38,6 +38,7 @@ use crate::parser::QueryLanguageParser;
 use crate::plan::LogicalPlan;
 use crate::query_engine::options::QueryOptions;
 use crate::query_engine::QueryEngineFactory;
+use crate::tests::exec_selection;
 use crate::tests::pow::pow;

 #[tokio::test]
@@ -138,13 +139,15 @@ async fn test_query_validate() -> Result<()> {

     let stmt = QueryLanguageParser::parse_sql("select number from public.numbers").unwrap();
     assert!(engine
-        .statement_to_plan(stmt, QueryContext::arc())
+        .planner()
+        .plan(stmt, QueryContext::arc())
         .await
         .is_ok());

     let stmt = QueryLanguageParser::parse_sql("select number from wrongschema.numbers").unwrap();
     assert!(engine
-        .statement_to_plan(stmt, QueryContext::arc())
+        .planner()
+        .plan(stmt, QueryContext::arc())
         .await
         .is_err());
     Ok(())
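Note: this hunk (and the PyScript hunk further down) moves planning from engine.statement_to_plan(...) onto a planner obtained from the engine. Pieced together from the changed call sites in this diff, the assumed end-to-end flow is roughly:

// Assumed flow, using only names that appear in the changed lines.
let stmt = QueryLanguageParser::parse_sql("select number from public.numbers").unwrap();
let plan = engine
    .planner()
    .plan(stmt, QueryContext::arc())
    .await
    .unwrap();
// Executing the resulting plan is unchanged elsewhere in the diff.
let output = engine.execute(&plan).await.unwrap();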
@@ -174,21 +177,8 @@ async fn test_udf() -> Result<()> {

     engine.register_udf(udf);

-    let stmt =
-        QueryLanguageParser::parse_sql("select my_pow(number, number) as p from numbers limit 10")
-            .unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await?;
-    let recordbatch = match output {
-        Output::Stream(recordbatch) => recordbatch,
-        _ => unreachable!(),
-    };
-
-    let numbers = util::collect(recordbatch).await.unwrap();
+    let sql = "select my_pow(number, number) as p from numbers limit 10";
+    let numbers = exec_selection(engine, sql).await;
     assert_eq!(1, numbers.len());
     assert_eq!(numbers[0].num_columns(), 1);
     assert_eq!(1, numbers[0].schema.num_columns());
@@ -14,19 +14,14 @@

 use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
 use datatypes::for_all_primitive_types;
 use datatypes::types::WrapperType;
 use num_traits::AsPrimitive;
-use session::context::QueryContext;
 use statrs::distribution::{ContinuousCDF, Normal};
 use statrs::statistics::Statistics;

 use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
 use crate::QueryEngine;

 #[tokio::test]
@@ -54,9 +49,10 @@ async fn test_scipy_stats_norm_cdf_success<T>(
 where
     T: WrapperType + AsPrimitive<f64>,
 {
-    let result = execute_scipy_stats_norm_cdf(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!(
+        "select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
+    );
+    let result = exec_selection(engine.clone(), &sql).await;
     let value = function::get_value_from_batches("scipy_stats_norm_cdf", result);

     let numbers =
@@ -71,25 +67,3 @@ where
     assert_eq!(value, expected_value.into());
     Ok(())
 }
-
-async fn execute_scipy_stats_norm_cdf<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!(
-        "select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
-    );
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -14,19 +14,14 @@

 use std::sync::Arc;

-use common_query::Output;
-use common_recordbatch::error::Result as RecordResult;
-use common_recordbatch::{util, RecordBatch};
 use datatypes::for_all_primitive_types;
 use datatypes::types::WrapperType;
 use num_traits::AsPrimitive;
-use session::context::QueryContext;
 use statrs::distribution::{Continuous, Normal};
 use statrs::statistics::Statistics;

 use crate::error::Result;
-use crate::parser::QueryLanguageParser;
-use crate::tests::function;
+use crate::tests::{exec_selection, function};
 use crate::QueryEngine;

 #[tokio::test]
@@ -54,9 +49,10 @@ async fn test_scipy_stats_norm_pdf_success<T>(
 where
     T: WrapperType + AsPrimitive<f64>,
 {
-    let result = execute_scipy_stats_norm_pdf(column_name, table_name, engine.clone())
-        .await
-        .unwrap();
+    let sql = format!(
+        "select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
+    );
+    let result = exec_selection(engine.clone(), &sql).await;
     let value = function::get_value_from_batches("scipy_stats_norm_pdf", result);

     let numbers =
@@ -71,25 +67,3 @@ where
     assert_eq!(value, expected_value.into());
     Ok(())
 }
-
-async fn execute_scipy_stats_norm_pdf<'a>(
-    column_name: &'a str,
-    table_name: &'a str,
-    engine: Arc<dyn QueryEngine>,
-) -> RecordResult<Vec<RecordBatch>> {
-    let sql = format!(
-        "select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
-    );
-    let stmt = QueryLanguageParser::parse_sql(&sql).unwrap();
-    let plan = engine
-        .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-        .await
-        .unwrap();
-
-    let output = engine.execute(&plan).await.unwrap();
-    let recordbatch_stream = match output {
-        Output::Stream(batch) => batch,
-        _ => unreachable!(),
-    };
-    util::collect(recordbatch_stream).await
-}
@@ -26,14 +26,13 @@ use common_time::Timestamp;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
-use session::context::QueryContext;
 use table::metadata::{FilterPushDownType, TableInfoRef};
 use table::predicate::TimeRangePredicateBuilder;
 use table::test_util::MemTable;
 use table::Table;
 use tokio::sync::RwLock;

-use crate::parser::QueryLanguageParser;
+use crate::tests::exec_selection;
 use crate::{QueryEngineFactory, QueryEngineRef};

 struct MemTableWrapper {
@@ -71,8 +70,11 @@ impl Table for MemTableWrapper {
         self.inner.scan(projection, filters, limit).await
     }

-    fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
-        Ok(FilterPushDownType::Exact)
+    fn supports_filters_pushdown(
+        &self,
+        filters: &[&Expr],
+    ) -> table::Result<Vec<FilterPushDownType>> {
+        Ok(vec![FilterPushDownType::Exact; filters.len()])
     }
 }

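Note: supports_filter_pushdown (one Expr in, one decision out) becomes supports_filters_pushdown, which receives every candidate filter at once and must return one FilterPushDownType per filter, in the same order. A hedged sketch of a per-filter decision, using only names visible in this hunk:

fn supports_filters_pushdown(
    &self,
    filters: &[&Expr],
) -> table::Result<Vec<FilterPushDownType>> {
    // One decision per incoming filter, returned in the same order. A real
    // table implementation could inspect each Expr and answer differently per
    // filter, rather than declaring every predicate an exact pushdown as this
    // test stub does.
    Ok(filters
        .iter()
        .map(|_expr| FilterPushDownType::Exact)
        .collect())
}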
@@ -128,18 +130,7 @@ struct TimeRangeTester {

 impl TimeRangeTester {
     async fn check(&self, sql: &str, expect: TimestampRange) {
-        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
-        let _ = self
-            .engine
-            .execute(
-                &self
-                    .engine
-                    .statement_to_plan(stmt, Arc::new(QueryContext::new()))
-                    .await
-                    .unwrap(),
-            )
-            .await
-            .unwrap();
+        let _ = exec_selection(self.engine.clone(), sql).await;
         let filters = self.table.get_filters().await;

         let range = TimeRangePredicateBuilder::new("ts", &filters).build();
@@ -6,7 +6,7 @@ license.workspace = true

 [features]
 default = ["python"]
-pyo3_backend = ["pyo3"]
+pyo3_backend = ["dep:pyo3", "arrow/pyarrow"]
 python = [
     "dep:datafusion",
     "dep:datafusion-common",
@@ -60,7 +60,7 @@ rustpython-vm = { git = "https://github.com/discord9/RustPython", optional = tru
     "default",
     "codegen",
 ] }
-pyo3 = { version = "0.18", optional = true }
+pyo3 = { version = "0.18", optional = true, features = ["abi3", "abi3-py37"] }
 session = { path = "../session" }
 snafu = { version = "0.7", features = ["backtraces"] }
 sql = { path = "../sql" }
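Note: enabling pyo3's abi3/abi3-py37 features presumably makes the pyo3 backend link against CPython's stable ABI, so the same compiled artifact should work on Python 3.7 and newer without being rebuilt for each interpreter minor version.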
@@ -279,7 +279,8 @@ impl Script for PyScript {
         );
         let plan = self
             .query_engine
-            .statement_to_plan(stmt, Arc::new(QueryContext::new()))
+            .planner()
+            .plan(stmt, QueryContext::arc())
             .await?;
         let res = self.query_engine.execute(&plan).await?;
         let copr = self.copr.clone();
@@ -378,7 +379,8 @@ import greptime as gt

 @copr(args=["number"], returns = ["number"], sql = "select * from numbers")
 def test(number) -> vector[u32]:
-    return query.sql("select * from numbers")[0][0]
+    from greptime import query
+    return query().sql("select * from numbers")[0][0]
 "#;
     let script = script_engine
         .compile(script, CompileContext::default())
@@ -437,7 +439,8 @@ from greptime import col

 @copr(args=["number"], returns = ["number"], sql = "select * from numbers")
 def test(number) -> vector[u32]:
-    return dataframe.filter(col("number")==col("number")).collect()[0][0]
+    from greptime import dataframe
+    return dataframe().filter(col("number")==col("number")).collect()[0][0]
 "#;
     let script = script_engine
         .compile(script, CompileContext::default())
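Note: in the updated coprocessor examples, query and dataframe are no longer ambient globals; they are imported from the greptime module inside the function body and called as functions (query(), dataframe()) before chaining .sql(...) or .filter(...).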
Some files were not shown because too many files have changed in this diff.