Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: v0.15.0-ni ... v0.1.0-alp (35 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | eff07d5986 |  |
|  | 40c55e4da7 |  |
|  | 8d113550cf |  |
|  | 15a0ed0853 |  |
|  | 44493e9d8c |  |
|  | efd15839d4 |  |
|  | 1f62b36537 |  |
|  | 7b8e65ce93 |  |
|  | 6475339ad0 |  |
|  | 0bd802c70d |  |
|  | 28d07c7a2e |  |
|  | dc33b0c0ce |  |
|  | 4b4f8f27e8 |  |
|  | c994e0de88 |  |
|  | d1ba9ca126 |  |
|  | 0877dabce2 |  |
|  | 8b9671f376 |  |
|  | dcf66d9d52 |  |
|  | 65b61e78ad |  |
|  | 3638704f95 |  |
|  | 8a2f4256bf |  |
|  | 83aeadc506 |  |
|  | f556052951 |  |
|  | 8658d428e0 |  |
|  | e8e11072f8 |  |
|  | 6f0f72c377 |  |
|  | 32030a8194 |  |
|  | 0f7cde2411 |  |
|  | 1ece402ec8 |  |
|  | 7ee54b3e69 |  |
|  | 9b4dcba8cf |  |
|  | c3bcb1111f |  |
|  | a4ebd03a61 |  |
|  | e7daf1226f |  |
|  | 05c0ea9a59 |  |
78 .github/workflows/apidoc.yml vendored
@@ -1,42 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
# on:
# push:
# branches:
# - develop
# paths-ignore:
# - 'docs/**'
# - 'config/**'
# - '**.md'
# - '.dockerignore'
# - 'docker/**'
# - '.gitignore'

name: Build API docs
# name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2023-02-26
# env:
# RUST_TOOLCHAIN: nightly-2023-02-26

jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
# jobs:
# apidoc:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - run: cargo doc --workspace --no-deps --document-private-items
# - run: |
# cat <<EOF > target/doc/index.html
# <!DOCTYPE html>
# <html>
# <head>
# <meta http-equiv="refresh" content="0; url='greptime/'" />
# </head>
# <body></body></html>
# EOF
# - name: Publish dist directory
# uses: JamesIves/github-pages-deploy-action@v4
# with:
# folder: target/doc

7 .github/workflows/develop.yml vendored
@@ -213,10 +213,11 @@ jobs:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
# - name: Install cargo-llvm-cov
# uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
run: cargo nextest run -F pyo3_backend
# run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1

254 .github/workflows/release.yml vendored
@@ -2,9 +2,9 @@ on:
push:
tags:
- "v*.*.*"
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# schedule:
# # At 00:00 on Monday.
# - cron: '0 0 * * 1'
workflow_dispatch:

name: Release
@@ -29,21 +29,23 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
# opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: true
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: true
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: true
# opts: "-F pyo3_backend"
# - arch: aarch64-apple-darwin
# os: macos-latest
# file: greptime-darwin-arm64
# continue-on-error: true
# - arch: x86_64-apple-darwin
# os: macos-latest
# file: greptime-darwin-amd64
# continue-on-error: true
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb-edge'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -103,8 +105,6 @@
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
export PYO3_CROSS_LIB_DIR=${PWD}/python310-aarch64/lib
echo $PYO3_CROSS_LIB_DIR

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -118,8 +118,18 @@
- name: Run tests
run: make unit-test integration-test sqlness-test

- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=python3
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
shell: bash
@@ -144,7 +154,7 @@
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb-edge'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -183,100 +193,142 @@ jobs:
|
||||
files: |
|
||||
**/greptime-*
|
||||
|
||||
docker:
|
||||
name: Build docker image
|
||||
needs: [build]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
# docker:
|
||||
# name: Build docker image
|
||||
# needs: [build]
|
||||
# runs-on: ubuntu-latest
|
||||
# if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
# steps:
|
||||
# - name: Checkout sources
|
||||
# uses: actions/checkout@v3
|
||||
|
||||
- name: Login to UCloud Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: uhub.service.ucloud.cn
|
||||
username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
# - name: Login to UCloud Container Registry
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: uhub.service.ucloud.cn
|
||||
# username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
# password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
|
||||
- name: Login to Dockerhub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
# - name: Login to Dockerhub
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
# shell: bash
|
||||
# if: github.event_name == 'schedule'
|
||||
# run: |
|
||||
# buildTime=`date "+%Y%m%d"`
|
||||
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name != 'schedule'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
# shell: bash
|
||||
# if: github.event_name != 'schedule'
|
||||
# run: |
|
||||
# VERSION=${{ github.ref_name }}
|
||||
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
# - name: Set up QEMU
|
||||
# uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
# - name: Set up buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Download amd64 binary
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: greptime-linux-amd64
|
||||
path: amd64
|
||||
# - name: Download amd64 binary
|
||||
# uses: actions/download-artifact@v3
|
||||
# with:
|
||||
# name: greptime-linux-amd64
|
||||
# path: amd64
|
||||
|
||||
- name: Unzip the amd64 artifacts
|
||||
run: |
|
||||
cd amd64
|
||||
tar xvf greptime-linux-amd64.tgz
|
||||
rm greptime-linux-amd64.tgz
|
||||
# - name: Unzip the amd64 artifacts
|
||||
# run: |
|
||||
# cd amd64
|
||||
# tar xvf greptime-linux-amd64.tgz
|
||||
# rm greptime-linux-amd64.tgz
|
||||
|
||||
- name: Download arm64 binary
|
||||
id: download-arm64
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: greptime-linux-arm64
|
||||
path: arm64
|
||||
# - name: Download arm64 binary
|
||||
# id: download-arm64
|
||||
# uses: actions/download-artifact@v3
|
||||
# with:
|
||||
# name: greptime-linux-arm64
|
||||
# path: arm64
|
||||
|
||||
- name: Unzip the arm64 artifacts
|
||||
id: unzip-arm64
|
||||
if: success() || steps.download-arm64.conclusion == 'success'
|
||||
run: |
|
||||
cd arm64
|
||||
tar xvf greptime-linux-arm64.tgz
|
||||
rm greptime-linux-arm64.tgz
|
||||
# - name: Unzip the arm64 artifacts
|
||||
# id: unzip-arm64
|
||||
# if: success() || steps.download-arm64.conclusion == 'success'
|
||||
# run: |
|
||||
# cd arm64
|
||||
# tar xvf greptime-linux-arm64.tgz
|
||||
# rm greptime-linux-arm64.tgz
|
||||
|
||||
- name: Build and push all
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:latest
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
# - name: Build and push all
|
||||
# uses: docker/build-push-action@v3
|
||||
# if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
|
||||
# with:
|
||||
# context: .
|
||||
# file: ./docker/ci/Dockerfile
|
||||
# push: true
|
||||
# platforms: linux/amd64,linux/arm64
|
||||
# tags: |
|
||||
# greptime/greptimedb:latest
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
- name: Build and push amd64 only
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:latest
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
# - name: Build and push amd64 only
|
||||
# uses: docker/build-push-action@v3
|
||||
# if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
|
||||
# with:
|
||||
# context: .
|
||||
# file: ./docker/ci/Dockerfile
|
||||
# push: true
|
||||
# platforms: linux/amd64
|
||||
# tags: |
|
||||
# greptime/greptimedb:latest
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
# docker-push-uhub:
|
||||
# name: Push docker image to UCloud Container Registry
|
||||
# needs: [docker]
|
||||
# runs-on: ubuntu-latest
|
||||
# if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
# # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
|
||||
# continue-on-error: true
|
||||
# steps:
|
||||
# - name: Checkout sources
|
||||
# uses: actions/checkout@v3
|
||||
|
||||
# - name: Set up QEMU
|
||||
# uses: docker/setup-qemu-action@v2
|
||||
|
||||
# - name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
|
||||
# - name: Login to UCloud Container Registry
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: uhub.service.ucloud.cn
|
||||
# username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
# password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
|
||||
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
# shell: bash
|
||||
# if: github.event_name == 'schedule'
|
||||
# run: |
|
||||
# buildTime=`date "+%Y%m%d"`
|
||||
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
# shell: bash
|
||||
# if: github.event_name != 'schedule'
|
||||
# run: |
|
||||
# VERSION=${{ github.ref_name }}
|
||||
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
# - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
|
||||
# run: |
|
||||
# docker buildx imagetools create \
|
||||
# --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
|
||||
# --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
|
||||
# greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
226 Cargo.lock generated
@@ -135,7 +135,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arrow-flight",
|
||||
"common-base",
|
||||
@@ -190,9 +190,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
|
||||
|
||||
[[package]]
|
||||
name = "arrow"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f3724c874f1517cf898cd1c3ad18ab5071edf893c48e73139ab1e16cf0f2affe"
|
||||
checksum = "f410d3907b6b3647b9e7bca4551274b2e3d716aa940afb67b7287257401da921"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow-arith",
|
||||
@@ -214,9 +214,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-arith"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e958823b8383ca14d0a2e973de478dd7674cd9f72837f8c41c132a0fda6a4e5e"
|
||||
checksum = "f87391cf46473c9bc53dab68cb8872c3a81d4dfd1703f1c8aa397dba9880a043"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -229,9 +229,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-array"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db670eab50e76654065b5aed930f4367101fcddcb2223802007d1e0b4d5a2579"
|
||||
checksum = "d35d5475e65c57cffba06d0022e3006b677515f99b54af33a7cd54f6cdd4a5b5"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow-buffer",
|
||||
@@ -245,9 +245,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-buffer"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9f0e01c931882448c0407bd32311a624b9f099739e94e786af68adc97016b5f2"
|
||||
checksum = "68b4ec72eda7c0207727df96cf200f539749d736b21f3e782ece113e18c1a0a7"
|
||||
dependencies = [
|
||||
"half 2.2.1",
|
||||
"num",
|
||||
@@ -255,9 +255,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-cast"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4bf35d78836c93f80d9362f3ccb47ff5e2c5ecfc270ff42cdf1ef80334961d44"
|
||||
checksum = "0a7285272c9897321dfdba59de29f5b05aeafd3cdedf104a941256d155f6d304"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -271,9 +271,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-csv"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a6aa7c2531d89d01fed8c469a9b1bf97132a0bdf70b4724fe4bbb4537a50880"
|
||||
checksum = "981ee4e7f6a120da04e00d0b39182e1eeacccb59c8da74511de753c56b7fddf7"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -290,9 +290,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-data"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea50db4d1e1e4c2da2bfdea7b6d2722eef64267d5ab680d815f7ae42428057f5"
|
||||
checksum = "27cc673ee6989ea6e4b4e8c7d461f7e06026a096c8f0b1a7288885ff71ae1e56"
|
||||
dependencies = [
|
||||
"arrow-buffer",
|
||||
"arrow-schema",
|
||||
@@ -302,9 +302,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-flight"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ad4c883d509d89f05b2891ad889729f17ab2191b5fd22b0cf3660a28cc40af5"
|
||||
checksum = "bd16945f8f3be0f6170b8ced60d414e56239d91a16a3f8800bc1504bc58b2592"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -325,9 +325,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-ipc"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4042fe6585155d1ec28a8e4937ec901a3ca7a19a22b9f6cd3f551b935cd84f5"
|
||||
checksum = "e37b8b69d9e59116b6b538e8514e0ec63a30f08b617ce800d31cb44e3ef64c1a"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -339,9 +339,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-json"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c907c4ab4f26970a3719dc06e78e8054a01d0c96da3664d23b941e201b33d2b"
|
||||
checksum = "80c3fa0bed7cfebf6d18e46b733f9cb8a1cb43ce8e6539055ca3e1e48a426266"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -358,9 +358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-ord"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e131b447242a32129efc7932f58ed8931b42f35d8701c1a08f9f524da13b1d3c"
|
||||
checksum = "d247dce7bed6a8d6a3c6debfa707a3a2f694383f0c692a39d736a593eae5ef94"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -372,9 +372,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-row"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b591ef70d76f4ac28dd7666093295fece0e5f9298f49af51ea49c001e1635bb6"
|
||||
checksum = "8d609c0181f963cea5c70fddf9a388595b5be441f3aa1d1cdbf728ca834bbd3a"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow-array",
|
||||
@@ -387,9 +387,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-schema"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb327717d87eb94be5eff3b0cb8987f54059d343ee5235abf7f143c85f54cfc8"
|
||||
checksum = "64951898473bfb8e22293e83a44f02874d2257514d49cd95f9aa4afcff183fbc"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"serde",
|
||||
@@ -397,9 +397,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-select"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "79d3c389d1cea86793934f31594f914c8547d82e91e3411d4833ad0aac3266a7"
|
||||
checksum = "2a513d89c2e1ac22b28380900036cf1f3992c6443efc5e079de631dcf83c6888"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -410,9 +410,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arrow-string"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30ee67790496dd310ddbf5096870324431e89aa76453e010020ac29b1184d356"
|
||||
checksum = "5288979b2705dae1114c864d73150629add9153b9b8f1d7ee3963db94c372ba5"
|
||||
dependencies = [
|
||||
"arrow-array",
|
||||
"arrow-buffer",
|
||||
@@ -477,6 +477,8 @@ dependencies = [
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"xz2",
|
||||
"zstd 0.11.2+zstd.1.5.2",
|
||||
"zstd-safe 5.0.2+zstd.1.5.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -752,7 +754,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "benchmarks"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"clap 4.1.8",
|
||||
@@ -1086,7 +1088,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1337,7 +1339,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1360,7 +1362,7 @@ dependencies = [
|
||||
"prost",
|
||||
"rand",
|
||||
"snafu",
|
||||
"substrait 0.1.1",
|
||||
"substrait 0.1.0",
|
||||
"substrait 0.4.1",
|
||||
"tokio",
|
||||
"tonic",
|
||||
@@ -1390,7 +1392,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"build-data",
|
||||
@@ -1418,7 +1420,7 @@ dependencies = [
|
||||
"servers",
|
||||
"session",
|
||||
"snafu",
|
||||
"substrait 0.1.1",
|
||||
"substrait 0.1.0",
|
||||
"tikv-jemalloc-ctl",
|
||||
"tikv-jemallocator",
|
||||
"tokio",
|
||||
@@ -1454,7 +1456,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"bitvec",
|
||||
@@ -1468,7 +1470,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"chrono",
|
||||
@@ -1485,7 +1487,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"snafu",
|
||||
"strum",
|
||||
@@ -1493,7 +1495,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"chrono-tz",
|
||||
@@ -1516,7 +1518,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function-macro"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -1530,7 +1532,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1556,7 +1558,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1574,7 +1576,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"snafu",
|
||||
@@ -1587,7 +1589,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -1607,7 +1609,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-base",
|
||||
@@ -1625,7 +1627,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"datafusion",
|
||||
@@ -1641,7 +1643,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-telemetry",
|
||||
@@ -1655,7 +1657,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"common-error",
|
||||
@@ -1677,14 +1679,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -2106,7 +2108,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "datafusion"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow",
|
||||
@@ -2117,6 +2119,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"dashmap",
|
||||
"datafusion-common",
|
||||
"datafusion-execution",
|
||||
"datafusion-expr",
|
||||
"datafusion-optimizer",
|
||||
"datafusion-physical-expr",
|
||||
@@ -2147,12 +2150,13 @@ dependencies = [
|
||||
"url",
|
||||
"uuid",
|
||||
"xz2",
|
||||
"zstd 0.12.3+zstd.1.5.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "datafusion-common"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2162,10 +2166,27 @@ dependencies = [
|
||||
"sqlparser",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "datafusion-execution"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"dashmap",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"hashbrown 0.13.2",
|
||||
"log",
|
||||
"object_store",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"tempfile",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "datafusion-expr"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow",
|
||||
@@ -2177,7 +2198,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "datafusion-optimizer"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"async-trait",
|
||||
@@ -2194,7 +2215,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "datafusion-physical-expr"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow",
|
||||
@@ -2214,6 +2235,7 @@ dependencies = [
|
||||
"md-5",
|
||||
"num-traits",
|
||||
"paste",
|
||||
"petgraph",
|
||||
"rand",
|
||||
"regex",
|
||||
"sha2",
|
||||
@@ -2224,7 +2246,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "datafusion-row"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"datafusion-common",
|
||||
@@ -2235,7 +2257,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "datafusion-sql"
|
||||
version = "19.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
|
||||
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
|
||||
dependencies = [
|
||||
"arrow-schema",
|
||||
"datafusion-common",
|
||||
@@ -2246,7 +2268,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-compat",
|
||||
@@ -2297,7 +2319,7 @@ dependencies = [
|
||||
"sql",
|
||||
"storage",
|
||||
"store-api",
|
||||
"substrait 0.1.1",
|
||||
"substrait 0.1.0",
|
||||
"table",
|
||||
"table-procedure",
|
||||
"tokio",
|
||||
@@ -2311,7 +2333,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -2759,7 +2781,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-stream",
|
||||
@@ -2802,7 +2824,7 @@ dependencies = [
|
||||
"sql",
|
||||
"store-api",
|
||||
"strfmt",
|
||||
"substrait 0.1.1",
|
||||
"substrait 0.1.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml",
|
||||
@@ -3063,7 +3085,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3a715150563b89d5dfc81a5838eac1f66a5658a1#3a715150563b89d5dfc81a5838eac1f66a5658a1"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0a7b790ed41364b5599dff806d1080bd59c5c9f6#0a7b790ed41364b5599dff806d1080bd59c5c9f6"
|
||||
dependencies = [
|
||||
"prost",
|
||||
"tonic",
|
||||
@@ -3710,7 +3732,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-stream",
|
||||
@@ -3953,7 +3975,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -3980,7 +4002,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"api",
|
||||
@@ -4115,7 +4137,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"arc-swap",
|
||||
@@ -4518,7 +4540,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -4785,9 +4807,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "parquet"
|
||||
version = "33.0.0"
|
||||
version = "34.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1b076829801167d889795cd1957989055543430fa1469cb1f6e32b789bfc764"
|
||||
checksum = "7ac135ecf63ebb5f53dda0921b0b76d6048b3ef631a5f4760b9e8f863ff00cfa"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"arrow-array",
|
||||
@@ -4813,7 +4835,7 @@ dependencies = [
|
||||
"thrift 0.17.0",
|
||||
"tokio",
|
||||
"twox-hash",
|
||||
"zstd",
|
||||
"zstd 0.12.3+zstd.1.5.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4840,7 +4862,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"common-catalog",
|
||||
"common-error",
|
||||
@@ -5361,7 +5383,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
@@ -5406,9 +5428,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost-build"
|
||||
version = "0.11.6"
|
||||
version = "0.11.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e"
|
||||
checksum = "a24be1d23b4552a012093e1b93697b73d644ae9590e3253d878d0e77d411b614"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"heck 0.4.1",
|
||||
@@ -5593,7 +5615,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"approx_eq",
|
||||
"arc-swap",
|
||||
@@ -6606,7 +6628,7 @@ checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
|
||||
|
||||
[[package]]
|
||||
name = "script"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"async-trait",
|
||||
@@ -6836,7 +6858,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"aide",
|
||||
"api",
|
||||
@@ -6912,7 +6934,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-catalog",
|
||||
@@ -7149,7 +7171,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"catalog",
|
||||
@@ -7184,7 +7206,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"client",
|
||||
@@ -7201,9 +7223,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlparser"
|
||||
version = "0.30.0"
|
||||
version = "0.32.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db67dc6ef36edb658196c3fef0464a80b53dbbc194a904e81f9bd4190f9ecc5b"
|
||||
checksum = "0366f270dbabb5cc2e4c88427dc4c08bba144f81e32fbd459a013f26a4d16aa0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"sqlparser_derive",
|
||||
@@ -7262,7 +7284,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "storage"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arrow",
|
||||
@@ -7310,7 +7332,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -7442,7 +7464,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
@@ -7536,7 +7558,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"async-trait",
|
||||
@@ -7553,6 +7575,7 @@ dependencies = [
|
||||
"datafusion",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"datafusion-physical-expr",
|
||||
"datatypes",
|
||||
"derive_builder 0.11.2",
|
||||
"futures",
|
||||
@@ -7571,7 +7594,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table-procedure"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"catalog",
|
||||
@@ -7653,7 +7676,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"axum",
|
||||
@@ -9097,13 +9120,32 @@ version = "1.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "0.11.2+zstd.1.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
|
||||
dependencies = [
|
||||
"zstd-safe 5.0.2+zstd.1.5.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "0.12.3+zstd.1.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
|
||||
dependencies = [
|
||||
"zstd-safe",
|
||||
"zstd-safe 6.0.4+zstd.1.5.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zstd-safe"
|
||||
version = "5.0.2+zstd.1.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"zstd-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
27 Cargo.toml
@@ -45,33 +45,34 @@ members = [
]

[workspace.package]
version = "0.1.1"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
arrow = { version = "33.0" }
arrow-array = "33.0"
arrow-flight = "33.0"
arrow-schema = { version = "33.0", features = ["serde"] }
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
# TODO(LFC): Use official DataFusion, when https://github.com/apache/arrow-datafusion/pull/5542 got merged
datafusion = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-common = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-optimizer = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-physical-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-sql = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
futures = "0.3"
futures-util = "0.3"
parquet = "33.0"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.30"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"

11 config/edge.example.toml Normal file
@@ -0,0 +1,11 @@
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"

# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data"
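The new config/edge.example.toml above only sets the WAL directory plus a storage type and data directory. As a rough illustration of how a file of that shape can be consumed, the sketch below deserializes an equivalent TOML snippet with serde; the struct and field names here are invented for the example and are not the crate's actual configuration types.

```rust
// Minimal sketch: parsing a config shaped like edge.example.toml.
// Assumes the `serde` (with derive) and `toml` crates; types are illustrative only.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct WalOptions {
    dir: String,
}

#[derive(Debug, Deserialize)]
struct StorageOptions {
    // "type" is a Rust keyword, so rename the field for deserialization.
    #[serde(rename = "type")]
    storage_type: String,
    data_dir: String,
}

#[derive(Debug, Deserialize)]
struct EdgeConfig {
    wal: WalOptions,
    storage: StorageOptions,
}

fn main() -> Result<(), toml::de::Error> {
    let raw = r#"
        [wal]
        dir = "/tmp/greptimedb/wal"

        [storage]
        type = "File"
        data_dir = "/tmp/greptimedb/data"
    "#;
    let config: EdgeConfig = toml::from_str(raw)?;
    println!("{config:?}");
    Ok(())
}
```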
@@ -22,20 +22,27 @@ RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependency, using separate `RUN` to separate cache
RUN cargo fetch

# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=${PWD}/python_arm64_build
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variable for cross compiling and compile it
# Build the project in release mode. Set Net fetch with git cli to true to avoid git error.
# cross compiled python is `python3` in path, but pyo3 need `python` in path so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
CARGO_NET_GIT_FETCH_WITH_CLI=1 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image

@@ -26,7 +26,7 @@ make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=${PWD}/python_arm64_build
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0a7b790ed41364b5599dff806d1080bd59c5c9f6" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

@@ -14,6 +14,7 @@

use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -23,6 +24,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}

pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
@@ -118,7 +123,7 @@ impl Client {
self.inner.set_peers(urls);
}

pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()
@@ -131,11 +136,23 @@
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}

pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}

pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}
}

#[cfg(test)]

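The hunk above splits the old make_client into a private find_channel plus two thin constructors, make_flight_client and make_database_client, so the Flight client and the new GreptimeDatabase client share a single peer and channel lookup. A minimal stand-alone sketch of that shape follows; every type here is a simplified stand-in, not the crate's tonic-based clients.

```rust
// Sketch of the "one channel lookup, two clients" pattern from this hunk.
#[derive(Clone)]
struct Channel {
    addr: String,
}

struct FlightClient {
    addr: String,
    channel: Channel,
}

struct DatabaseClient {
    channel: Channel,
}

struct Client {
    peers: Vec<String>,
}

impl Client {
    // Mirrors `find_channel`: resolve a peer address to a channel in one place.
    fn find_channel(&self) -> Result<(String, Channel), String> {
        let addr = self
            .peers
            .first()
            .cloned()
            .ok_or_else(|| "no peer available".to_string())?;
        Ok((addr.clone(), Channel { addr }))
    }

    fn make_flight_client(&self) -> Result<FlightClient, String> {
        let (addr, channel) = self.find_channel()?;
        Ok(FlightClient { addr, channel })
    }

    fn make_database_client(&self) -> Result<DatabaseClient, String> {
        let (_, channel) = self.find_channel()?;
        Ok(DatabaseClient { channel })
    }
}

fn main() {
    let client = Client {
        peers: vec!["127.0.0.1:4001".to_string()],
    };
    let flight = client.make_flight_client().unwrap();
    let database = client.make_database_client().unwrap();
    println!(
        "flight: {} ({}), database: {}",
        flight.addr, flight.channel.addr, database.channel.addr
    );
}
```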
@@ -12,15 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
@@ -31,7 +30,9 @@ use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
@@ -78,8 +79,26 @@ impl Database {
});
}

pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}

pub async fn sql(&self, sql: &str) -> Result<Output> {
@@ -135,6 +154,13 @@ impl Database {
.await
}

pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
.await
}

async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {
@@ -148,7 +174,7 @@ impl Database {
ticket: request.encode_to_vec().into(),
};

let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;

// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
@@ -157,22 +183,22 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
e.code(),
tonic_code,
error
);
error
@@ -203,12 +229,6 @@
}
}

fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,

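With the change above, Database::insert no longer goes through the Flight do_get path: it calls the GreptimeDatabase gRPC service and unwraps the unary response into an affected-row count. The sketch below reproduces just that unwrapping step with hand-rolled stand-ins for the generated protobuf types (GreptimeResponse, greptime_response::Response, AffectedRows); it is illustrative only, not the crate's generated code.

```rust
// Stand-ins for the generated protobuf response types.
struct AffectedRows {
    value: u32,
}

// The generated oneof currently has a single variant, so an irrefutable
// `let` pattern (as in the diff) is enough to unwrap it.
enum Response {
    AffectedRows(AffectedRows),
}

struct GreptimeResponse {
    response: Option<Response>,
}

// Mirrors the tail of the new insert(): error if the response is empty,
// otherwise return the affected-row count.
fn affected_rows(resp: GreptimeResponse) -> Result<u32, String> {
    let response = resp
        .response
        .ok_or_else(|| "GreptimeResponse is empty".to_string())?;
    let Response::AffectedRows(AffectedRows { value }) = response;
    Ok(value)
}

fn main() {
    let resp = GreptimeResponse {
        response: Some(Response::AffectedRows(AffectedRows { value: 3 })),
    };
    assert_eq!(affected_rows(resp), Ok(3));
}
```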
@@ -13,9 +13,10 @@
// limitations under the License.

use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -68,6 +69,13 @@ pub enum Error {
/// Error deserialized from gRPC metadata
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },

// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -77,7 +85,10 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
@@ -95,3 +106,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}

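The new From<Status> impl above centralizes what do_get used to do inline: the server's real status code and message travel in the gRPC metadata under the crate's INNER_ERROR_CODE and INNER_ERROR_MSG keys and are folded into Error::Server, falling back to the raw status message when they are absent. Below is a self-contained sketch of the same conversion; the Status type, the key strings, and the numeric codes are placeholders for illustration, not tonic's types or the crate's actual constants.

```rust
// Sketch: recover a structured error from response metadata, assuming
// hypothetical metadata keys (the real constants live in the client crate).
use std::collections::HashMap;

const INNER_ERROR_CODE: &str = "x-greptime-err-code";
const INNER_ERROR_MSG: &str = "x-greptime-err-msg";

// Simplified stand-in for tonic::Status.
struct Status {
    message: String,
    metadata: HashMap<String, String>,
}

#[derive(Debug)]
enum Error {
    Server { code: u32, msg: String },
}

impl From<Status> for Error {
    fn from(e: Status) -> Self {
        // Prefer the code/message planted by the server in metadata.
        let code = e
            .metadata
            .get(INNER_ERROR_CODE)
            .and_then(|s| s.parse::<u32>().ok())
            .unwrap_or(0); // 0 stands in for "unknown" here
        let msg = e
            .metadata
            .get(INNER_ERROR_MSG)
            .cloned()
            .unwrap_or_else(|| e.message.clone());
        Error::Server { code, msg }
    }
}

fn main() {
    let mut metadata = HashMap::new();
    metadata.insert(INNER_ERROR_CODE.to_string(), "1003".to_string());
    metadata.insert(INNER_ERROR_MSG.to_string(), "table not found".to_string());
    let err: Error = Status {
        message: "internal".to_string(),
        metadata,
    }
    .into();
    println!("{err:?}");
}
```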
@@ -30,9 +30,39 @@ struct Command {
subcmd: SubCommand,
}

pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}

impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}

async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}

impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}

@@ -51,13 +81,28 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}
@@ -104,13 +149,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);

let mut app = cmd.build().await?;

tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}
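Since the removed and added lines sit side by side above, here is the post-refactor flow of `main` in one piece — a sketch with the subcommand plumbing, logging, and metrics setup elided, assuming the `Application::run`/`stop` signatures introduced earlier:

// Build first, so the ctrl-c arm still has a handle it can stop.
let mut app = cmd.build().await?;

tokio::select! {
    result = app.run() => {
        if let Err(err) = result {
            error!(err; "Fatal error occurs!");
        }
    }
    _ = tokio::signal::ctrl_c() => {
        if let Err(err) = app.stop().await {
            error!(err; "Fatal error occurs!");
        }
        info!("Goodbye!");
    }
}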
@@ -17,10 +17,25 @@ mod helper;
|
||||
mod repl;
|
||||
|
||||
use clap::Parser;
|
||||
use repl::Repl;
|
||||
pub use repl::Repl;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
pub struct Instance {
|
||||
repl: Repl,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.repl.run().await
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle cli shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -28,8 +43,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.cmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.cmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,9 +54,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Attach(cmd) => cmd.run().await,
|
||||
SubCommand::Attach(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,8 +72,8 @@ pub(crate) struct AttachCommand {
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
let mut repl = Repl::try_new(&self).await?;
|
||||
repl.run().await
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let repl = Repl::try_new(&self).await?;
|
||||
Ok(Instance { repl })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ use crate::error::{
|
||||
};
|
||||
|
||||
/// Captures the state of the repl, gathers commands and executes them one by one
|
||||
pub(crate) struct Repl {
|
||||
pub struct Repl {
|
||||
/// Rustyline editor for interacting with user on command line
|
||||
rl: Editor<RustylineHelper>,
|
||||
|
||||
|
||||
@@ -24,6 +24,21 @@ use snafu::ResultExt;
|
||||
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.datanode.start().await.context(StartDatanodeSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle datanode shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -31,8 +46,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,9 +57,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -72,19 +87,16 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("Datanode start command: {:#?}", self);
|
||||
|
||||
let opts: DatanodeOptions = self.try_into()?;
|
||||
|
||||
logging::info!("Datanode options: {:#?}", opts);
|
||||
|
||||
Datanode::new(opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?
|
||||
.start()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)
|
||||
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
|
||||
|
||||
Ok(Instance { datanode })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,12 +26,24 @@ pub enum Error {
|
||||
source: datanode::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop datanode, source: {}", source))]
|
||||
StopDatanode {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start frontend, source: {}", source))]
|
||||
StartFrontend {
|
||||
#[snafu(backtrace)]
|
||||
source: frontend::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build meta server, source: {}", source))]
|
||||
BuildMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
source: meta_srv::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start meta server, source: {}", source))]
|
||||
StartMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -138,6 +150,7 @@ impl ErrorExt for Error {
|
||||
Error::StartDatanode { source } => source.status_code(),
|
||||
Error::StartFrontend { source } => source.status_code(),
|
||||
Error::StartMetaServer { source } => source.status_code(),
|
||||
Error::BuildMetaServer { source } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
@@ -156,6 +169,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
|
||||
Error::StopDatanode { source } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ use common_base::Plugins;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::{FrontendInstance, Instance};
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
@@ -34,6 +34,24 @@ use snafu::ResultExt;
|
||||
use crate::error::{self, IllegalAuthConfigSnafu, Result};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
frontend: FeInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.frontend
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle frontend shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -41,8 +59,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,9 +70,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -90,11 +108,11 @@ pub struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
let opts: FrontendOptions = self.try_into()?;
|
||||
|
||||
let mut instance = Instance::try_new_distributed(&opts, plugins.clone())
|
||||
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
@@ -103,7 +121,7 @@ impl StartCommand {
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
instance.start().await.context(error::StartFrontendSnafu)
|
||||
Ok(Instance { frontend: instance })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,13 +14,32 @@
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use meta_srv::bootstrap;
|
||||
use meta_srv::bootstrap::MetaSrvInstance;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{error, toml_loader};
|
||||
|
||||
pub struct Instance {
|
||||
instance: MetaSrvInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
self.instance
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle metasrv shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -28,8 +47,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,9 +58,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -63,16 +82,17 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
|
||||
let opts: MetaSrvOptions = self.try_into()?;
|
||||
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
|
||||
bootstrap::bootstrap_meta_srv(opts)
|
||||
let instance = MetaSrvInstance::new(opts)
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
Ok(Instance { instance })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_telemetry::info;
|
||||
use datanode::datanode::{
|
||||
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
|
||||
@@ -36,7 +37,9 @@ use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
|
||||
use crate::error::{
|
||||
Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu, StopDatanodeSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::toml_loader;
|
||||
|
||||
@@ -47,8 +50,8 @@ pub struct Command {
|
||||
}
|
||||
|
||||
impl Command {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
self.subcmd.run().await
|
||||
pub async fn build(self) -> Result<Instance> {
|
||||
self.subcmd.build().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,9 +61,9 @@ enum SubCommand {
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Start(cmd) => cmd.run().await,
|
||||
SubCommand::Start(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -133,6 +136,40 @@ impl StandaloneOptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
frontend: FeInstance,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
|
||||
self.datanode
|
||||
.start_instance()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
self.datanode
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(StopDatanodeSnafu)?;
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(StopDatanodeSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
@@ -164,7 +201,7 @@ struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
async fn run(self) -> Result<()> {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let enable_memory_catalog = self.enable_memory_catalog;
|
||||
let config_file = self.config_file.clone();
|
||||
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
|
||||
@@ -184,25 +221,18 @@ impl StartCommand {
|
||||
fe_opts, dn_opts
|
||||
);
|
||||
|
||||
let mut datanode = Datanode::new(dn_opts.clone())
|
||||
let datanode = Datanode::new(dn_opts.clone())
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
|
||||
|
||||
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
|
||||
datanode
|
||||
.start_instance()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
|
||||
|
||||
frontend
|
||||
.build_servers(&fe_opts, plugins)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
Ok(Instance { datanode, frontend })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -77,6 +77,7 @@ impl Default for ObjectStoreConfig {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct WalConfig {
|
||||
// wal directory
|
||||
pub dir: String,
|
||||
@@ -108,6 +109,7 @@ impl Default for WalConfig {
|
||||
|
||||
/// Options for table compaction
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct CompactionConfig {
|
||||
/// Max task number that can concurrently run.
|
||||
pub max_inflight_tasks: usize,
|
||||
|
||||
@@ -169,6 +169,13 @@ pub enum Error {
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
|
||||
FlushTable {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start server, source: {}", source))]
|
||||
StartServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -539,6 +546,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
DropTable { source, .. } => source.status_code(),
|
||||
FlushTable { source, .. } => source.status_code(),
|
||||
|
||||
Insert { source, .. } => source.status_code(),
|
||||
Delete { source, .. } => source.status_code(),
|
||||
|
||||
@@ -37,12 +37,14 @@ use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder
|
||||
use object_store::{util, ObjectStore, ObjectStoreBuilder};
|
||||
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use storage::compaction::{CompactionHandler, CompactionSchedulerRef, SimplePicker};
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::{LocalScheduler, SchedulerConfig};
|
||||
use storage::EngineImpl;
|
||||
use store_api::logstore::LogStore;
|
||||
use table::requests::FlushTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProviderRef;
|
||||
use table::Table;
|
||||
@@ -56,7 +58,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
mod grpc;
|
||||
mod script;
|
||||
@@ -233,6 +235,8 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
}
|
||||
|
||||
self.flush_tables().await?;
|
||||
|
||||
self.sql_handler
|
||||
.close()
|
||||
.await
|
||||
@@ -240,6 +244,42 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)
|
||||
}
|
||||
|
||||
pub async fn flush_tables(&self) -> Result<()> {
|
||||
info!("going to flush all schemas");
|
||||
let schema_list = self
|
||||
.catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?
|
||||
.expect("Default schema not found")
|
||||
.schema_names()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
let flush_requests = schema_list
|
||||
.into_iter()
|
||||
.map(|schema_name| {
|
||||
SqlRequest::FlushTable(FlushTableRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name,
|
||||
table_name: None,
|
||||
region_number: None,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let flush_result = futures::future::try_join_all(
|
||||
flush_requests
|
||||
.into_iter()
|
||||
.map(|request| self.sql_handler.execute(request, QueryContext::arc())),
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu);
|
||||
info!("flush success: {}", flush_result.is_ok());
|
||||
flush_result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn sql_handler(&self) -> &SqlHandler {
|
||||
&self.sql_handler
|
||||
}
|
||||
|
||||
@@ -127,7 +127,7 @@ impl Instance {
|
||||
DdlExpr::Alter(expr) => self.handle_alter(expr).await,
|
||||
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
|
||||
DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
|
||||
DdlExpr::FlushTable(_) => todo!(),
|
||||
DdlExpr::FlushTable(expr) => self.handle_flush_table(expr).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,13 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr};
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, FlushTableExpr};
|
||||
use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
|
||||
use common_query::Output;
|
||||
use common_telemetry::info;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use table::requests::DropTableRequest;
|
||||
use table::requests::{DropTableRequest, FlushTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu,
|
||||
@@ -82,6 +82,24 @@ impl Instance {
|
||||
.execute(SqlRequest::DropTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
|
||||
let table_name = if expr.table_name.trim().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(expr.table_name)
|
||||
};
|
||||
|
||||
let req = FlushTableRequest {
|
||||
catalog_name: expr.catalog_name,
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -136,7 +154,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
fn test_create_column_schema() {
|
||||
let column_def = ColumnDef {
|
||||
name: "a".to_string(),
|
||||
|
||||
@@ -39,6 +39,7 @@ mod copy_table_from;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
mod flush_table;
|
||||
pub(crate) mod insert;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -48,6 +49,7 @@ pub enum SqlRequest {
|
||||
CreateDatabase(CreateDatabaseRequest),
|
||||
Alter(AlterTableRequest),
|
||||
DropTable(DropTableRequest),
|
||||
FlushTable(FlushTableRequest),
|
||||
ShowDatabases(ShowDatabases),
|
||||
ShowTables(ShowTables),
|
||||
DescribeTable(DescribeTable),
|
||||
@@ -116,6 +118,7 @@ impl SqlHandler {
|
||||
})?;
|
||||
describe_table(table).context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::FlushTable(req) => self.flush_table(req).await,
|
||||
};
|
||||
if let Err(e) = &result {
|
||||
error!(e; "{query_ctx}");
|
||||
|
||||
src/datanode/src/sql/flush_table.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
|
||||
use common_query::Output;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::FlushTableRequest;
|
||||
|
||||
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
|
||||
let all_table_names = schema.table_names().context(CatalogSnafu)?;
|
||||
futures::future::join_all(all_table_names.iter().map(|table| {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
}))
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
}
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
|
||||
async fn flush_table_inner(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
region: Option<u32>,
|
||||
) -> Result<()> {
|
||||
if schema == DEFAULT_SCHEMA_NAME && table == "numbers" {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let table_ref = TableReference {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
};
|
||||
|
||||
let full_table_name = table_ref.to_string();
|
||||
let table = self.get_table(&table_ref)?;
|
||||
table.flush(region).await.context(error::FlushTableSnafu {
|
||||
table_name: full_table_name,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -19,8 +19,8 @@ use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::{
|
||||
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, InsertRequest,
|
||||
TableId,
|
||||
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, FlushTableExpr,
|
||||
InsertRequest, TableId,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use catalog::helper::{SchemaKey, SchemaValue};
|
||||
@@ -39,7 +39,7 @@ use meta_client::client::MetaClient;
|
||||
use meta_client::rpc::router::DeleteRequest as MetaDeleteRequest;
|
||||
use meta_client::rpc::{
|
||||
CompareAndPutRequest, CreateRequest as MetaCreateRequest, Partition as MetaPartition,
|
||||
RouteResponse, TableName,
|
||||
RouteRequest, RouteResponse, TableName,
|
||||
};
|
||||
use partition::partition::{PartitionBound, PartitionDef};
|
||||
use query::error::QueryExecutionSnafu;
|
||||
@@ -259,6 +259,61 @@ impl DistInstance {
|
||||
Ok(Output::AffectedRows(1))
|
||||
}
|
||||
|
||||
async fn flush_table(&self, table_name: TableName, region_id: Option<u32>) -> Result<Output> {
|
||||
let _ = self
|
||||
.catalog_manager
|
||||
.table(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
)
|
||||
.await
|
||||
.context(CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: table_name.to_string(),
|
||||
})?;
|
||||
|
||||
let route_response = self
|
||||
.meta_client
|
||||
.route(RouteRequest {
|
||||
table_names: vec![table_name.clone()],
|
||||
})
|
||||
.await
|
||||
.context(RequestMetaSnafu)?;
|
||||
|
||||
let expr = FlushTableExpr {
|
||||
catalog_name: table_name.catalog_name.clone(),
|
||||
schema_name: table_name.schema_name.clone(),
|
||||
table_name: table_name.table_name.clone(),
|
||||
region_id,
|
||||
};
|
||||
|
||||
for table_route in &route_response.table_routes {
|
||||
let should_send_rpc = table_route.region_routes.iter().any(|route| {
|
||||
if let Some(region_id) = region_id {
|
||||
region_id == route.region.id as u32
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
if !should_send_rpc {
|
||||
continue;
|
||||
}
|
||||
for datanode in table_route.find_leaders() {
|
||||
debug!("Flushing table {table_name} on Datanode {datanode:?}");
|
||||
|
||||
let client = self.datanode_clients.get_client(&datanode).await;
|
||||
let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
|
||||
client
|
||||
.flush_table(expr.clone())
|
||||
.await
|
||||
.context(RequestDatanodeSnafu)?;
|
||||
}
|
||||
}
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
|
||||
@@ -57,7 +57,11 @@ impl GrpcQueryHandler for DistInstance {
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.drop_table(table_name).await
|
||||
}
|
||||
DdlExpr::FlushTable(_) => todo!(),
|
||||
DdlExpr::FlushTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.flush_table(table_name, expr.region_id).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,14 +91,15 @@ mod test {
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::{
|
||||
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
|
||||
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, InsertRequest,
|
||||
QueryRequest,
|
||||
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
|
||||
InsertRequest, QueryRequest,
|
||||
};
|
||||
use catalog::helper::{TableGlobalKey, TableGlobalValue};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
use tests::{has_parquet_file, test_region_dir};
|
||||
|
||||
use super::*;
|
||||
use crate::table::DistTable;
|
||||
@@ -352,6 +353,108 @@ CREATE TABLE {table_name} (
|
||||
test_insert_and_query_on_auto_created_table(instance).await
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
|
||||
let data_tmp_dirs = instance.data_tmp_dirs();
|
||||
let frontend = instance.frontend.as_ref();
|
||||
|
||||
let table_name = "my_dist_table";
|
||||
let sql = format!(
|
||||
r"
|
||||
CREATE TABLE {table_name} (
|
||||
a INT,
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) PARTITION BY RANGE COLUMNS(a) (
|
||||
PARTITION r0 VALUES LESS THAN (10),
|
||||
PARTITION r1 VALUES LESS THAN (20),
|
||||
PARTITION r2 VALUES LESS THAN (50),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)"
|
||||
);
|
||||
create_table(frontend, sql).await;
|
||||
|
||||
test_insert_and_query_on_existing_table(frontend, table_name).await;
|
||||
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
// Wait for previous task finished
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
|
||||
let table_id = 1024;
|
||||
|
||||
let table = instance
|
||||
.frontend
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
|
||||
|
||||
let TableGlobalValue { regions_id_map, .. } = table
|
||||
.table_global_value(&TableGlobalKey {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let region_to_dn_map = regions_id_map
|
||||
.iter()
|
||||
.map(|(k, v)| (v[0], *k))
|
||||
.collect::<HashMap<u32, u64>>();
|
||||
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
// data_tmp_dirs -> dn: 1..4
|
||||
let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
*region,
|
||||
);
|
||||
has_parquet_file(®ion_dir);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
|
||||
let instance = &standalone.instance;
|
||||
let data_tmp_dir = standalone.data_tmp_dir();
|
||||
|
||||
let table_name = "my_table";
|
||||
let sql = format!("CREATE TABLE {table_name} (a INT, ts TIMESTAMP, TIME INDEX (ts))");
|
||||
|
||||
create_table(instance, sql).await;
|
||||
|
||||
test_insert_and_query_on_existing_table(instance, table_name).await;
|
||||
|
||||
let table_id = 1024;
|
||||
let region_id = 0;
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
region_id,
|
||||
);
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
// Wait for previous task finished
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
async fn create_table(frontend: &Instance, sql: String) {
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql)),
|
||||
@@ -360,6 +463,26 @@ CREATE TABLE {table_name} (
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
|
||||
async fn flush_table(
|
||||
frontend: &Instance,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
region_id: Option<u32>,
|
||||
) {
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::FlushTable(FlushTableExpr {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
region_id,
|
||||
})),
|
||||
});
|
||||
|
||||
let output = query(frontend, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
|
||||
async fn test_insert_and_query_on_existing_table(instance: &Instance, table_name: &str) {
|
||||
let insert = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
|
||||
@@ -152,6 +152,7 @@ impl Services {
|
||||
|
||||
let mut http_server = HttpServer::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
||||
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
|
||||
http_options.clone(),
|
||||
);
|
||||
if let Some(user_provider) = user_provider.clone() {
|
||||
|
||||
@@ -140,8 +140,11 @@ impl Table for DistTable {
|
||||
Ok(Arc::new(dist_scan))
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
|
||||
Ok(FilterPushDownType::Inexact)
|
||||
fn supports_filters_pushdown(
|
||||
&self,
|
||||
filters: &[&Expr],
|
||||
) -> table::Result<Vec<FilterPushDownType>> {
|
||||
Ok(vec![FilterPushDownType::Inexact; filters.len()])
|
||||
}
|
||||
|
||||
async fn alter(&self, context: AlterContext, request: &AlterTableRequest) -> table::Result<()> {
|
||||
|
||||
@@ -74,8 +74,7 @@ impl DistTable {
|
||||
|
||||
let mut success = 0;
|
||||
for join in joins {
|
||||
let object_result = join.await.context(error::JoinTaskSnafu)??;
|
||||
let Output::AffectedRows(rows) = object_result else { unreachable!() };
|
||||
let rows = join.await.context(error::JoinTaskSnafu)?? as usize;
|
||||
success += rows;
|
||||
}
|
||||
Ok(Output::AffectedRows(success))
|
||||
|
||||
@@ -47,7 +47,7 @@ impl DatanodeInstance {
|
||||
Self { table, db }
|
||||
}
|
||||
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<Output> {
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<u32> {
|
||||
self.db.insert(request).await
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ use partition::route::TableRoutes;
|
||||
use servers::grpc::GrpcServer;
|
||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::Mode;
|
||||
use table::engine::{region_name, table_dir};
|
||||
use tonic::transport::Server;
|
||||
use tower::service_fn;
|
||||
|
||||
@@ -56,11 +57,23 @@ pub(crate) struct MockDistributedInstance {
|
||||
_guards: Vec<TestGuard>,
|
||||
}
|
||||
|
||||
impl MockDistributedInstance {
|
||||
pub fn data_tmp_dirs(&self) -> Vec<&TempDir> {
|
||||
self._guards.iter().map(|g| &g._data_tmp_dir).collect()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct MockStandaloneInstance {
|
||||
pub(crate) instance: Arc<Instance>,
|
||||
_guard: TestGuard,
|
||||
}
|
||||
|
||||
impl MockStandaloneInstance {
|
||||
pub fn data_tmp_dir(&self) -> &TempDir {
|
||||
&self._guard._data_tmp_dir
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandaloneInstance {
|
||||
let (opts, guard) = create_tmp_dir_and_datanode_opts(test_name);
|
||||
let datanode_instance = DatanodeInstance::new(&opts).await.unwrap();
|
||||
@@ -112,15 +125,15 @@ pub(crate) async fn create_datanode_client(
|
||||
|
||||
// create a mock datanode grpc service, see example here:
|
||||
// https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
|
||||
let datanode_service = GrpcServer::new(
|
||||
let grpc_server = GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
|
||||
None,
|
||||
runtime,
|
||||
)
|
||||
.create_service();
|
||||
);
|
||||
tokio::spawn(async move {
|
||||
Server::builder()
|
||||
.add_service(datanode_service)
|
||||
.add_service(grpc_server.create_flight_service())
|
||||
.add_service(grpc_server.create_database_service())
|
||||
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
|
||||
.await
|
||||
});
|
||||
@@ -269,3 +282,29 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
|
||||
_guards: test_guards,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn test_region_dir(
|
||||
dir: &str,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_id: u32,
|
||||
region_id: u32,
|
||||
) -> String {
|
||||
let table_dir = table_dir(catalog_name, schema_name, table_id);
|
||||
let region_name = region_name(table_id, region_id);
|
||||
|
||||
format!("{}/{}/{}", dir, table_dir, region_name)
|
||||
}
|
||||
|
||||
pub fn has_parquet_file(sst_dir: &str) -> bool {
|
||||
for entry in std::fs::read_dir(sst_dir).unwrap() {
|
||||
let entry = entry.unwrap();
|
||||
let path = entry.path();
|
||||
if !path.is_dir() {
|
||||
assert_eq!("parquet", path.extension().unwrap());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
@@ -39,18 +39,45 @@ use crate::service::store::kv::ResettableKvStoreRef;
|
||||
use crate::service::store::memory::MemStore;
|
||||
use crate::{error, Result};
|
||||
|
||||
// Bootstrap the rpc server to serve incoming request
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(opts.clone()).await?;
|
||||
bootstrap_meta_srv_with_router(opts, router(meta_srv)).await
|
||||
#[derive(Clone)]
|
||||
pub struct MetaSrvInstance {
|
||||
meta_srv: MetaSrv,
|
||||
|
||||
opts: MetaSrvOptions,
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(opts: MetaSrvOptions, router: Router) -> Result<()> {
|
||||
let listener = TcpListener::bind(&opts.bind_addr)
|
||||
impl MetaSrvInstance {
|
||||
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
|
||||
let meta_srv = build_meta_srv(&opts).await?;
|
||||
|
||||
Ok(MetaSrvInstance { meta_srv, opts })
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
self.meta_srv.start().await;
|
||||
bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
// TODO: shutdown the router
|
||||
self.meta_srv.shutdown();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Bootstrap the rpc server to serve incoming request
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(&opts).await?;
|
||||
bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
|
||||
let listener = TcpListener::bind(bind_addr)
|
||||
.await
|
||||
.context(error::TcpBindSnafu {
|
||||
addr: &opts.bind_addr,
|
||||
})?;
|
||||
.context(error::TcpBindSnafu { addr: bind_addr })?;
|
||||
let listener = TcpListenerStream::new(listener);
|
||||
|
||||
router
|
||||
@@ -72,7 +99,7 @@ pub fn router(meta_srv: MetaSrv) -> Router {
|
||||
.add_service(admin::make_admin_service(meta_srv))
|
||||
}
|
||||
|
||||
pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
pub async fn build_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
|
||||
let (kv_store, election, lock) = if opts.use_memory_store {
|
||||
(Arc::new(MemStore::new()) as _, None, None)
|
||||
} else {
|
||||
@@ -107,7 +134,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
};
|
||||
|
||||
let meta_srv = MetaSrvBuilder::new()
|
||||
.options(opts)
|
||||
.options(opts.clone())
|
||||
.kv_store(kv_store)
|
||||
.in_memory(in_memory)
|
||||
.selector(selector)
|
||||
@@ -117,6 +144,12 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
|
||||
.build()
|
||||
.await;
|
||||
|
||||
Ok(meta_srv)
|
||||
}
|
||||
|
||||
pub async fn make_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
|
||||
let meta_srv = build_meta_srv(opts).await?;
|
||||
|
||||
meta_srv.start().await;
|
||||
|
||||
Ok(meta_srv)
|
||||
|
||||
@@ -31,13 +31,14 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::{
|
||||
ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder, ColumnId,
|
||||
CreateOptions, EngineContext as StorageEngineContext, OpenOptions, Region,
|
||||
RegionDescriptorBuilder, RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
|
||||
RegionDescriptorBuilder, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
|
||||
};
|
||||
use table::engine::{
|
||||
region_id, region_name, table_dir, EngineContext, TableEngine, TableEngineProcedure,
|
||||
TableReference,
|
||||
};
|
||||
use table::engine::{EngineContext, TableEngine, TableEngineProcedure, TableReference};
|
||||
use table::error::TableOperationSnafu;
|
||||
use table::metadata::{
|
||||
TableId, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion,
|
||||
};
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion};
|
||||
use table::requests::{
|
||||
AlterKind, AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
|
||||
};
|
||||
@@ -59,22 +60,6 @@ pub const MITO_ENGINE: &str = "mito";
pub const INIT_COLUMN_ID: ColumnId = 0;
const INIT_TABLE_VERSION: TableVersion = 0;

/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
fn region_name(table_id: TableId, n: u32) -> String {
format!("{table_id}_{n:010}")
}

#[inline]
fn region_id(table_id: TableId, n: u32) -> RegionId {
(u64::from(table_id) << 32) | u64::from(n)
}

#[inline]
fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String {
format!("{catalog_name}/{schema_name}/{table_id}/")
}
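As a quick sanity check of what these helpers (now relocated to `table::engine`) produce, the concrete values below follow directly from the format strings and bit arithmetic shown above; the test wrapper itself is just a sketch and assumes `TableId`/`RegionId` are the usual u32/u64 aliases.

#[test]
fn region_naming_example() {
    let table_id: u32 = 1024;
    let region_number: u32 = 3;

    // "{table_id}_{n:010}" zero-pads the region number to ten digits.
    assert_eq!(region_name(table_id, region_number), "1024_0000000003");

    // Table id occupies the high 32 bits, region number the low 32 bits.
    assert_eq!(region_id(table_id, region_number), (1024u64 << 32) | 3);

    // "{catalog_name}/{schema_name}/{table_id}/"
    assert_eq!(table_dir("greptime", "public", table_id), "greptime/public/1024/");
}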
/// [TableEngine] implementation.
|
||||
///
|
||||
/// About mito <https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo>.
|
||||
|
||||
@@ -25,6 +25,7 @@ use store_api::storage::{
|
||||
ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
|
||||
StorageEngine,
|
||||
};
|
||||
use table::engine::{region_id, table_dir};
|
||||
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
|
||||
use table::requests::CreateTableRequest;
|
||||
|
||||
@@ -146,7 +147,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
/// Creates regions for the table.
|
||||
async fn on_create_regions(&mut self) -> Result<Status> {
|
||||
let engine_ctx = EngineContext::default();
|
||||
let table_dir = engine::table_dir(
|
||||
let table_dir = table_dir(
|
||||
&self.data.request.catalog_name,
|
||||
&self.data.request.schema_name,
|
||||
self.data.request.id,
|
||||
@@ -203,7 +204,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
}
|
||||
|
||||
// We need to create that region.
|
||||
let region_id = engine::region_id(self.data.request.id, *number);
|
||||
let region_id = region_id(self.data.request.id, *number);
|
||||
let region_desc = RegionDescriptorBuilder::default()
|
||||
.id(region_id)
|
||||
.name(region_name.clone())
|
||||
@@ -234,7 +235,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
|
||||
|
||||
/// Writes metadata to the table manifest.
|
||||
async fn on_write_table_manifest(&mut self) -> Result<Status> {
|
||||
let table_dir = engine::table_dir(
|
||||
let table_dir = table_dir(
|
||||
&self.data.request.catalog_name,
|
||||
&self.data.request.schema_name,
|
||||
self.data.request.id,
|
||||
|
||||
@@ -31,14 +31,28 @@ use storage::region::RegionImpl;
|
||||
use storage::EngineImpl;
|
||||
use store_api::manifest::Manifest;
|
||||
use store_api::storage::ReadContext;
|
||||
use table::requests::{AddColumnRequest, AlterKind, DeleteRequest, TableOptions};
|
||||
use table::requests::{
|
||||
AddColumnRequest, AlterKind, DeleteRequest, FlushTableRequest, TableOptions,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
use crate::table::test_util;
|
||||
use crate::table::test_util::{
|
||||
new_insert_request, schema_for_test, TestEngineComponents, TABLE_NAME,
|
||||
self, new_insert_request, schema_for_test, setup_table, TestEngineComponents, TABLE_NAME,
|
||||
};
|
||||
|
||||
pub fn has_parquet_file(sst_dir: &str) -> bool {
|
||||
for entry in std::fs::read_dir(sst_dir).unwrap() {
|
||||
let entry = entry.unwrap();
|
||||
let path = entry.path();
|
||||
if !path.is_dir() {
|
||||
assert_eq!("parquet", path.extension().unwrap());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
async fn setup_table_with_column_default_constraint() -> (TempDir, String, TableRef) {
|
||||
let table_name = "test_default_constraint";
|
||||
let column_schemas = vec![
|
||||
@@ -752,3 +766,76 @@ async fn test_table_delete_rows() {
|
||||
+-------+-----+--------+-------------------------+"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_table_all_regions() {
|
||||
let TestEngineComponents {
|
||||
table_ref: table,
|
||||
dir,
|
||||
..
|
||||
} = test_util::setup_test_engine_and_table().await;
|
||||
|
||||
setup_table(table.clone()).await;
|
||||
|
||||
let table_id = 1u32;
|
||||
let region_name = region_name(table_id, 0);
|
||||
|
||||
let table_info = table.table_info();
|
||||
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||
|
||||
let region_dir = format!(
|
||||
"{}/{}/{}",
|
||||
dir.path().to_str().unwrap(),
|
||||
table_dir,
|
||||
region_name
|
||||
);
|
||||
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
// Trigger flush all region
|
||||
table.flush(None).await.unwrap();
|
||||
|
||||
// Trigger again, wait for the previous task finished
|
||||
table.flush(None).await.unwrap();
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_table_with_region_id() {
|
||||
let TestEngineComponents {
|
||||
table_ref: table,
|
||||
dir,
|
||||
..
|
||||
} = test_util::setup_test_engine_and_table().await;
|
||||
|
||||
setup_table(table.clone()).await;
|
||||
|
||||
let table_id = 1u32;
|
||||
let region_name = region_name(table_id, 0);
|
||||
|
||||
let table_info = table.table_info();
|
||||
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
|
||||
|
||||
let region_dir = format!(
|
||||
"{}/{}/{}",
|
||||
dir.path().to_str().unwrap(),
|
||||
table_dir,
|
||||
region_name
|
||||
);
|
||||
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
let req = FlushTableRequest {
|
||||
region_number: Some(0),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Trigger flush all region
|
||||
table.flush(req.region_number).await.unwrap();
|
||||
|
||||
// Trigger again, wait for the previous task finished
|
||||
table.flush(req.region_number).await.unwrap();
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
@@ -208,8 +208,8 @@ impl<R: Region> Table for MitoTable<R> {
|
||||
Ok(Arc::new(SimpleTableScan::new(stream)))
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::error::Result<FilterPushDownType> {
|
||||
Ok(FilterPushDownType::Inexact)
|
||||
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> TableResult<Vec<FilterPushDownType>> {
|
||||
Ok(vec![FilterPushDownType::Inexact; filters.len()])
|
||||
}
|
||||
|
||||
/// Alter table changes the schemas of the table.
|
||||
@@ -323,6 +323,25 @@ impl<R: Region> Table for MitoTable<R> {
|
||||
Ok(rows_deleted)
|
||||
}
|
||||
|
||||
async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
|
||||
if let Some(region_number) = region_number {
|
||||
if let Some(region) = self.regions.get(®ion_number) {
|
||||
region
|
||||
.flush()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
}
|
||||
} else {
|
||||
futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(table_error::TableOperationSnafu)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close(&self) -> TableResult<()> {
|
||||
futures::future::try_join_all(self.regions.values().map(|region| region.close()))
|
||||
.await
|
||||
|
||||
@@ -20,7 +20,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector, VectorRef};
|
||||
use log_store::NoopLogStore;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
@@ -30,7 +30,7 @@ use storage::EngineImpl;
|
||||
use table::engine::{EngineContext, TableEngine};
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
|
||||
use table::requests::{CreateTableRequest, InsertRequest, TableOptions};
|
||||
use table::TableRef;
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::config::EngineConfig;
|
||||
use crate::engine::{MitoEngine, MITO_ENGINE};
|
||||
@@ -178,3 +178,19 @@ pub async fn setup_mock_engine_and_table(
|
||||
|
||||
(mock_engine, table_engine, table, object_store, dir)
|
||||
}
|
||||
|
||||
pub async fn setup_table(table: Arc<dyn Table>) {
|
||||
let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
|
||||
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
|
||||
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
|
||||
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
|
||||
|
||||
columns_values.insert("host".to_string(), hosts.clone());
|
||||
columns_values.insert("cpu".to_string(), cpus.clone());
|
||||
columns_values.insert("memory".to_string(), memories.clone());
|
||||
columns_values.insert("ts".to_string(), tss.clone());
|
||||
|
||||
let insert_req = new_insert_request("demo".to_string(), columns_values);
|
||||
assert_eq!(4, table.insert(insert_req).await.unwrap());
|
||||
}
|
||||
|
||||
@@ -200,6 +200,10 @@ impl Region for MockRegion {
|
||||
fn disk_usage_bytes(&self) -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
async fn flush(&self) -> Result<()> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl MockRegionInner {
|
||||
|
||||
@@ -23,7 +23,7 @@ use datafusion::arrow::datatypes::{DataType, TimeUnit};
|
||||
use datafusion::common::{DFField, DFSchema, DFSchemaRef, Result as DataFusionResult, Statistics};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -37,7 +37,7 @@ use futures::Stream;
|
||||
|
||||
use crate::extension_plan::Millisecond;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct EmptyMetric {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -86,9 +86,9 @@ impl EmptyMetric {
|
||||
}
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for EmptyMetric {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for EmptyMetric {
|
||||
fn name(&self) -> &str {
|
||||
"EmptyMetric"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -111,12 +111,8 @@ impl UserDefinedLogicalNode for EmptyMetric {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[datafusion::prelude::Expr],
|
||||
_inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
Arc::new(self.clone())
|
||||
fn from_template(&self, _expr: &[Expr], _inputs: &[LogicalPlan]) -> Self {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::DFSchemaRef;
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -42,7 +42,7 @@ use crate::extension_plan::Millisecond;
|
||||
/// This plan will try to align the input time series, for every timestamp between
|
||||
/// `start` and `end` with step `interval`. Find in the `lookback` range if data
|
||||
/// is missing at the given timestamp.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct InstantManipulate {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -52,9 +52,9 @@ pub struct InstantManipulate {
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for InstantManipulate {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for InstantManipulate {
|
||||
fn name(&self) -> &str {
|
||||
"InstantManipulate"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -77,21 +77,17 @@ impl UserDefinedLogicalNode for InstantManipulate {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
start: self.start,
|
||||
end: self.end,
|
||||
lookback_delta: self.lookback_delta,
|
||||
interval: self.interval,
|
||||
time_index_column: self.time_index_column.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ use datafusion::arrow::compute;
|
||||
use datafusion::common::{DFSchemaRef, Result as DataFusionResult, Statistics};
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -43,7 +43,7 @@ use crate::extension_plan::Millisecond;
|
||||
/// - bias sample's timestamp by offset
|
||||
/// - sort the record batch based on timestamp column
|
||||
/// - remove NaN values
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct SeriesNormalize {
|
||||
offset: Millisecond,
|
||||
time_index_column_name: String,
|
||||
@@ -51,9 +51,9 @@ pub struct SeriesNormalize {
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for SeriesNormalize {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for SeriesNormalize {
|
||||
fn name(&self) -> &str {
|
||||
"SeriesNormalize"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -76,18 +76,14 @@ impl UserDefinedLogicalNode for SeriesNormalize {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[datafusion::logical_expr::Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
offset: self.offset,
|
||||
time_index_column_name: self.time_index_column_name.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::{DFField, DFSchema, DFSchemaRef};
|
||||
use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -42,7 +42,7 @@ use crate::range_array::RangeArray;
|
||||
///
|
||||
/// This plan will "fold" time index and value columns into [RangeArray]s, and truncate
|
||||
/// other columns to the same length with the "folded" [RangeArray] column.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct RangeManipulate {
|
||||
start: Millisecond,
|
||||
end: Millisecond,
|
||||
@@ -137,9 +137,9 @@ impl RangeManipulate {
|
||||
}
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for RangeManipulate {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for RangeManipulate {
|
||||
fn name(&self) -> &str {
|
||||
"RangeManipulate"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -162,14 +162,10 @@ impl UserDefinedLogicalNode for RangeManipulate {
|
||||
)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
start: self.start,
|
||||
end: self.end,
|
||||
interval: self.interval,
|
||||
@@ -178,7 +174,7 @@ impl UserDefinedLogicalNode for RangeManipulate {
|
||||
value_columns: self.value_columns.clone(),
|
||||
input: inputs[0].clone(),
|
||||
output_schema: self.output_schema.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ use datafusion::arrow::record_batch::RecordBatch;
|
||||
use datafusion::common::DFSchemaRef;
|
||||
use datafusion::error::Result as DataFusionResult;
|
||||
use datafusion::execution::context::TaskContext;
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
|
||||
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
|
||||
use datafusion::physical_expr::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion::physical_plan::{
|
||||
@@ -33,15 +33,15 @@ use datafusion::physical_plan::{
|
||||
use datatypes::arrow::compute;
|
||||
use futures::{ready, Stream, StreamExt};
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct SeriesDivide {
|
||||
tag_columns: Vec<String>,
|
||||
input: LogicalPlan,
|
||||
}
|
||||
|
||||
impl UserDefinedLogicalNode for SeriesDivide {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self as _
|
||||
impl UserDefinedLogicalNodeCore for SeriesDivide {
|
||||
fn name(&self) -> &str {
|
||||
"SeriesDivide"
|
||||
}
|
||||
|
||||
fn inputs(&self) -> Vec<&LogicalPlan> {
|
||||
@@ -60,17 +60,13 @@ impl UserDefinedLogicalNode for SeriesDivide {
|
||||
write!(f, "PromSeriesDivide: tags={:?}", self.tag_columns)
|
||||
}
|
||||
|
||||
fn from_template(
|
||||
&self,
|
||||
_exprs: &[Expr],
|
||||
inputs: &[LogicalPlan],
|
||||
) -> Arc<dyn UserDefinedLogicalNode> {
|
||||
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
|
||||
assert!(!inputs.is_empty());
|
||||
|
||||
Arc::new(Self {
|
||||
Self {
|
||||
tag_columns: self.tag_columns.clone(),
|
||||
input: inputs[0].clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -157,7 +157,7 @@ mod test {
distinct: false, \
top: None, \
projection: \
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None })], \
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None, opt_replace: None })], \
into: None, \
from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: \"t1\", quote_style: None }]\
), \
|
||||
|
||||
@@ -70,8 +70,11 @@ impl Table for MemTableWrapper {
self.inner.scan(projection, filters, limit).await
}

fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
Ok(FilterPushDownType::Exact)
fn supports_filters_pushdown(
&self,
filters: &[&Expr],
) -> table::Result<Vec<FilterPushDownType>> {
Ok(vec![FilterPushDownType::Exact; filters.len()])
}
}
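The change from the single-filter supports_filter_pushdown to the slice-based supports_filters_pushdown (tracking DataFusion's updated TableProvider API) lets a table answer per filter instead of all-or-nothing, as MemTableWrapper does above by marking every filter Exact. A hypothetical sketch of what a more selective implementation could do; `is_exact` and the import paths are illustrative assumptions, not code from this repository:

use common_query::logical_plan::Expr;
use table::metadata::FilterPushDownType;

// Illustrative only: classify each filter independently, e.g. push down exactly the
// predicates the table can evaluate itself and leave the rest to the query engine.
fn classify_filters(filters: &[&Expr], is_exact: impl Fn(&Expr) -> bool) -> Vec<FilterPushDownType> {
    filters
        .iter()
        .map(|f| {
            if is_exact(f) {
                FilterPushDownType::Exact
            } else {
                FilterPushDownType::Unsupported
            }
        })
        .collect()
}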
|
||||
|
||||
|
||||
@@ -263,8 +263,12 @@ pub enum Error {
#[snafu(backtrace)]
source: common_mem_prof::error::Error,
},

#[snafu(display("Invalid prepare statement: {}", err_msg))]
InvalidPrepareStatement { err_msg: String },

#[snafu(display("Invalid flush argument: {}", err_msg))]
InvalidFlushArgument { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -327,6 +331,7 @@ impl ErrorExt for Error {
DatabaseNotFound { .. } => StatusCode::DatabaseNotFound,
#[cfg(feature = "mem-prof")]
DumpProfileData { source, .. } => source.status_code(),
InvalidFlushArgument { .. } => StatusCode::InvalidArguments,
}
}
|
||||
|
||||
|
||||
@@ -12,11 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod database;
|
||||
pub mod flight;
|
||||
pub mod handler;
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_database_server::{GreptimeDatabase, GreptimeDatabaseServer};
|
||||
use arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
|
||||
use async_trait::async_trait;
|
||||
use common_runtime::Runtime;
|
||||
@@ -27,18 +30,21 @@ use tokio::net::TcpListener;
|
||||
use tokio::sync::oneshot::{self, Sender};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_stream::wrappers::TcpListenerStream;
|
||||
use tonic::Status;
|
||||
|
||||
use crate::auth::UserProviderRef;
|
||||
use crate::error::{AlreadyStartedSnafu, Result, StartGrpcSnafu, TcpBindSnafu};
|
||||
use crate::grpc::database::DatabaseService;
|
||||
use crate::grpc::flight::FlightHandler;
|
||||
use crate::grpc::handler::GreptimeRequestHandler;
|
||||
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
|
||||
use crate::server::Server;
|
||||
|
||||
type TonicResult<T> = std::result::Result<T, Status>;
|
||||
|
||||
pub struct GrpcServer {
|
||||
query_handler: ServerGrpcQueryHandlerRef,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
shutdown_tx: Mutex<Option<Sender<()>>>,
|
||||
runtime: Arc<Runtime>,
|
||||
request_handler: Arc<GreptimeRequestHandler>,
|
||||
}
|
||||
|
||||
impl GrpcServer {
|
||||
@@ -47,21 +53,23 @@ impl GrpcServer {
|
||||
user_provider: Option<UserProviderRef>,
|
||||
runtime: Arc<Runtime>,
|
||||
) -> Self {
|
||||
Self {
|
||||
let request_handler = Arc::new(GreptimeRequestHandler::new(
|
||||
query_handler,
|
||||
user_provider,
|
||||
shutdown_tx: Mutex::new(None),
|
||||
runtime,
|
||||
));
|
||||
Self {
|
||||
shutdown_tx: Mutex::new(None),
|
||||
request_handler,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_service(&self) -> FlightServiceServer<impl FlightService> {
|
||||
let service = FlightHandler::new(
|
||||
self.query_handler.clone(),
|
||||
self.user_provider.clone(),
|
||||
self.runtime.clone(),
|
||||
);
|
||||
FlightServiceServer::new(service)
|
||||
pub fn create_flight_service(&self) -> FlightServiceServer<impl FlightService> {
|
||||
FlightServiceServer::new(FlightHandler::new(self.request_handler.clone()))
|
||||
}
|
||||
|
||||
pub fn create_database_service(&self) -> GreptimeDatabaseServer<impl GreptimeDatabase> {
|
||||
GreptimeDatabaseServer::new(DatabaseService::new(self.request_handler.clone()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +111,8 @@ impl Server for GrpcServer {
|
||||
|
||||
// Would block to serve requests.
|
||||
tonic::transport::Server::builder()
|
||||
.add_service(self.create_service())
|
||||
.add_service(self.create_flight_service())
|
||||
.add_service(self.create_database_service())
|
||||
.serve_with_incoming_shutdown(TcpListenerStream::new(listener), rx.map(drop))
|
||||
.await
|
||||
.context(StartGrpcSnafu)?;
|
||||
|
||||
57 src/servers/src/grpc/database.rs Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_database_server::GreptimeDatabase;
|
||||
use api::v1::{greptime_response, AffectedRows, GreptimeRequest, GreptimeResponse};
|
||||
use async_trait::async_trait;
|
||||
use common_query::Output;
|
||||
use tonic::{Request, Response, Status};
|
||||
|
||||
use crate::grpc::handler::GreptimeRequestHandler;
|
||||
use crate::grpc::TonicResult;
|
||||
|
||||
pub(crate) struct DatabaseService {
|
||||
handler: Arc<GreptimeRequestHandler>,
|
||||
}
|
||||
|
||||
impl DatabaseService {
|
||||
pub(crate) fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
|
||||
Self { handler }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GreptimeDatabase for DatabaseService {
|
||||
async fn handle(
|
||||
&self,
|
||||
request: Request<GreptimeRequest>,
|
||||
) -> TonicResult<Response<GreptimeResponse>> {
|
||||
let request = request.into_inner();
|
||||
let output = self.handler.handle_request(request).await?;
|
||||
let response = match output {
|
||||
Output::AffectedRows(rows) => GreptimeResponse {
|
||||
header: None,
|
||||
response: Some(greptime_response::Response::AffectedRows(AffectedRows {
|
||||
value: rows as _,
|
||||
})),
|
||||
},
|
||||
Output::Stream(_) | Output::RecordBatches(_) => {
|
||||
return Err(Status::unimplemented("GreptimeDatabase::handle for query"));
|
||||
}
|
||||
};
|
||||
Ok(Response::new(response))
|
||||
}
|
||||
}
|
||||
@@ -17,8 +17,7 @@ mod stream;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::{Basic, GreptimeRequest, RequestHeader};
|
||||
use api::v1::GreptimeRequest;
|
||||
use arrow_flight::flight_service_server::FlightService;
|
||||
use arrow_flight::{
|
||||
Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo,
|
||||
@@ -27,40 +26,25 @@ use arrow_flight::{
|
||||
use async_trait::async_trait;
|
||||
use common_grpc::flight::{FlightEncoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
use common_runtime::Runtime;
|
||||
use futures::Stream;
|
||||
use prost::Message;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
use tonic::{Request, Response, Status, Streaming};
|
||||
|
||||
use crate::auth::{Identity, UserProviderRef};
|
||||
use crate::error;
|
||||
use crate::error::Error::Auth;
|
||||
use crate::error::{NotFoundAuthHeaderSnafu, UnsupportedAuthSchemeSnafu};
|
||||
use crate::grpc::flight::stream::FlightRecordBatchStream;
|
||||
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
|
||||
use crate::grpc::handler::GreptimeRequestHandler;
|
||||
use crate::grpc::TonicResult;
|
||||
|
||||
type TonicResult<T> = Result<T, Status>;
|
||||
type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + Sync + 'static>>;
|
||||
|
||||
pub struct FlightHandler {
|
||||
handler: ServerGrpcQueryHandlerRef,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
runtime: Arc<Runtime>,
|
||||
handler: Arc<GreptimeRequestHandler>,
|
||||
}
|
||||
|
||||
impl FlightHandler {
|
||||
pub fn new(
|
||||
handler: ServerGrpcQueryHandlerRef,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
runtime: Arc<Runtime>,
|
||||
) -> Self {
|
||||
Self {
|
||||
handler,
|
||||
user_provider,
|
||||
runtime,
|
||||
}
|
||||
pub fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
|
||||
Self { handler }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,40 +89,8 @@ impl FlightService for FlightHandler {
|
||||
let request =
|
||||
GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;
|
||||
|
||||
let query = request.request.context(error::InvalidQuerySnafu {
|
||||
reason: "Expecting non-empty GreptimeRequest.",
|
||||
})?;
|
||||
let query_ctx = create_query_context(request.header.as_ref());
|
||||
let output = self.handler.handle_request(request).await?;
|
||||
|
||||
auth(
|
||||
self.user_provider.as_ref(),
|
||||
request.header.as_ref(),
|
||||
&query_ctx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let handler = self.handler.clone();
|
||||
|
||||
// Executes requests in another runtime to
|
||||
// 1. prevent the execution from being cancelled unexpected by Tonic runtime;
|
||||
// - Refer to our blog for the rational behind it:
|
||||
// https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
|
||||
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
|
||||
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
|
||||
// 2. avoid the handler blocks the gRPC runtime incidentally.
|
||||
let handle = self
|
||||
.runtime
|
||||
.spawn(async move { handler.do_query(query, query_ctx).await });
|
||||
|
||||
let output = handle.await.map_err(|e| {
|
||||
if e.is_cancelled() {
|
||||
Status::cancelled(e.to_string())
|
||||
} else if e.is_panic() {
|
||||
Status::internal(format!("{:?}", e.into_panic()))
|
||||
} else {
|
||||
Status::unknown(e.to_string())
|
||||
}
|
||||
})??;
|
||||
let stream = to_flight_data_stream(output);
|
||||
Ok(Response::new(stream))
|
||||
}
|
||||
@@ -195,56 +147,3 @@ fn to_flight_data_stream(output: Output) -> TonicStream<FlightData> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
|
||||
let ctx = QueryContext::arc();
|
||||
if let Some(header) = header {
|
||||
if !header.catalog.is_empty() {
|
||||
ctx.set_current_catalog(&header.catalog);
|
||||
}
|
||||
|
||||
if !header.schema.is_empty() {
|
||||
ctx.set_current_schema(&header.schema);
|
||||
}
|
||||
};
|
||||
ctx
|
||||
}
|
||||
|
||||
async fn auth(
|
||||
user_provider: Option<&UserProviderRef>,
|
||||
request_header: Option<&RequestHeader>,
|
||||
query_ctx: &QueryContextRef,
|
||||
) -> TonicResult<()> {
|
||||
let Some(user_provider) = user_provider else { return Ok(()) };
|
||||
|
||||
let user_info = match request_header
|
||||
.context(NotFoundAuthHeaderSnafu)?
|
||||
.clone()
|
||||
.authorization
|
||||
.context(NotFoundAuthHeaderSnafu)?
|
||||
.auth_scheme
|
||||
.context(NotFoundAuthHeaderSnafu)?
|
||||
{
|
||||
AuthScheme::Basic(Basic { username, password }) => user_provider
|
||||
.authenticate(
|
||||
Identity::UserId(&username, None),
|
||||
crate::auth::Password::PlainText(&password),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Auth { source: e }),
|
||||
AuthScheme::Token(_) => UnsupportedAuthSchemeSnafu {
|
||||
name: "Token AuthScheme",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
.map_err(|e| Status::unauthenticated(e.to_string()))?;
|
||||
|
||||
user_provider
|
||||
.authorize(
|
||||
&query_ctx.current_catalog(),
|
||||
&query_ctx.current_schema(),
|
||||
&user_info,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Status::permission_denied(e.to_string()))
|
||||
}
|
||||
|
||||
137 src/servers/src/grpc/handler.rs Normal file
@@ -0,0 +1,137 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::{Basic, GreptimeRequest, RequestHeader};
|
||||
use common_query::Output;
|
||||
use common_runtime::Runtime;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::OptionExt;
|
||||
use tonic::Status;
|
||||
|
||||
use crate::auth::{Identity, Password, UserProviderRef};
|
||||
use crate::error::Error::{Auth, UnsupportedAuthScheme};
|
||||
use crate::error::{InvalidQuerySnafu, NotFoundAuthHeaderSnafu};
|
||||
use crate::grpc::TonicResult;
|
||||
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
|
||||
|
||||
pub struct GreptimeRequestHandler {
|
||||
handler: ServerGrpcQueryHandlerRef,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
runtime: Arc<Runtime>,
|
||||
}
|
||||
|
||||
impl GreptimeRequestHandler {
|
||||
pub fn new(
|
||||
handler: ServerGrpcQueryHandlerRef,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
runtime: Arc<Runtime>,
|
||||
) -> Self {
|
||||
Self {
|
||||
handler,
|
||||
user_provider,
|
||||
runtime,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_request(&self, request: GreptimeRequest) -> TonicResult<Output> {
|
||||
let query = request.request.context(InvalidQuerySnafu {
|
||||
reason: "Expecting non-empty GreptimeRequest.",
|
||||
})?;
|
||||
|
||||
let header = request.header.as_ref();
|
||||
let query_ctx = create_query_context(header);
|
||||
|
||||
self.auth(header, &query_ctx).await?;
|
||||
|
||||
let handler = self.handler.clone();
|
||||
|
||||
// Executes requests in another runtime to
|
||||
// 1. prevent the execution from being cancelled unexpected by Tonic runtime;
|
||||
// - Refer to our blog for the rational behind it:
|
||||
// https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
|
||||
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
|
||||
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
|
||||
// 2. avoid the handler blocks the gRPC runtime incidentally.
|
||||
let handle = self
|
||||
.runtime
|
||||
.spawn(async move { handler.do_query(query, query_ctx).await });
|
||||
|
||||
let output = handle.await.map_err(|e| {
|
||||
if e.is_cancelled() {
|
||||
Status::cancelled(e.to_string())
|
||||
} else if e.is_panic() {
|
||||
Status::internal(format!("{:?}", e.into_panic()))
|
||||
} else {
|
||||
Status::unknown(e.to_string())
|
||||
}
|
||||
})??;
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
async fn auth(
|
||||
&self,
|
||||
header: Option<&RequestHeader>,
|
||||
query_ctx: &QueryContextRef,
|
||||
) -> TonicResult<()> {
|
||||
let Some(user_provider) = self.user_provider.as_ref() else { return Ok(()) };
|
||||
|
||||
let auth_scheme = header
|
||||
.and_then(|header| {
|
||||
header
|
||||
.authorization
|
||||
.as_ref()
|
||||
.and_then(|x| x.auth_scheme.clone())
|
||||
})
|
||||
.context(NotFoundAuthHeaderSnafu)?;
|
||||
|
||||
let user_info = match auth_scheme {
|
||||
AuthScheme::Basic(Basic { username, password }) => user_provider
|
||||
.authenticate(
|
||||
Identity::UserId(&username, None),
|
||||
Password::PlainText(&password),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Auth { source: e }),
|
||||
AuthScheme::Token(_) => Err(UnsupportedAuthScheme {
|
||||
name: "Token AuthScheme".to_string(),
|
||||
}),
|
||||
}
|
||||
.map_err(|e| Status::unauthenticated(e.to_string()))?;
|
||||
|
||||
user_provider
|
||||
.authorize(
|
||||
&query_ctx.current_catalog(),
|
||||
&query_ctx.current_schema(),
|
||||
&user_info,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Status::permission_denied(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
|
||||
let ctx = QueryContext::arc();
|
||||
if let Some(header) = header {
|
||||
if !header.catalog.is_empty() {
|
||||
ctx.set_current_catalog(&header.catalog);
|
||||
}
|
||||
if !header.schema.is_empty() {
|
||||
ctx.set_current_schema(&header.schema);
|
||||
}
|
||||
};
|
||||
ctx
|
||||
}
|
||||
@@ -19,6 +19,7 @@ pub mod opentsdb;
|
||||
pub mod prometheus;
|
||||
pub mod script;
|
||||
|
||||
mod admin;
|
||||
#[cfg(feature = "mem-prof")]
|
||||
pub mod mem_prof;
|
||||
|
||||
@@ -56,6 +57,8 @@ use self::authorize::HttpAuth;
|
||||
use self::influxdb::{influxdb_health, influxdb_ping, influxdb_write};
|
||||
use crate::auth::UserProviderRef;
|
||||
use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu};
|
||||
use crate::http::admin::flush;
|
||||
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
|
||||
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
|
||||
use crate::query_handler::{
|
||||
InfluxdbLineProtocolHandlerRef, OpentsdbProtocolHandlerRef, PrometheusProtocolHandlerRef,
|
||||
@@ -96,6 +99,7 @@ pub static PUBLIC_APIS: [&str; 2] = ["/v1/influxdb/ping", "/v1/influxdb/health"]
|
||||
|
||||
pub struct HttpServer {
|
||||
sql_handler: ServerSqlQueryHandlerRef,
|
||||
grpc_handler: ServerGrpcQueryHandlerRef,
|
||||
options: HttpOptions,
|
||||
influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>,
|
||||
opentsdb_handler: Option<OpentsdbProtocolHandlerRef>,
|
||||
@@ -349,9 +353,14 @@ pub struct ApiState {
|
||||
}
|
||||
|
||||
impl HttpServer {
|
||||
pub fn new(sql_handler: ServerSqlQueryHandlerRef, options: HttpOptions) -> Self {
|
||||
pub fn new(
|
||||
sql_handler: ServerSqlQueryHandlerRef,
|
||||
grpc_handler: ServerGrpcQueryHandlerRef,
|
||||
options: HttpOptions,
|
||||
) -> Self {
|
||||
Self {
|
||||
sql_handler,
|
||||
grpc_handler,
|
||||
options,
|
||||
opentsdb_handler: None,
|
||||
influxdb_handler: None,
|
||||
@@ -426,6 +435,10 @@ impl HttpServer {
|
||||
.layer(Extension(api));
|
||||
|
||||
let mut router = Router::new().nest(&format!("/{HTTP_API_VERSION}"), sql_router);
|
||||
router = router.nest(
|
||||
&format!("/{HTTP_API_VERSION}/admin"),
|
||||
self.route_admin(self.grpc_handler.clone()),
|
||||
);
|
||||
|
||||
if let Some(opentsdb_handler) = self.opentsdb_handler.clone() {
|
||||
router = router.nest(
|
||||
@@ -517,6 +530,12 @@ impl HttpServer {
|
||||
.route("/api/put", routing::post(opentsdb::put))
|
||||
.with_state(opentsdb_handler)
|
||||
}
|
||||
|
||||
fn route_admin<S>(&self, grpc_handler: ServerGrpcQueryHandlerRef) -> Router<S> {
|
||||
Router::new()
|
||||
.route("/flush", routing::post(flush))
|
||||
.with_state(grpc_handler)
|
||||
}
|
||||
}
|
||||
|
||||
pub const HTTP_SERVER: &str = "HTTP_SERVER";
|
||||
@@ -578,6 +597,7 @@ mod test {
|
||||
use std::future::pending;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use axum::handler::Handler;
|
||||
use axum::http::StatusCode;
|
||||
use axum::routing::get;
|
||||
@@ -592,12 +612,26 @@ mod test {
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::query_handler::grpc::{GrpcQueryHandler, ServerGrpcQueryHandlerAdaptor};
|
||||
use crate::query_handler::sql::{ServerSqlQueryHandlerAdaptor, SqlQueryHandler};
|
||||
|
||||
struct DummyInstance {
|
||||
_tx: mpsc::Sender<(String, Vec<u8>)>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DummyInstance {
|
||||
type Error = Error;
|
||||
|
||||
async fn do_query(
|
||||
&self,
|
||||
_query: Request,
|
||||
_ctx: QueryContextRef,
|
||||
) -> std::result::Result<Output, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SqlQueryHandler for DummyInstance {
|
||||
type Error = Error;
|
||||
@@ -637,8 +671,10 @@ mod test {
|
||||
|
||||
fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
|
||||
let instance = Arc::new(DummyInstance { _tx: tx });
|
||||
let instance = ServerSqlQueryHandlerAdaptor::arc(instance);
|
||||
let server = HttpServer::new(instance, HttpOptions::default());
|
||||
let sql_instance = ServerSqlQueryHandlerAdaptor::arc(instance.clone());
|
||||
let grpc_instance = ServerGrpcQueryHandlerAdaptor::arc(instance);
|
||||
|
||||
let server = HttpServer::new(sql_instance, grpc_instance, HttpOptions::default());
|
||||
server.make_app().route(
|
||||
"/test/timeout",
|
||||
get(forever.layer(
|
||||
|
||||
69 src/servers/src/http/admin.rs Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::v1::ddl_request::Expr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::{DdlRequest, FlushTableExpr};
|
||||
use axum::extract::{Query, RawBody, State};
|
||||
use axum::http::StatusCode as HttpStatusCode;
|
||||
use axum::Json;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error;
|
||||
use crate::error::Result;
|
||||
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
|
||||
|
||||
#[axum_macros::debug_handler]
|
||||
pub async fn flush(
|
||||
State(grpc_handler): State<ServerGrpcQueryHandlerRef>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
RawBody(_): RawBody,
|
||||
) -> Result<(HttpStatusCode, Json<String>)> {
|
||||
let catalog_name = params
|
||||
.get("catalog_name")
|
||||
.cloned()
|
||||
.unwrap_or("greptime".to_string());
|
||||
let schema_name =
|
||||
params
|
||||
.get("schema_name")
|
||||
.cloned()
|
||||
.context(error::InvalidFlushArgumentSnafu {
|
||||
err_msg: "schema_name is not present",
|
||||
})?;
|
||||
|
||||
// if table name is not present, flush all tables inside schema
|
||||
let table_name = params.get("table_name").cloned().unwrap_or_default();
|
||||
|
||||
let region_id: Option<u32> = params
|
||||
.get("region")
|
||||
.map(|v| v.parse())
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten();
|
||||
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(Expr::FlushTable(FlushTableExpr {
|
||||
catalog_name: catalog_name.clone(),
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
region_id,
|
||||
})),
|
||||
});
|
||||
|
||||
grpc_handler.do_query(request, QueryContext::arc()).await?;
|
||||
Ok((HttpStatusCode::OK, Json::from("done".to_string())))
|
||||
}
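The handler above is wired up by the route_admin router shown earlier, so it is reachable at POST /{HTTP_API_VERSION}/admin/flush. A minimal, hypothetical client-side sketch for exercising it; the host, the port 4000, the use of the reqwest crate, and HTTP_API_VERSION resolving to "v1" are all illustrative assumptions, not part of this change:

// Hypothetical caller (illustration only): ask the server to flush table "metrics"
// in schema "public". Assumes the HTTP server listens on 127.0.0.1:4000 and that
// the reqwest crate is available; neither is implied by this diff.
async fn trigger_flush() -> Result<(), reqwest::Error> {
    let resp = reqwest::Client::new()
        .post("http://127.0.0.1:4000/v1/admin/flush")
        .query(&[("schema_name", "public"), ("table_name", "metrics")])
        .send()
        .await?;
    // On success the handler replies with HTTP 200 and the JSON string "done".
    println!("flush status: {}", resp.status());
    Ok(())
}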
|
||||
@@ -43,6 +43,7 @@ pub async fn sql(
Form(form_params): Form<SqlQuery>,
) -> Json<JsonResponse> {
let sql_handler = &state.sql_handler;

let start = Instant::now();
let sql = query_params.sql.or(form_params.sql);
let db = query_params.db.or(form_params.db);
|
||||
|
||||
@@ -24,6 +24,7 @@ use common_runtime::{Builder as RuntimeBuilder, Runtime};
use servers::auth::UserProviderRef;
use servers::error::{Result, StartGrpcSnafu, TcpBindSnafu};
use servers::grpc::flight::FlightHandler;
use servers::grpc::handler::GreptimeRequestHandler;
use servers::query_handler::grpc::ServerGrpcQueryHandlerRef;
use servers::server::Server;
use snafu::ResultExt;
@@ -54,11 +55,11 @@ impl MockGrpcServer {
}

fn create_service(&self) -> FlightServiceServer<impl FlightService> {
let service = FlightHandler::new(
let service = FlightHandler::new(Arc::new(GreptimeRequestHandler::new(
self.query_handler.clone(),
self.user_provider.clone(),
self.runtime.clone(),
);
)));
FlightServiceServer::new(service)
}
}
|
||||
|
||||
@@ -17,11 +17,12 @@ use axum_test_helper::TestClient;
use servers::http::{HttpOptions, HttpServer};
use table::test_util::MemTable;

use crate::create_testing_sql_query_handler;
use crate::{create_testing_grpc_query_handler, create_testing_sql_query_handler};

fn make_test_app() -> Router {
let server = HttpServer::new(
create_testing_sql_query_handler(MemTable::default_numbers_table()),
create_testing_grpc_query_handler(MemTable::default_numbers_table()),
HttpOptions::default(),
);
server.make_app()
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::InsertRequest;
|
||||
use async_trait::async_trait;
|
||||
use axum::{http, Router};
|
||||
@@ -24,6 +25,7 @@ use query::parser::PromQuery;
|
||||
use servers::error::{Error, Result};
|
||||
use servers::http::{HttpOptions, HttpServer};
|
||||
use servers::influxdb::InfluxdbRequest;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::InfluxdbLineProtocolHandler;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -35,6 +37,19 @@ struct DummyInstance {
|
||||
tx: Arc<mpsc::Sender<(String, String)>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DummyInstance {
|
||||
type Error = Error;
|
||||
|
||||
async fn do_query(
|
||||
&self,
|
||||
_query: Request,
|
||||
_ctx: QueryContextRef,
|
||||
) -> std::result::Result<Output, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl InfluxdbLineProtocolHandler for DummyInstance {
|
||||
async fn exec(&self, request: &InfluxdbRequest, ctx: QueryContextRef) -> Result<()> {
|
||||
@@ -79,7 +94,7 @@ impl SqlQueryHandler for DummyInstance {
|
||||
|
||||
fn make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>) -> Router {
|
||||
let instance = Arc::new(DummyInstance { tx });
|
||||
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
|
||||
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
|
||||
let mut user_provider = MockUserProvider::default();
|
||||
if let Some(name) = db_name {
|
||||
user_provider.set_authorization_info(DatabaseAuthInfo {
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use async_trait::async_trait;
|
||||
use axum::Router;
|
||||
use axum_test_helper::TestClient;
|
||||
@@ -23,6 +24,7 @@ use query::parser::PromQuery;
|
||||
use servers::error::{self, Result};
|
||||
use servers::http::{HttpOptions, HttpServer};
|
||||
use servers::opentsdb::codec::DataPoint;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::OpentsdbProtocolHandler;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -32,6 +34,19 @@ struct DummyInstance {
|
||||
tx: mpsc::Sender<String>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DummyInstance {
|
||||
type Error = crate::Error;
|
||||
|
||||
async fn do_query(
|
||||
&self,
|
||||
_query: Request,
|
||||
_ctx: QueryContextRef,
|
||||
) -> std::result::Result<Output, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl OpentsdbProtocolHandler for DummyInstance {
|
||||
async fn exec(&self, data_point: &DataPoint, _ctx: QueryContextRef) -> Result<()> {
|
||||
@@ -77,7 +92,7 @@ impl SqlQueryHandler for DummyInstance {
|
||||
|
||||
fn make_test_app(tx: mpsc::Sender<String>) -> Router {
|
||||
let instance = Arc::new(DummyInstance { tx });
|
||||
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
|
||||
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
|
||||
server.set_opentsdb_handler(instance);
|
||||
server.make_app()
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::sync::Arc;
|
||||
use api::prometheus::remote::{
|
||||
LabelMatcher, Query, QueryResult, ReadRequest, ReadResponse, WriteRequest,
|
||||
};
|
||||
use api::v1::greptime_request::Request;
|
||||
use async_trait::async_trait;
|
||||
use axum::Router;
|
||||
use axum_test_helper::TestClient;
|
||||
@@ -28,6 +29,7 @@ use servers::error::{Error, Result};
|
||||
use servers::http::{HttpOptions, HttpServer};
|
||||
use servers::prometheus;
|
||||
use servers::prometheus::{snappy_compress, Metrics};
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse};
|
||||
use session::context::QueryContextRef;
|
||||
@@ -37,6 +39,19 @@ struct DummyInstance {
|
||||
tx: mpsc::Sender<(String, Vec<u8>)>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DummyInstance {
|
||||
type Error = Error;
|
||||
|
||||
async fn do_query(
|
||||
&self,
|
||||
_query: Request,
|
||||
_ctx: QueryContextRef,
|
||||
) -> std::result::Result<Output, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PrometheusProtocolHandler for DummyInstance {
|
||||
async fn write(&self, request: WriteRequest, ctx: QueryContextRef) -> Result<()> {
|
||||
@@ -102,7 +117,7 @@ impl SqlQueryHandler for DummyInstance {
|
||||
|
||||
fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
|
||||
let instance = Arc::new(DummyInstance { tx });
|
||||
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
|
||||
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
|
||||
server.set_prom_handler(instance);
|
||||
server.make_app()
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ mod py_script;

const LOCALHOST_WITH_0: &str = "127.0.0.1:0";

struct DummyInstance {
pub struct DummyInstance {
query_engine: QueryEngineRef,
py_engine: Arc<PyEngine>,
scripts: RwLock<HashMap<String, Arc<PyScript>>>,
|
||||
|
||||
@@ -135,6 +135,10 @@ impl<S: LogStore> Region for RegionImpl<S> {
.map(|level_ssts| level_ssts.files().map(|sst| sst.file_size()).sum::<u64>())
.sum()
}

async fn flush(&self) -> Result<()> {
self.inner.flush().await
}
}

/// Storage related config for region.
@@ -560,4 +564,18 @@ impl<S: LogStore> RegionInner<S> {
async fn close(&self) -> Result<()> {
self.writer.close().await
}

async fn flush(&self) -> Result<()> {
let writer_ctx = WriterContext {
shared: &self.shared,
flush_strategy: &self.flush_strategy,
flush_scheduler: &self.flush_scheduler,
compaction_scheduler: &self.compaction_scheduler,
sst_layer: &self.sst_layer,
wal: &self.wal,
writer: &self.writer,
manifest: &self.manifest,
};
self.writer.flush(writer_ctx).await
}
}
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::sync::Arc;
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
use store_api::storage::{OpenOptions, WriteResponse};
|
||||
use store_api::storage::{OpenOptions, Region, WriteResponse};
|
||||
|
||||
use crate::engine;
|
||||
use crate::flush::FlushStrategyRef;
|
||||
@@ -94,6 +94,10 @@ impl FlushTester {
|
||||
async fn wait_flush_done(&self) {
|
||||
self.base().region.wait_flush_done().await.unwrap();
|
||||
}
|
||||
|
||||
async fn flush(&self) {
|
||||
self.base().region.flush().await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -124,6 +128,30 @@ async fn test_flush_and_stall() {
|
||||
assert!(has_parquet_file(&sst_dir));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_manual_flush() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let dir = create_temp_dir("manual_flush");
|
||||
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
|
||||
let flush_switch = Arc::new(FlushSwitch::default());
|
||||
let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
|
||||
|
||||
let data = [(1000, Some(100))];
|
||||
// Put one element so we have content to flush.
|
||||
tester.put(&data).await;
|
||||
|
||||
// No parquet file should be flushed.
|
||||
let sst_dir = format!("{}/{}", store_dir, engine::region_sst_dir("", REGION_NAME));
|
||||
assert!(!has_parquet_file(&sst_dir));
|
||||
|
||||
tester.flush().await;
|
||||
tester.wait_flush_done().await;
|
||||
|
||||
assert!(has_parquet_file(&sst_dir));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_empty() {
|
||||
let dir = create_temp_dir("flush-empty");
|
||||
|
||||
@@ -260,6 +260,22 @@ impl RegionWriter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Flush task manually
|
||||
pub async fn flush<S: LogStore>(&self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
ensure!(!inner.is_closed(), error::ClosedRegionSnafu);
|
||||
|
||||
inner.manual_flush(writer_ctx).await?;
|
||||
|
||||
// Wait flush.
|
||||
if let Some(handle) = inner.flush_handle.take() {
|
||||
handle.join().await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Cancel flush task if any
|
||||
async fn cancel_flush(&self) -> Result<()> {
|
||||
let mut inner = self.inner.lock().await;
|
||||
@@ -375,6 +391,7 @@ impl WriterInner {
|
||||
let next_sequence = committed_sequence + 1;
|
||||
|
||||
let version = version_control.current();
|
||||
|
||||
let wal_header = WalHeader::with_last_manifest_version(version.manifest_version());
|
||||
writer_ctx
|
||||
.wal
|
||||
@@ -680,6 +697,11 @@ impl WriterInner {
|
||||
Some(schedule_compaction_cb)
|
||||
}
|
||||
|
||||
async fn manual_flush<S: LogStore>(&mut self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
|
||||
self.trigger_flush(&writer_ctx).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_closed(&self) -> bool {
|
||||
self.closed
|
||||
|
||||
@@ -43,7 +43,7 @@ use parquet::basic::{Compression, Encoding};
|
||||
use parquet::file::metadata::KeyValue;
|
||||
use parquet::file::properties::WriterProperties;
|
||||
use parquet::format::FileMetaData;
|
||||
use parquet::schema::types::SchemaDescriptor;
|
||||
use parquet::schema::types::{ColumnPath, SchemaDescriptor};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::predicate::Predicate;
|
||||
use tokio::io::BufReader;
|
||||
@@ -71,7 +71,7 @@ impl<'a> ParquetWriter<'a> {
|
||||
file_path,
|
||||
source,
|
||||
object_store,
|
||||
max_row_group_size: 4096, // TODO(hl): make this configurable
|
||||
max_row_group_size: 64 * 1024, // TODO(hl): make this configurable
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,9 +88,25 @@ impl<'a> ParquetWriter<'a> {
|
||||
let schema = store_schema.arrow_schema().clone();
|
||||
let object = self.object_store.object(self.file_path);
|
||||
|
||||
let ts_col_name = store_schema
|
||||
.schema()
|
||||
.timestamp_column()
|
||||
.unwrap()
|
||||
.name
|
||||
.clone();
|
||||
|
||||
let writer_props = WriterProperties::builder()
|
||||
.set_compression(Compression::ZSTD)
|
||||
.set_encoding(Encoding::PLAIN)
|
||||
.set_column_dictionary_enabled(ColumnPath::new(vec![ts_col_name.clone()]), false)
|
||||
.set_column_encoding(
|
||||
ColumnPath::new(vec![ts_col_name]),
|
||||
Encoding::DELTA_BINARY_PACKED,
|
||||
)
|
||||
.set_column_dictionary_enabled(ColumnPath::new(vec!["__sequence".to_string()]), false)
|
||||
.set_column_encoding(
|
||||
ColumnPath::new(vec!["__sequence".to_string()]),
|
||||
Encoding::DELTA_BINARY_PACKED,
|
||||
)
|
||||
.set_max_row_group_size(self.max_row_group_size)
|
||||
.set_key_value_metadata(extra_meta.map(|map| {
|
||||
map.iter()
|
||||
|
||||
@@ -24,7 +24,7 @@
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

use common_telemetry::info;
use common_telemetry::logging;
use store_api::manifest::ManifestVersion;
use store_api::storage::{SchemaRef, SequenceNumber};

@@ -248,7 +248,7 @@ impl Version {
.ssts
.merge(handles_to_add, edit.files_to_remove.into_iter());

info!(
logging::debug!(
"After apply edit, region: {}, SST files: {:?}",
self.metadata.id(),
merged_ssts
|
||||
|
||||
@@ -106,6 +106,9 @@ impl<S: LogStore> Wal<S> {
mut header: WalHeader,
payload: Option<&Payload>,
) -> Result<Id> {
if !cfg!(test) && (self.region_id >> 32) >= 1024 {
return Ok(seq);
}
if let Some(p) = payload {
header.mutation_types = wal::gen_mutation_types(p);
}
|
||||
|
||||
@@ -76,6 +76,8 @@ pub trait Region: Send + Sync + Clone + std::fmt::Debug + 'static {
async fn close(&self) -> Result<(), Self::Error>;

fn disk_usage_bytes(&self) -> u64;

async fn flush(&self) -> Result<(), Self::Error>;
}

/// Context for write operations.
|
||||
|
||||
@@ -19,6 +19,7 @@ common-time = { path = "../common/time" }
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-physical-expr.workspace = true
datatypes = { path = "../datatypes" }
derive_builder = "0.11"
futures.workspace = true
|
||||
|
||||
@@ -16,8 +16,10 @@ use std::fmt::{self, Display};
use std::sync::Arc;

use common_procedure::BoxedProcedure;
use store_api::storage::RegionId;

use crate::error::Result;
use crate::metadata::TableId;
use crate::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use crate::TableRef;

@@ -123,6 +125,22 @@ pub trait TableEngineProcedure: Send + Sync {

pub type TableEngineProcedureRef = Arc<dyn TableEngineProcedure>;

/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
pub fn region_name(table_id: TableId, n: u32) -> String {
format!("{table_id}_{n:010}")
}

#[inline]
pub fn region_id(table_id: TableId, n: u32) -> RegionId {
(u64::from(table_id) << 32) | u64::from(n)
}

#[inline]
pub fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String {
format!("{catalog_name}/{schema_name}/{table_id}/")
}

#[cfg(test)]
mod tests {
use super::*;
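To make the region-id encoding used by these helpers concrete, here is a small illustrative check (not part of the diff; the expected values are worked out by hand from the two functions above):

// Illustrative only: the table id occupies the upper 32 bits of a RegionId and the
// region number the lower 32 bits, so `region_id(t, n) >> 32` recovers the table id.
// This is also the quantity the WAL check `(self.region_id >> 32) >= 1024` earlier
// in this diff keys off.
#[test]
fn region_id_layout_example() {
    assert_eq!(region_name(42, 3), "42_0000000003"); // region number zero-padded to 10 digits
    assert_eq!(region_id(42, 3), (42u64 << 32) | 3); // = 180_388_626_435
    assert_eq!(region_id(42, 3) >> 32, 42);
}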
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use datafusion_expr::TableProviderFilterPushDown;
|
||||
pub use datatypes::error::{Error as ConvertError, Result as ConvertResult};
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
|
||||
use derive_builder::Builder;
|
||||
@@ -47,6 +48,26 @@ pub enum FilterPushDownType {
|
||||
Exact,
|
||||
}
|
||||
|
||||
impl From<TableProviderFilterPushDown> for FilterPushDownType {
|
||||
fn from(value: TableProviderFilterPushDown) -> Self {
|
||||
match value {
|
||||
TableProviderFilterPushDown::Unsupported => FilterPushDownType::Unsupported,
|
||||
TableProviderFilterPushDown::Inexact => FilterPushDownType::Inexact,
|
||||
TableProviderFilterPushDown::Exact => FilterPushDownType::Exact,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FilterPushDownType> for TableProviderFilterPushDown {
|
||||
fn from(value: FilterPushDownType) -> Self {
|
||||
match value {
|
||||
FilterPushDownType::Unsupported => TableProviderFilterPushDown::Unsupported,
|
||||
FilterPushDownType::Inexact => TableProviderFilterPushDown::Inexact,
|
||||
FilterPushDownType::Exact => TableProviderFilterPushDown::Exact,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates the type of this table for metadata/catalog purposes.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum TableType {
|
||||
|
||||
@@ -18,7 +18,10 @@ use common_time::range::TimestampRange;
|
||||
use common_time::Timestamp;
|
||||
use datafusion::parquet::file::metadata::RowGroupMetaData;
|
||||
use datafusion::physical_optimizer::pruning::PruningPredicate;
|
||||
use datafusion_common::ToDFSchema;
|
||||
use datafusion_expr::{Between, BinaryExpr, Operator};
|
||||
use datafusion_physical_expr::create_physical_expr;
|
||||
use datafusion_physical_expr::execution_props::ExecutionProps;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::value::scalar_value_to_timestamp;
|
||||
|
||||
@@ -46,8 +49,26 @@ impl Predicate {
|
||||
row_groups: &[RowGroupMetaData],
|
||||
) -> Vec<bool> {
|
||||
let mut res = vec![true; row_groups.len()];
|
||||
let arrow_schema = (*schema.arrow_schema()).clone();
|
||||
let df_schema = arrow_schema.clone().to_dfschema_ref();
|
||||
let df_schema = match df_schema {
|
||||
Ok(x) => x,
|
||||
Err(e) => {
|
||||
warn!("Failed to create Datafusion schema when trying to prune row groups, error: {e}");
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
let execution_props = &ExecutionProps::new();
|
||||
for expr in &self.exprs {
|
||||
match PruningPredicate::try_new(expr.df_expr().clone(), schema.arrow_schema().clone()) {
|
||||
match create_physical_expr(
|
||||
expr.df_expr(),
|
||||
df_schema.as_ref(),
|
||||
arrow_schema.as_ref(),
|
||||
execution_props,
|
||||
)
|
||||
.and_then(|expr| PruningPredicate::try_new(expr, arrow_schema.clone()))
|
||||
{
|
||||
Ok(p) => {
|
||||
let stat = RowGroupPruningStatistics::new(row_groups, &schema);
|
||||
match p.prune(&stat) {
|
||||
|
||||
@@ -209,6 +209,14 @@ pub struct CopyTableFromRequest {
pub from: String,
}

#[derive(Debug, Clone, Default)]
pub struct FlushTableRequest {
pub catalog_name: String,
pub schema_name: String,
pub table_name: Option<String>,
pub region_number: Option<RegionNumber>,
}

#[cfg(test)]
mod tests {
use super::*;
|
||||
|
||||
@@ -23,6 +23,7 @@ use async_trait::async_trait;
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::PhysicalPlanRef;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use store_api::storage::RegionNumber;
|
||||
|
||||
use crate::error::{Result, UnsupportedSnafu};
|
||||
use crate::metadata::{FilterPushDownType, TableId, TableInfoRef, TableType};
|
||||
@@ -70,10 +71,10 @@ pub trait Table: Send + Sync {
|
||||
limit: Option<usize>,
|
||||
) -> Result<PhysicalPlanRef>;
|
||||
|
||||
/// Tests whether the table provider can make use of a filter expression
|
||||
/// Tests whether the table provider can make use of any or all filter expressions
|
||||
/// to optimise data retrieval.
|
||||
fn supports_filter_pushdown(&self, _filter: &Expr) -> Result<FilterPushDownType> {
|
||||
Ok(FilterPushDownType::Unsupported)
|
||||
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>> {
|
||||
Ok(vec![FilterPushDownType::Unsupported; filters.len()])
|
||||
}
|
||||
|
||||
/// Alter table.
|
||||
@@ -94,6 +95,12 @@ pub trait Table: Send + Sync {
|
||||
.fail()?
|
||||
}
|
||||
|
||||
/// Flush table.
|
||||
async fn flush(&self, region_number: Option<RegionNumber>) -> Result<()> {
|
||||
let _ = region_number;
|
||||
UnsupportedSnafu { operation: "FLUSH" }.fail()?
|
||||
}
|
||||
|
||||
/// Close the table.
|
||||
async fn close(&self) -> Result<()> {
|
||||
Ok(())
|
||||
|
||||
@@ -78,15 +78,18 @@ impl TableProvider for DfTableProviderAdapter {
|
||||
Ok(Arc::new(DfPhysicalPlanAdapter(inner)))
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, filter: &DfExpr) -> DfResult<DfTableProviderFilterPushDown> {
|
||||
let p = self
|
||||
fn supports_filters_pushdown(
|
||||
&self,
|
||||
filters: &[&DfExpr],
|
||||
) -> DfResult<Vec<DfTableProviderFilterPushDown>> {
|
||||
let filters = filters
|
||||
.iter()
|
||||
.map(|&x| x.clone().into())
|
||||
.collect::<Vec<_>>();
|
||||
Ok(self
|
||||
.table
|
||||
.supports_filter_pushdown(&filter.clone().into())?;
|
||||
match p {
|
||||
FilterPushDownType::Unsupported => Ok(DfTableProviderFilterPushDown::Unsupported),
|
||||
FilterPushDownType::Inexact => Ok(DfTableProviderFilterPushDown::Inexact),
|
||||
FilterPushDownType::Exact => Ok(DfTableProviderFilterPushDown::Exact),
|
||||
}
|
||||
.supports_filters_pushdown(&filters.iter().collect::<Vec<_>>())
|
||||
.map(|v| v.into_iter().map(Into::into).collect::<Vec<_>>())?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,16 +158,11 @@ impl Table for TableAdapter {
|
||||
Ok(Arc::new(PhysicalPlanAdapter::new(schema, execution_plan)))
|
||||
}
|
||||
|
||||
fn supports_filter_pushdown(&self, filter: &Expr) -> Result<FilterPushDownType> {
|
||||
match self
|
||||
.table_provider
|
||||
.supports_filter_pushdown(filter.df_expr())
|
||||
.context(error::DatafusionSnafu)?
|
||||
{
|
||||
DfTableProviderFilterPushDown::Unsupported => Ok(FilterPushDownType::Unsupported),
|
||||
DfTableProviderFilterPushDown::Inexact => Ok(FilterPushDownType::Inexact),
|
||||
DfTableProviderFilterPushDown::Exact => Ok(FilterPushDownType::Exact),
|
||||
}
|
||||
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>> {
|
||||
self.table_provider
|
||||
.supports_filters_pushdown(&filters.iter().map(|x| x.df_expr()).collect::<Vec<_>>())
|
||||
.context(error::DatafusionSnafu)
|
||||
.map(|v| v.into_iter().map(Into::into).collect::<Vec<_>>())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -275,7 +275,8 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router
|
||||
.await
|
||||
.unwrap();
|
||||
let http_server = HttpServer::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(Arc::new(build_frontend_instance(instance))),
|
||||
ServerSqlQueryHandlerAdaptor::arc(Arc::new(build_frontend_instance(instance.clone()))),
|
||||
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
|
||||
HttpOptions::default(),
|
||||
);
|
||||
(http_server.make_app(), guard)
|
||||
@@ -296,8 +297,10 @@ pub async fn setup_test_http_app_with_frontend(
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let frontend_ref = Arc::new(frontend);
|
||||
let mut http_server = HttpServer::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(Arc::new(frontend)),
|
||||
ServerSqlQueryHandlerAdaptor::arc(frontend_ref.clone()),
|
||||
ServerGrpcQueryHandlerAdaptor::arc(frontend_ref),
|
||||
HttpOptions::default(),
|
||||
);
|
||||
http_server.set_script_handler(instance.clone());
|
||||
|
||||
@@ -183,7 +183,7 @@ async fn insert_and_assert(db: &Database) {
row_count: 4,
};
let result = db.insert(request).await;
result.unwrap();
assert_eq!(result.unwrap(), 4);

let result = db
.sql(
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
CREATE TABLE integers(i INTEGER, j BIGINT TIME INDEX);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO integers VALUES (1, 1), (2, 2), (3, 3), (NULL, 4);
|
||||
|
||||
Affected Rows: 4
|
||||
|
||||
SELECT i1.i, i2.i FROM integers i1, integers i2 WHERE i1.i=i2.i ORDER BY 1;
|
||||
|
||||
+---+---+
|
||||
| i | i |
|
||||
+---+---+
|
||||
| 1 | 1 |
|
||||
| 2 | 2 |
|
||||
| 3 | 3 |
|
||||
+---+---+
|
||||
|
||||
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i1.i=i2.i AND i1.i>1 ORDER BY 1;
|
||||
|
||||
+---+---+
|
||||
| i | i |
|
||||
+---+---+
|
||||
| 2 | 2 |
|
||||
| 3 | 3 |
|
||||
+---+---+
|
||||
|
||||
SELECT i1.i,i2.i,i3.i FROM integers i1, integers i2, integers i3 WHERE i1.i=i2.i AND i1.i=i3.i AND i1.i>1 ORDER BY 1;
|
||||
|
||||
+---+---+---+
|
||||
| i | i | i |
|
||||
+---+---+---+
|
||||
| 2 | 2 | 2 |
|
||||
| 3 | 3 | 3 |
|
||||
+---+---+---+
|
||||
|
||||
SELECT i1.i,i2.i FROM integers i1 JOIN integers i2 ON i1.i=i2.i WHERE i1.i>1 ORDER BY 1;
|
||||
|
||||
+---+---+
|
||||
| i | i |
|
||||
+---+---+
|
||||
| 2 | 2 |
|
||||
| 3 | 3 |
|
||||
+---+---+
|
||||
|
||||
DROP TABLE integers;
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
CREATE TABLE integers(i INTEGER, j BIGINT TIME INDEX);
|
||||
|
||||
INSERT INTO integers VALUES (1, 1), (2, 2), (3, 3), (NULL, 4);
|
||||
|
||||
SELECT i1.i, i2.i FROM integers i1, integers i2 WHERE i1.i=i2.i ORDER BY 1;
|
||||
|
||||
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i1.i=i2.i AND i1.i>1 ORDER BY 1;
|
||||
|
||||
SELECT i1.i,i2.i,i3.i FROM integers i1, integers i2, integers i3 WHERE i1.i=i2.i AND i1.i=i3.i AND i1.i>1 ORDER BY 1;
|
||||
|
||||
SELECT i1.i,i2.i FROM integers i1 JOIN integers i2 ON i1.i=i2.i WHERE i1.i>1 ORDER BY 1;
|
||||
|
||||
-- TODO(LFC): Resolve #790, then port remaining test case from standalone.
|
||||
|
||||
DROP TABLE integers;
|
||||
@@ -92,15 +92,32 @@ SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=1 WHERE i1.i=
|
||||
|
||||
SELECT * FROM integers WHERE i IN ((SELECT i FROM integers)) ORDER BY i;
|
||||
|
||||
Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression (<subquery>)
|
||||
+---+---+
|
||||
| i | j |
|
||||
+---+---+
|
||||
| 1 | 1 |
|
||||
| 2 | 2 |
|
||||
| 3 | 3 |
|
||||
+---+---+
|
||||
|
||||
SELECT * FROM integers WHERE i NOT IN ((SELECT i FROM integers WHERE i=1)) ORDER BY i;
|
||||
|
||||
Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression (<subquery>)
|
||||
+---+---+
|
||||
| i | j |
|
||||
+---+---+
|
||||
| 2 | 2 |
|
||||
| 3 | 3 |
|
||||
| | 4 |
|
||||
+---+---+
|
||||
|
||||
SELECT * FROM integers WHERE i IN ((SELECT i FROM integers)) AND i<3 ORDER BY i;
|
||||
|
||||
Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression (<subquery>)
|
||||
+---+---+
|
||||
| i | j |
|
||||
+---+---+
|
||||
| 1 | 1 |
|
||||
| 2 | 2 |
|
||||
+---+---+
|
||||
|
||||
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i IN ((SELECT i FROM integers)) AND i1.i=i2.i ORDER BY 1;
|
||||
|
||||