Compare commits

...

23 Commits

Author SHA1 Message Date
zyy17
64441616db ci: refactor compile-python.sh and use the python310 to build amd64 binary (#1199) 2023-03-18 16:16:15 +08:00
zyy17
09491d6aee ci: release the standalone binaries with pyo3 and install python utils in images (#1194)
* ci: install python3 and python3-dev in CI Dockerfile

* ci: release the standalone binaries with pyo3 support for multiple platforms

* refactor: install pip and pyarrow

* refactor: specify the python version
2023-03-17 15:42:13 +08:00
Weny Xu
7cfa30b2ab feat: add shutdown for standalone and metasrv (#1174) 2023-03-17 11:35:17 +08:00
Ning Sun
a7676d8860 refactor: port div_ceil from stdlib to avoid unstable features (#1191)
* refactor: use float div&ceil to avoid unstable features

* refactor: port div_ceil from rust stdlib
2023-03-16 22:55:35 +08:00
zyy17
62e2a60b7b ci: release artifacts after binary and container is ready (#1192)
ci: release artifacts before binary and container is ready
2023-03-16 09:20:03 +00:00
zyy17
128c5cabe1 ci: disable run tests temporarily (#1187) 2023-03-16 14:12:19 +08:00
Yingwen
9a001d3392 chore(datanode): derive serde default for Wal/CompactionConfig (#1173) 2023-03-16 11:56:28 +08:00
Weny Xu
facdda4d9f feat: implement CONNECTION clause of Copy To (#1163)
* feat: implement CONNECTION clause of Copy To

* test: add tests for s3 backend

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-16 11:36:38 +08:00
Lei, HUANG
17eb99bc52 feat: allow manual table flush through HTTP API (#1184) 2023-03-15 20:15:34 +08:00
Xieqijun
cd8be77968 feat(procedure): Max retry time (#1095)
* feat: procedure config

* fix: modify config

* feat: add retry logic

* feat: add error

* feat: add it

* feat: add it

* feat: add it

* feat: rm retry from runner

* feat: use backon

* feat: add retry_interval

* feat: add retry_interval

* fix: conflict

* fix: cr

* feat: add retry error and id

* feat: rename

* refactor: execute

* feat: use config dir

* fix: cr

* fix: cr

* fix: fmt

* fix: fmt

* fix: pr

* fix: it

* fix: rm unless cmd params

* feat: add toml

* fix: ut

* feat: add rolling back

* fix: cr

* fix: cr

* fix: cr

* fix: ci

* fix: ci

* fix: ci

* chore: Apply suggestions from code review

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-15 08:28:08 +00:00
Eugene Tolbakov
b530ac9e60 chore(from_unixtime): remove UDF from_unixtime (#1179)
* chore(from_unixtime): remove UDF from_unixtime

* chore(from_unixtime): restore timestamp.rs for further usage

* chore(from_unixtime): address fmt issue
2023-03-15 16:27:09 +08:00
zyy17
76f1a79f1b ci: set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-15 15:41:36 +08:00
LFC
4705245d60 docs: region failover RFC (#1139)
* docs: region failover RFC

* fix: resolve PR comments
2023-03-15 15:21:58 +08:00
Zheming Li
f712f978cf feat: Report disk usage stats to metasrv thru heartbeat (#1167)
* feat: Report disk usage stats to metasrv thru heartbeat

Signed-off-by: Zheming Li <nkdudu@126.com>

* Update src/catalog/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/catalog/src/lib.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/mito/src/table.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-03-15 03:11:32 +00:00
discord9
cbf64e65b9 refactor: put dataframe & query into greptime module (#1172)
* feat: impl getitem for `vector`

* feat: mv `query`&`dataframe` into `greptime` for PyO3

* refactor: allow call dataframe&query

* refactor: pyo3 query&dataframe

* chore: CR advices
2023-03-15 11:01:43 +08:00
zyy17
242ce5c2aa ci: add pyo3 options for mac (#1178) 2023-03-14 13:51:58 +00:00
Ruihang Xia
e8d2e82335 fix: ambiguous column reference (#1177)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 13:18:43 +00:00
zyy17
0086cc2d3d fix: export 'PYO3_CROSS_LIB_DIR' when cargo build for aarch64-linux and refactor matrix opts (#1171) 2023-03-14 15:35:29 +08:00
Weny Xu
cdc111b607 refactor: make the cmd hold the application instance (#1159) 2023-03-14 15:18:50 +08:00
zyy17
81ca1d8399 refactor: add the separate GitHub Action job to push the image to the UCloud registry (#1170) 2023-03-14 11:35:18 +08:00
LFC
8d3999df5f fix: failed to run subquery wrapped in two parentheses (#1157) 2023-03-14 10:59:43 +08:00
discord9
a60788e92e fix: use correct env var (#1166)
* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend
2023-03-14 10:57:13 +08:00
Weny Xu
296c6dfcbf feat: implement table flush (#1121)
* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-13 20:10:37 +08:00
114 changed files with 2598 additions and 782 deletions

View File

@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id

View File

@@ -18,6 +18,9 @@ env:
CARGO_PROFILE: nightly
## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
jobs:
build:
name: Build binary
@@ -32,15 +35,35 @@ jobs:
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: true
continue-on-error: false
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: true
continue-on-error: false
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: true
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
@@ -98,13 +121,12 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
export PYO3_CROSS_LIB_DIR=${PWD}/python310-aarch64/lib
echo $PYO3_CROSS_LIB_DIR
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -116,10 +138,55 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
@@ -140,9 +207,98 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64-pyo3.tgz
rm greptime-linux-amd64-pyo3.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64-pyo3.tgz
rm greptime-linux-arm64-pyo3.tgz
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
needs: [build]
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
@@ -183,15 +339,23 @@ jobs:
files: |
**/greptime-*
docker:
name: Build docker image
needs: [build]
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
@@ -199,12 +363,6 @@ jobs:
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
@@ -220,63 +378,9 @@ jobs:
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
path: amd64
- name: Unzip the amd64 artifacts
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}

Cargo.lock generated
View File

@@ -190,9 +190,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "arrow"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3724c874f1517cf898cd1c3ad18ab5071edf893c48e73139ab1e16cf0f2affe"
checksum = "f410d3907b6b3647b9e7bca4551274b2e3d716aa940afb67b7287257401da921"
dependencies = [
"ahash 0.8.3",
"arrow-arith",
@@ -214,9 +214,9 @@ dependencies = [
[[package]]
name = "arrow-arith"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e958823b8383ca14d0a2e973de478dd7674cd9f72837f8c41c132a0fda6a4e5e"
checksum = "f87391cf46473c9bc53dab68cb8872c3a81d4dfd1703f1c8aa397dba9880a043"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -229,9 +229,9 @@ dependencies = [
[[package]]
name = "arrow-array"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db670eab50e76654065b5aed930f4367101fcddcb2223802007d1e0b4d5a2579"
checksum = "d35d5475e65c57cffba06d0022e3006b677515f99b54af33a7cd54f6cdd4a5b5"
dependencies = [
"ahash 0.8.3",
"arrow-buffer",
@@ -245,9 +245,9 @@ dependencies = [
[[package]]
name = "arrow-buffer"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f0e01c931882448c0407bd32311a624b9f099739e94e786af68adc97016b5f2"
checksum = "68b4ec72eda7c0207727df96cf200f539749d736b21f3e782ece113e18c1a0a7"
dependencies = [
"half 2.2.1",
"num",
@@ -255,9 +255,9 @@ dependencies = [
[[package]]
name = "arrow-cast"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bf35d78836c93f80d9362f3ccb47ff5e2c5ecfc270ff42cdf1ef80334961d44"
checksum = "0a7285272c9897321dfdba59de29f5b05aeafd3cdedf104a941256d155f6d304"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -271,9 +271,9 @@ dependencies = [
[[package]]
name = "arrow-csv"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6aa7c2531d89d01fed8c469a9b1bf97132a0bdf70b4724fe4bbb4537a50880"
checksum = "981ee4e7f6a120da04e00d0b39182e1eeacccb59c8da74511de753c56b7fddf7"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -290,9 +290,9 @@ dependencies = [
[[package]]
name = "arrow-data"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea50db4d1e1e4c2da2bfdea7b6d2722eef64267d5ab680d815f7ae42428057f5"
checksum = "27cc673ee6989ea6e4b4e8c7d461f7e06026a096c8f0b1a7288885ff71ae1e56"
dependencies = [
"arrow-buffer",
"arrow-schema",
@@ -302,9 +302,9 @@ dependencies = [
[[package]]
name = "arrow-flight"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ad4c883d509d89f05b2891ad889729f17ab2191b5fd22b0cf3660a28cc40af5"
checksum = "bd16945f8f3be0f6170b8ced60d414e56239d91a16a3f8800bc1504bc58b2592"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -325,9 +325,9 @@ dependencies = [
[[package]]
name = "arrow-ipc"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4042fe6585155d1ec28a8e4937ec901a3ca7a19a22b9f6cd3f551b935cd84f5"
checksum = "e37b8b69d9e59116b6b538e8514e0ec63a30f08b617ce800d31cb44e3ef64c1a"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -339,9 +339,9 @@ dependencies = [
[[package]]
name = "arrow-json"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c907c4ab4f26970a3719dc06e78e8054a01d0c96da3664d23b941e201b33d2b"
checksum = "80c3fa0bed7cfebf6d18e46b733f9cb8a1cb43ce8e6539055ca3e1e48a426266"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -358,9 +358,9 @@ dependencies = [
[[package]]
name = "arrow-ord"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e131b447242a32129efc7932f58ed8931b42f35d8701c1a08f9f524da13b1d3c"
checksum = "d247dce7bed6a8d6a3c6debfa707a3a2f694383f0c692a39d736a593eae5ef94"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -372,9 +372,9 @@ dependencies = [
[[package]]
name = "arrow-row"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b591ef70d76f4ac28dd7666093295fece0e5f9298f49af51ea49c001e1635bb6"
checksum = "8d609c0181f963cea5c70fddf9a388595b5be441f3aa1d1cdbf728ca834bbd3a"
dependencies = [
"ahash 0.8.3",
"arrow-array",
@@ -387,9 +387,9 @@ dependencies = [
[[package]]
name = "arrow-schema"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb327717d87eb94be5eff3b0cb8987f54059d343ee5235abf7f143c85f54cfc8"
checksum = "64951898473bfb8e22293e83a44f02874d2257514d49cd95f9aa4afcff183fbc"
dependencies = [
"bitflags",
"serde",
@@ -397,9 +397,9 @@ dependencies = [
[[package]]
name = "arrow-select"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79d3c389d1cea86793934f31594f914c8547d82e91e3411d4833ad0aac3266a7"
checksum = "2a513d89c2e1ac22b28380900036cf1f3992c6443efc5e079de631dcf83c6888"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -410,9 +410,9 @@ dependencies = [
[[package]]
name = "arrow-string"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30ee67790496dd310ddbf5096870324431e89aa76453e010020ac29b1184d356"
checksum = "5288979b2705dae1114c864d73150629add9153b9b8f1d7ee3963db94c372ba5"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -477,6 +477,8 @@ dependencies = [
"pin-project-lite",
"tokio",
"xz2",
"zstd 0.11.2+zstd.1.5.2",
"zstd-safe 5.0.2+zstd.1.5.2",
]
[[package]]
@@ -1590,6 +1592,7 @@ name = "common-procedure"
version = "0.1.1"
dependencies = [
"async-trait",
"backon 0.4.0",
"common-error",
"common-runtime",
"common-telemetry",
@@ -2105,8 +2108,8 @@ dependencies = [
[[package]]
name = "datafusion"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2117,6 +2120,7 @@ dependencies = [
"chrono",
"dashmap",
"datafusion-common",
"datafusion-execution",
"datafusion-expr",
"datafusion-optimizer",
"datafusion-physical-expr",
@@ -2134,7 +2138,6 @@ dependencies = [
"object_store",
"parking_lot",
"parquet",
"paste",
"percent-encoding",
"pin-project-lite",
"rand",
@@ -2147,12 +2150,13 @@ dependencies = [
"url",
"uuid",
"xz2",
"zstd 0.12.3+zstd.1.5.2",
]
[[package]]
name = "datafusion-common"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"chrono",
@@ -2162,22 +2166,38 @@ dependencies = [
"sqlparser",
]
[[package]]
name = "datafusion-execution"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"dashmap",
"datafusion-common",
"datafusion-expr",
"hashbrown 0.13.2",
"log",
"object_store",
"parking_lot",
"rand",
"tempfile",
"url",
]
[[package]]
name = "datafusion-expr"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
"datafusion-common",
"log",
"sqlparser",
]
[[package]]
name = "datafusion-optimizer"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"async-trait",
@@ -2193,8 +2213,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2212,8 +2232,8 @@ dependencies = [
"itertools",
"lazy_static",
"md-5",
"num-traits",
"paste",
"petgraph",
"rand",
"regex",
"sha2",
@@ -2223,8 +2243,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"datafusion-common",
@@ -2234,8 +2254,8 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "19.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fad360df0132a2fcb264a7c07b2b02f0b1dfc644#fad360df0132a2fcb264a7c07b2b02f0b1dfc644"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow-schema",
"datafusion-common",
@@ -2307,6 +2327,7 @@ dependencies = [
"tower",
"tower-http",
"url",
"uuid",
]
[[package]]
@@ -4785,9 +4806,9 @@ dependencies = [
[[package]]
name = "parquet"
version = "33.0.0"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1b076829801167d889795cd1957989055543430fa1469cb1f6e32b789bfc764"
checksum = "7ac135ecf63ebb5f53dda0921b0b76d6048b3ef631a5f4760b9e8f863ff00cfa"
dependencies = [
"ahash 0.8.3",
"arrow-array",
@@ -4813,7 +4834,7 @@ dependencies = [
"thrift 0.17.0",
"tokio",
"twox-hash",
"zstd",
"zstd 0.12.3+zstd.1.5.2",
]
[[package]]
@@ -5406,9 +5427,9 @@ dependencies = [
[[package]]
name = "prost-build"
version = "0.11.6"
version = "0.11.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e"
checksum = "a24be1d23b4552a012093e1b93697b73d644ae9590e3253d878d0e77d411b614"
dependencies = [
"bytes",
"heck 0.4.1",
@@ -7201,9 +7222,9 @@ dependencies = [
[[package]]
name = "sqlparser"
version = "0.30.0"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db67dc6ef36edb658196c3fef0464a80b53dbbc194a904e81f9bd4190f9ecc5b"
checksum = "0366f270dbabb5cc2e4c88427dc4c08bba144f81e32fbd459a013f26a4d16aa0"
dependencies = [
"log",
"sqlparser_derive",
@@ -7553,6 +7574,7 @@ dependencies = [
"datafusion",
"datafusion-common",
"datafusion-expr",
"datafusion-physical-expr",
"datatypes",
"derive_builder 0.11.2",
"futures",
@@ -9097,13 +9119,32 @@ version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zstd"
version = "0.11.2+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
dependencies = [
"zstd-safe 5.0.2+zstd.1.5.2",
]
[[package]]
name = "zstd"
version = "0.12.3+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
dependencies = [
"zstd-safe",
"zstd-safe 6.0.4+zstd.1.5.4",
]
[[package]]
name = "zstd-safe"
version = "5.0.2+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"
dependencies = [
"libc",
"zstd-sys",
]
[[package]]

View File

@@ -50,28 +50,28 @@ edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "33.0" }
arrow-array = "33.0"
arrow-flight = "33.0"
arrow-schema = { version = "33.0", features = ["serde"] }
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
futures = "0.3"
futures-util = "0.3"
parquet = "33.0"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.30"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"

View File

@@ -44,5 +44,7 @@ max_purge_tasks = 32
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"

View File

@@ -114,3 +114,7 @@ max_purge_tasks = 32
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"

View File

@@ -22,20 +22,27 @@ RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu
COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependency, using separate `RUN` to separate cache
RUN cargo fetch
# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=${PWD}/python_arm64_build
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build
# Set the environment variable for cross compiling and compile it
# Build the project in release mode. Set Net fetch with git cli to true to avoid git error.
# cross compiled python is `python3` in path, but pyo3 need `python` in path so alias it
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
CARGO_NET_GIT_FETCH_WITH_CLI=1 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend
# Exporting the binary to the clean image

View File

@@ -1,9 +1,36 @@
#!/usr/bin/env bash
set -e
# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
@@ -14,33 +41,47 @@ cd Python-3.10.10
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=${PWD}/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

View File

@@ -1,6 +1,12 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
RUN python3 -m pip install pyarrow
ARG TARGETARCH

View File

@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---
Fault Tolerance for Region
----------------------
# Summary
This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, to put it another way, achieving region high availability ("HA") for the GreptimeDB cluster.
In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.
When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, even though some requests may fail during region failover. Optimizations to reduce the MTTR (Mean Time To Recovery) are not a concern of this RFC, and are left for future work.
# Motivation
Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.
# Details
## Background
Some background about regions in distributed mode:
- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed across Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits the request into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions (see the sketch right after this list).
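To make that splitting step concrete, below is a minimal sketch of per-region request splitting under the assumption of a simple range-partitioned key space; the `Region` and `ScanRequest` types here are illustrative, not the actual Frontend code.
```rust
// Hypothetical types for illustration only; not GreptimeDB's actual structs.
struct Region {
    id: u64,
    start: i64, // inclusive lower bound of the region's key range
    end: i64,   // exclusive upper bound
}

struct ScanRequest {
    range_start: i64, // inclusive
    range_end: i64,   // exclusive
}

/// Split one scan request into per-region sub-requests, keeping only the
/// regions whose key ranges overlap the requested range.
fn split_by_region(req: &ScanRequest, regions: &[Region]) -> Vec<(u64, ScanRequest)> {
    regions
        .iter()
        .filter(|r| r.start < req.range_end && req.range_start < r.end)
        .map(|r| {
            let sub = ScanRequest {
                range_start: req.range_start.max(r.start),
                range_end: req.range_end.min(r.end),
            };
            (r.id, sub)
        })
        .collect()
}
```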
In conclusion, as long as regions remain available, and can regain availability when failures do occur, overall region HA is achieved. With this in mind, let's first see how region failures are detected.
## Failure Detection
We detect region failures in Metasrv, and do it both passively and actively. Passively means that Metasrv does not fire "are you healthy" requests at regions. Instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.
A Datanode already carries its region stats in the heartbeat request (irrelevant fields are omitted):
```protobuf
message HeartbeatRequest {
  ...
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  ...
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  ...
}
```
For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.
> The heartbeat interval is now hardcoded to five seconds.
Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains the health information of all regions. If a region's heartbeats are not received for a period of time, Metasrv suspects the region might be unavailable. To decide whether a region has failed, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" to represent the likelihood of a region's unavailability, based on the historical heartbeat arrival rate. Once "phi" rises above a pre-defined threshold, Metasrv considers the region failed.
> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.
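For intuition, here is a minimal sketch of the phi calculation, assuming heartbeat intervals are kept in a sliding window and using the logistic tail approximation popularized by Akka's detector; the struct and method names are illustrative, not Metasrv's actual code.
```rust
/// Illustrative sketch only; not Metasrv's actual failure detector.
/// Assumes at least one heartbeat interval has been observed.
struct PhiAccrualDetector {
    /// Intervals between recent heartbeats, in milliseconds (a sliding window).
    intervals: Vec<f64>,
    /// Arrival time of the last heartbeat, in milliseconds.
    last_heartbeat_ms: f64,
}

impl PhiAccrualDetector {
    /// phi = -log10(P(the next heartbeat is still on its way, given history)).
    fn phi(&self, now_ms: f64) -> f64 {
        let n = self.intervals.len() as f64;
        let mean = self.intervals.iter().sum::<f64>() / n;
        let variance = self.intervals.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n;
        let std_dev = variance.sqrt().max(1.0); // guard against a zero deviation

        let elapsed = now_ms - self.last_heartbeat_ms;

        // Logistic approximation of the normal distribution's tail probability.
        let y = (elapsed - mean) / std_dev;
        let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
        let p_later = if elapsed > mean {
            e / (1.0 + e)
        } else {
            1.0 - 1.0 / (1.0 + e)
        };

        -p_later.log10()
    }
}
```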
When Metasrv concludes from heartbeats that some region has failed, that is not the final decision. Here comes the "active" detection: before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start failover on the region.
To conclude, the failure detection pseudo-code looks like this:
```rust
// in Metasrv:
fn failure_detection() {
    loop {
        // passive detection
        let failed_regions = all_regions
            .iter()
            .filter(|r| r.estimated_failure_possibility() > config.phi)
            .collect();

        // find the datanodes that contain the failed regions
        let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

        // active detection
        for (datanode, regions) in datanodes_and_regions {
            if !datanode.is_healthy(regions) {
                do_failover(datanode, regions);
            }
        }

        sleep(config.detect_interval);
    }
}
```
Some design considerations:
- Why detect actively when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does detection work on regions instead of Datanodes? Because it may happen that only some of the regions on a Datanode are unavailable, not ALL regions, especially when Datanodes are shared by multiple tenants. In that case, it's better to fail over only the affected regions instead of every region residing on the Datanode. All in all, we want finer-grained control over region failover.
So we have detected that some regions are unavailable. How do we regain availability?
## Region Failover
Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including its details in this RFC; let's just assume we already have it.
In general, region failover is fairly simple. Once Metasrv decides to do failover for some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that helps Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".
> The strategy for choosing the most suitable candidates requires careful design, but that's another RFC.
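As a toy illustration of the kind of heuristic Metasrv could apply here, the sketch below picks the Datanode with the fewest regions, breaking ties by CPU usage; the `DatanodeStat` type is hypothetical, and the real selection strategy is deliberately left to that future RFC.
```rust
use std::cmp::Ordering;

/// Hypothetical per-Datanode stats; the real selection inputs live in Metasrv.
struct DatanodeStat {
    datanode_id: u64,
    region_count: usize,
    cpu_usage: f64, // 0.0..=1.0
}

/// Pick the Datanode with the fewest regions, breaking ties by CPU usage.
fn choose_candidate(stats: &[DatanodeStat]) -> Option<u64> {
    stats
        .iter()
        .min_by(|a, b| {
            a.region_count.cmp(&b.region_count).then(
                a.cpu_usage
                    .partial_cmp(&b.cpu_usage)
                    .unwrap_or(Ordering::Equal),
            )
        })
        .map(|d| d.datanode_id)
}
```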
Then, Metasrv sets the state of these failed regions to "passive". We should add a field to `Region`:
```protobuf
message Region {
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  enum State {
    Active = 0;
    Passive = 1;
  }
  State state = 4;

  map<string, string> attrs = 100;
}
```
Here `Region` is used in the `RegionRoute` message, which indicates how a write request is split among regions. When a region is set to "passive", the Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however).
> Making a region "passive" here effectively blocks writes to it. That's OK in the failover situation, since the region has failed anyway. However, when dealing with active maintenance operations, the region state requires a more refined design. But that's another story.
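A minimal sketch of how the Frontend could gate writes on the proposed region state is shown below; the types and the error shape are illustrative, not the actual `RegionRoute` handling code.
```rust
/// Illustrative types only; the real `Region`/`RegionRoute` live in the meta protocol.
enum RegionState {
    Active,
    Passive,
}

struct RegionRoute {
    region_id: u64,
    state: RegionState,
}

/// Writes to a "passive" region are rejected during failover; reads are unaffected.
fn check_writable(route: &RegionRoute) -> Result<(), String> {
    match route.state {
        RegionState::Active => Ok(()),
        RegionState::Passive => Err(format!(
            "region {} is passive (failing over); write rejected",
            route.region_id
        )),
    }
}
```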
Third, Metasrv fires "close region" requests at the failed Datanodes, and "open region" requests at the candidates. The "close region" requests might fail due to the unavailability of the Datanodes, but that's fine; they are just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.
> Currently "close region" is undefined in the Datanode. It could be a local cache cleanup of region data or other resource tidy-up.
Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating that it is ready to handle the region. The "call back" here is carried by its heartbeat to Metasrv. Metasrv then updates the region's state to "active", letting the Frontend lift the restriction on region writes (again, region reads are untouched).
All the above steps should be managed by the remote procedure framework. That is another implementation challenge in the region failover feature (the other being the remote WAL, of course).
A picture is worth a thousand words:
```text
+-------------------------+
| Metasrv detects region  |
| failure                 |
+-------------------------+
             |
             v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions     |
+----------------------------+
             |
             v
+-------------------------+        +-------------------------+
| Metasrv "passive" the   |------->| Frontend rejects writes |
| failed regions          |        | to "passive" regions    |
+-------------------------+        +-------------------------+
             |
             v
+--------------------------+        +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL  |        | "open" region requests    |
+--------------------------+        +---------------------------+
             |
             |
             |          +-------------------------+        +-------------------------+
             +--------->| Metasrv "active" the    |------->| Frontend lifts write    |
                        | failed regions          |        | restriction to regions  |
                        +-------------------------+        +-------------------------+
                                     |
                                     v
                        +-------------------------+
                        | Region failover done,   |
                        | HA regain               |
                        +-------------------------+
```
# Alternatives
## The "Neon" Way
Remote WAL raises a problem that could harm the write throughput of the GreptimeDB cluster: each write request has to make at least two remote calls, one from the Frontend to the Datanode, and one from the Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, placing the remote WAL between the Frontend and the Datanode? Couldn't that improve our write throughput? It could, though there are some consistency issues such as "read-your-writes" to solve.
However, the main reasons we don't adopt this method are twofold:
1. Remote WAL is planned to be quorum-based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users could choose not to enable (at the cost of some reduction in reliability).
## No WAL, Replication instead
This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Were the main region to fail, a standby region in its replica group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It would require a major redesign, yet gains no significant advantage over the remote WAL method.
However, replication does have advantages of its own that we can learn from to optimize the failover procedure.
# Future Work
Some optimizations we could adopt:
- To reduce the MTTR, we could have Metasrv choose a candidate for each region ahead of time, during normal operation. The candidate does some preparation work to reduce the region-open time, effectively accelerating the failover procedure.
- We can adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover does not need to load or exchange much data, greatly reducing the failover time.

View File

@@ -204,6 +204,21 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -238,7 +253,8 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),

View File

@@ -18,6 +18,7 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
@@ -225,10 +226,10 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}
/// The number of regions in the datanode node.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
let mut region_number: u64 = 0;
/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<RegionStat>> {
let mut region_stats = Vec::new();
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
@@ -254,10 +255,28 @@ pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
table_info: &table_name,
})?;
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
region_stats.extend(
table
.region_stats()
.context(error::RegionStatsSnafu {
catalog: &catalog_name,
schema: &schema_name,
table: &table_name,
})?
.into_iter()
.map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
}),
);
}
}
}
Ok(region_number)
Ok(region_stats)
}

View File

@@ -19,8 +19,8 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
@@ -135,6 +135,13 @@ impl Database {
.await
}
pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
.await
}
async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {

View File

@@ -30,9 +30,39 @@ struct Command {
subcmd: SubCommand,
}
pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}
impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}
async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}
impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}
@@ -51,13 +81,28 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}
@@ -104,13 +149,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);
let mut app = cmd.build().await?;
tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}

View File

@@ -17,10 +17,24 @@ mod helper;
mod repl;
use clap::Parser;
use repl::Repl;
pub use repl::Repl;
use crate::error::Result;
pub struct Instance {
repl: Repl,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}
pub async fn stop(&self) -> Result<()> {
Ok(())
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -28,8 +42,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.cmd.run().await
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}
}
@@ -39,9 +53,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.run().await,
SubCommand::Attach(cmd) => cmd.build().await,
}
}
}
@@ -57,8 +71,8 @@ pub(crate) struct AttachCommand {
}
impl AttachCommand {
async fn run(self) -> Result<()> {
let mut repl = Repl::try_new(&self).await?;
repl.run().await
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance { repl })
}
}

View File

@@ -50,7 +50,7 @@ use crate::error::{
};
/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,

View File

@@ -21,9 +21,26 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;
pub struct Instance {
datanode: Datanode,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -31,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -42,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -72,19 +89,16 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);
let opts: DatanodeOptions = self.try_into()?;
logging::info!("Datanode options: {:#?}", opts);
Datanode::new(opts)
.await
.context(StartDatanodeSnafu)?
.start()
.await
.context(StartDatanodeSnafu)
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
Ok(Instance { datanode })
}
}
@@ -138,7 +152,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}

View File

@@ -26,18 +26,42 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -137,7 +161,11 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments

View File
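The Shutdown* and BuildMetaServer variants added above follow the same snafu wiring as the existing Start* ones: the component error becomes the source field and callers attach it with ResultExt::context. A tiny stand-alone sketch of that pattern, assuming snafu 0.7-style generated ...Snafu context selectors and using std::io::Error as a stand-in source error:

use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to shutdown datanode, source: {}", source))]
    ShutdownDatanode { source: std::io::Error },
}

// Stand-in for Datanode::shutdown().
fn shutdown() -> Result<(), std::io::Error> {
    Err(std::io::Error::new(std::io::ErrorKind::Other, "still flushing"))
}

fn stop() -> Result<(), Error> {
    // context() wraps the io::Error into Error::ShutdownDatanode.
    shutdown().context(ShutdownDatanodeSnafu)
}

fn main() {
    // Prints: Failed to shutdown datanode, source: still flushing
    println!("{}", stop().unwrap_err());
}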

@@ -19,7 +19,7 @@ use common_base::Plugins;
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::{FrontendInstance, Instance};
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
@@ -34,6 +34,26 @@ use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::toml_loader;
pub struct Instance {
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -41,8 +61,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -52,9 +72,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -90,11 +110,11 @@ pub struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;
let mut instance = Instance::try_new_distributed(&opts, plugins.clone())
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
@@ -103,7 +123,7 @@ impl StartCommand {
.await
.context(error::StartFrontendSnafu)?;
instance.start().await.context(error::StartFrontendSnafu)
Ok(Instance { frontend: instance })
}
}

View File

@@ -14,13 +14,33 @@
use clap::Parser;
use common_telemetry::{info, logging, warn};
use meta_srv::bootstrap;
use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{Error, Result};
use crate::{error, toml_loader};
pub struct Instance {
instance: MetaSrvInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
@@ -28,8 +48,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -39,9 +59,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -63,16 +83,17 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
let opts: MetaSrvOptions = self.try_into()?;
logging::info!("MetaSrv options: {:#?}", opts);
bootstrap::bootstrap_meta_srv(opts)
let instance = MetaSrvInstance::new(opts)
.await
.context(error::StartMetaServerSnafu)
.context(error::BuildMetaServerSnafu)?;
Ok(Instance { instance })
}
}

View File

@@ -36,7 +36,10 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
@@ -47,8 +50,8 @@ pub struct Command {
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}
@@ -58,9 +61,9 @@ enum SubCommand {
}
impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}
@@ -133,6 +136,40 @@ impl StandaloneOptions {
}
}
pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
}
impl Instance {
pub async fn run(&mut self) -> Result<()> {
// Start datanode instance before starting services, to avoid requests coming in before internal components are started.

self.datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;
self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
Ok(())
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
@@ -164,7 +201,7 @@ struct StartCommand {
}
impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -184,25 +221,18 @@ impl StartCommand {
fe_opts, dn_opts
);
let mut datanode = Datanode::new(dn_opts.clone())
let datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
// Start datanode instance before starting services, to avoid requests coming in before internal components are started.
datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
frontend
.build_servers(&fe_opts, plugins)
.await
.context(StartFrontendSnafu)?;
frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
Ok(Instance { datanode, frontend })
}
}

View File

@@ -12,17 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod from_unixtime;
use from_unixtime::FromUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(FromUnixtimeFunction::default()));
}
pub fn register(_registry: &FunctionRegistry) {}
}

View File

@@ -1,133 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! from_unixtime function.
/// TODO(dennis) It can be removed after we upgrade datafusion.
use std::fmt;
use std::sync::Arc;
use common_query::error::{
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
use snafu::ResultExt;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct FromUnixtimeFunction;
const NAME: &str = "from_unixtime";
impl Function for FromUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_millisecond_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::int64_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
match columns[0].data_type() {
ConcreteDataType::Int64(_) => {
let array = columns[0].to_arrow_array();
// Our timestamp vector's time unit is millisecond
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
.context(ArrowComputeSnafu)?;
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
Ok(Arc::new(
TimestampMillisecondVector::try_from_arrow_array(
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
typ: ArrowDatatype::Int64,
})?,
)
.context(IntoVectorSnafu {
data_type: arrow_datatype.clone(),
})?,
))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for FromUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FROM_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::Int64Vector;
use super::*;
#[test]
fn test_from_unixtime() {
let f = FromUnixtimeFunction::default();
assert_eq!("from_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::int64_datatype()]
));
let times = vec![Some(1494410783), None, Some(1494410983)];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(3, vector.len());
for (i, t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), t.unwrap() * 1000);
}
_ => unreachable!(),
}
}
}
}

View File

@@ -14,6 +14,7 @@ object-store = { path = "../../object-store" }
serde.workspace = true
serde_json = "1.0"
smallvec = "1"
backon = "0.4.0"
snafu.workspace = true
tokio.workspace = true
uuid.workspace = true

View File
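The new backon dependency above supplies the exponential backoff used by the procedure runner later in this diff; its builder yields a plain iterator of delays, which is why the runner can simply call retry.next() until the budget runs out. A minimal sketch, assuming backon 0.4's builder API as used by the runner:

use std::time::Duration;

use backon::{BackoffBuilder, ExponentialBuilder};

fn main() {
    // Same knobs the runner uses: first delay of 500ms, at most 3 retries.
    let mut retry = ExponentialBuilder::default()
        .with_min_delay(Duration::from_millis(500))
        .with_max_times(3)
        .build();

    // Each call yields the next backoff delay; None means the retry budget
    // is exhausted (the runner then reports RetryTimesExceeded).
    while let Some(delay) = retry.next() {
        println!("sleep {} ms before the next attempt", delay.as_millis());
    }
}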

@@ -97,6 +97,16 @@ pub enum Error {
source: Arc<Error>,
backtrace: Backtrace,
},
#[snafu(display(
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
procedure_id,
source
))]
RetryTimesExceeded {
source: Arc<Error>,
procedure_id: ProcedureId,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -111,6 +121,7 @@ impl ErrorExt for Error {
| Error::ListState { .. }
| Error::ReadState { .. }
| Error::FromJson { .. }
| Error::RetryTimesExceeded { .. }
| Error::RetryLater { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {

View File

@@ -17,8 +17,10 @@ mod runner;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_telemetry::logging;
use object_store::ObjectStore;
use snafu::ensure;
@@ -291,12 +293,16 @@ impl ManagerContext {
pub struct ManagerConfig {
/// Object store
pub object_store: ObjectStore,
pub max_retry_times: usize,
pub retry_delay: Duration,
}
/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
manager_ctx: Arc<ManagerContext>,
state_store: StateStoreRef,
max_retry_times: usize,
retry_delay: Duration,
}
impl LocalManager {
@@ -305,6 +311,8 @@ impl LocalManager {
LocalManager {
manager_ctx: Arc::new(ManagerContext::new()),
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
max_retry_times: config.max_retry_times,
retry_delay: config.retry_delay,
}
}
@@ -321,7 +329,11 @@ impl LocalManager {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: ExponentialBuilder::default()
.with_min_delay(self.retry_delay)
.with_max_times(self.max_retry_times),
store: ProcedureStore::new(self.state_store.clone()),
rolling_back: false,
};
let watcher = meta.state_receiver.clone();
@@ -543,6 +555,8 @@ mod tests {
let dir = create_temp_dir("register");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -562,6 +576,8 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let config = ManagerConfig {
object_store: object_store.clone(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -606,6 +622,8 @@ mod tests {
let dir = create_temp_dir("submit");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -652,6 +670,8 @@ mod tests {
let dir = create_temp_dir("on_err");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);

View File

@@ -15,15 +15,15 @@
use std::sync::Arc;
use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;
use crate::error::{ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
#[derive(Debug)]
enum ExecResult {
@@ -108,7 +108,9 @@ pub(crate) struct Runner {
pub(crate) procedure: BoxedProcedure,
pub(crate) manager_ctx: Arc<ManagerContext>,
pub(crate) step: u32,
pub(crate) exponential_builder: ExponentialBuilder,
pub(crate) store: ProcedureStore,
pub(crate) rolling_back: bool,
}
impl Runner {
@@ -164,18 +166,56 @@ impl Runner {
provider: self.manager_ctx.clone(),
};
self.rolling_back = false;
self.execute_once_with_retry(&ctx).await;
}
async fn execute_once_with_retry(&mut self, ctx: &Context) {
let mut retry = self.exponential_builder.build();
let mut retry_times = 0;
loop {
match self.execute_once(&ctx).await {
ExecResult::Continue => (),
match self.execute_once(ctx).await {
ExecResult::Done | ExecResult::Failed => return,
ExecResult::Continue => (),
ExecResult::RetryLater => {
self.wait_on_err().await;
retry_times += 1;
if let Some(d) = retry.next() {
self.wait_on_err(d, retry_times).await;
} else {
assert!(self.meta.state().is_retrying());
if let Retrying { error } = self.meta.state() {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RetryTimesExceeded {
source: error,
procedure_id: self.meta.id,
},
)))
}
return;
}
}
}
}
}
async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
if let Err(e) = self.rollback_procedure().await {
self.rolling_back = true;
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(error));
ExecResult::Failed
}
async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
// if rolling_back, there is no need to execute again.
if self.rolling_back {
// We can definitely get the previous error here.
let state = self.meta.state();
let err = state.error().unwrap();
return self.rollback(err.clone()).await;
}
match self.procedure.execute(ctx).await {
Ok(status) => {
logging::debug!(
@@ -186,8 +226,11 @@ impl Runner {
status.need_persist(),
);
if status.need_persist() && self.persist_procedure().await.is_err() {
return ExecResult::RetryLater;
if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
return ExecResult::RetryLater;
}
}
match status {
@@ -196,7 +239,8 @@ impl Runner {
self.on_suspended(subprocedures).await;
}
Status::Done => {
if self.commit_procedure().await.is_err() {
if let Err(e) = self.commit_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
@@ -217,17 +261,12 @@ impl Runner {
);
if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(Arc::new(e)));
// Write rollback key so we can skip this procedure while recovering procedures.
if self.rollback_procedure().await.is_err() {
return ExecResult::RetryLater;
}
ExecResult::Failed
self.rollback(Arc::new(e)).await
}
}
}
@@ -261,7 +300,9 @@ impl Runner {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: self.exponential_builder.clone(),
store: self.store.clone(),
rolling_back: false,
};
// Insert the procedure. We already checked the procedure's existence before inserting
@@ -285,8 +326,16 @@ impl Runner {
});
}
async fn wait_on_err(&self) {
time::sleep(ERR_WAIT_DURATION).await;
/// Sleep for the backoff delay before the next retry, logging the attempt count.
async fn wait_on_err(&self, d: Duration, i: u64) {
logging::info!(
"Procedure {}-{} retry for the {} times after {} millis",
self.procedure.type_name(),
self.meta.id,
i,
d.as_millis(),
);
time::sleep(d).await;
}
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
@@ -416,7 +465,9 @@ mod tests {
procedure,
manager_ctx: Arc::new(ManagerContext::new()),
step: 0,
exponential_builder: ExponentialBuilder::default(),
store,
rolling_back: false,
}
}
@@ -744,7 +795,7 @@ mod tests {
let res = runner.execute_once(&ctx).await;
assert!(res.is_retry_later(), "{res:?}");
assert!(meta.state().is_running());
assert!(meta.state().is_retrying());
let res = runner.execute_once(&ctx).await;
assert!(res.is_done(), "{res:?}");
@@ -752,6 +803,36 @@ mod tests {
check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
}
#[tokio::test]
async fn test_execute_exceed_max_retry_later() {
let exec_fn =
|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};
let dir = create_temp_dir("exceed_max_retry_later");
let meta = exceed_max_retry_later.new_meta(ROOT_ID);
let object_store = test_util::new_object_store(&dir);
let procedure_store = ProcedureStore::from(object_store.clone());
let mut runner = new_runner(
meta.clone(),
Box::new(exceed_max_retry_later),
procedure_store,
);
runner.exponential_builder = ExponentialBuilder::default()
.with_min_delay(Duration::from_millis(1))
.with_max_times(3);
// Run the runner and execute the procedure.
runner.execute_procedure_in_loop().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("Procedure retry exceeded max times"));
}
#[tokio::test]
async fn test_child_error() {
let mut times = 0;
@@ -819,7 +900,7 @@ mod tests {
// Replace the manager ctx.
runner.manager_ctx = manager_ctx;
// Run the runer and execute the procedure.
// Run the runner and execute the procedure.
runner.run().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("subprocedure failed"), "{err}");

View File

@@ -206,6 +206,8 @@ pub enum ProcedureState {
Running,
/// The procedure is finished.
Done,
/// The procedure is failed and can be retried.
Retrying { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
Failed { error: Arc<Error> },
}
@@ -216,6 +218,11 @@ impl ProcedureState {
ProcedureState::Failed { error }
}
/// Returns a [ProcedureState] with retrying state.
pub fn retrying(error: Arc<Error>) -> ProcedureState {
ProcedureState::Retrying { error }
}
/// Returns true if the procedure state is running.
pub fn is_running(&self) -> bool {
matches!(self, ProcedureState::Running)
@@ -231,10 +238,16 @@ impl ProcedureState {
matches!(self, ProcedureState::Failed { .. })
}
/// Returns true if the procedure state is retrying.
pub fn is_retrying(&self) -> bool {
matches!(self, ProcedureState::Retrying { .. })
}
/// Returns the error.
pub fn error(&self) -> Option<&Arc<Error>> {
match self {
ProcedureState::Failed { error } => Some(error),
ProcedureState::Retrying { error } => Some(error),
_ => None,
}
}

View File
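With the Retrying variant added above, both the retryable and the terminal failure states carry the triggering error, and error() is the single accessor for it. A compact stand-in to show how callers can treat the two uniformly; the State type here is hypothetical (Done omitted, Arc<String> instead of Arc<Error>), not the real crate enum:

use std::sync::Arc;

#[derive(Debug)]
enum State {
    Running,
    Retrying { error: Arc<String> },
    Failed { error: Arc<String> },
}

impl State {
    fn error(&self) -> Option<&Arc<String>> {
        match self {
            State::Retrying { error } | State::Failed { error } => Some(error),
            State::Running => None,
        }
    }
}

fn main() {
    let retrying = State::Retrying { error: Arc::new("flaky object store".to_string()) };
    let failed = State::Failed { error: Arc::new("retry budget exhausted".to_string()) };
    assert_eq!(retrying.error().map(|e| e.as_str()), Some("flaky object store"));
    assert!(failed.error().is_some());
    assert!(State::Running.error().is_none());
}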

@@ -33,6 +33,9 @@ pub async fn wait(watcher: &mut Watcher) -> Result<()> {
ProcedureState::Failed { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
ProcedureState::Retrying { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
}
}
}

View File

@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::util::div_ceil;
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -143,7 +144,7 @@ impl Timestamp {
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
}
}

View File

@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
chrono::Utc::now().timestamp_millis()
}
/// Port of the Rust unstable `int_roundings` feature (`i64::div_ceil`). Identical to `i64::div_ceil` whenever the divisor is positive, which is always the case for timestamp unit factors.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
let d = this / rhs;
let r = this % rhs;
if r > 0 && rhs > 0 {
d + 1
} else {
d
}
}
#[cfg(test)]
mod tests {
use std::time::{self, SystemTime};
@@ -42,4 +53,10 @@ mod tests {
assert_eq!(datetime_std.hour(), datetime_now.hour());
assert_eq!(datetime_std.minute(), datetime_now.minute());
}
#[test]
fn test_div_ceil() {
let v0 = 9223372036854676001;
assert_eq!(9223372036854677, div_ceil(v0, 1000));
}
}

View File

@@ -64,6 +64,7 @@ tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
url = "2.3.1"
uuid.workspace = true
[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }

View File

@@ -77,6 +77,7 @@ impl Default for ObjectStoreConfig {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct WalConfig {
// wal directory
pub dir: String,
@@ -108,6 +109,7 @@ impl Default for WalConfig {
/// Options for table compaction
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct CompactionConfig {
/// Max task number that can concurrently run.
pub max_inflight_tasks: usize,
@@ -149,11 +151,22 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
pub struct ProcedureConfig {
/// Storage config for procedure manager.
pub store: ObjectStoreConfig,
/// Max retry times of procedure.
pub max_retry_times: usize,
/// Initial retry delay of procedures, increases exponentially.
#[serde(with = "humantime_serde")]
pub retry_delay: Duration,
}
impl Default for ProcedureConfig {
fn default() -> ProcedureConfig {
ProcedureConfig::from_file_path("/tmp/greptimedb/procedure/".to_string())
ProcedureConfig {
store: ObjectStoreConfig::File(FileConfig {
data_dir: "/tmp/greptimedb/procedure/".to_string(),
}),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
}
}
}
@@ -161,6 +174,7 @@ impl ProcedureConfig {
pub fn from_file_path(path: String) -> ProcedureConfig {
ProcedureConfig {
store: ObjectStoreConfig::File(FileConfig { data_dir: path }),
..Default::default()
}
}
}
@@ -241,7 +255,7 @@ impl Datanode {
self.instance.clone()
}
async fn shutdown_instance(&self) -> Result<()> {
pub async fn shutdown_instance(&self) -> Result<()> {
self.instance.shutdown().await
}

View File
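The retry_delay field added to ProcedureConfig above is (de)serialized via humantime_serde, so config files can spell durations as strings such as "500ms" or "2s". A small sketch of just that mechanism, using a hypothetical struct that mirrors only the two retry fields rather than the full ProcedureConfig, and assuming the serde, toml and humantime_serde crates:

use std::time::Duration;

use serde::Deserialize;

// Hypothetical stand-in mirroring only the retry-related fields.
#[derive(Debug, Deserialize)]
struct RetryOptions {
    max_retry_times: usize,
    #[serde(with = "humantime_serde")]
    retry_delay: Duration,
}

fn main() {
    let text = "max_retry_times = 3\nretry_delay = \"500ms\"";
    let opts: RetryOptions = toml::from_str(text).unwrap();
    assert_eq!(opts.max_retry_times, 3);
    assert_eq!(opts.retry_delay, Duration::from_millis(500));
}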

@@ -169,6 +169,13 @@ pub enum Error {
source: TableError,
},
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
FlushTable {
table_name: String,
#[snafu(backtrace)]
source: TableError,
},
#[snafu(display("Failed to start server, source: {}", source))]
StartServer {
#[snafu(backtrace)]
@@ -539,6 +546,7 @@ impl ErrorExt for Error {
source.status_code()
}
DropTable { source, .. } => source.status_code(),
FlushTable { source, .. } => source.status_code(),
Insert { source, .. } => source.status_code(),
Delete { source, .. } => source.status_code(),

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
use catalog::{region_number, CatalogManagerRef};
use catalog::{region_stats, CatalogManagerRef};
use common_telemetry::{error, info, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
@@ -106,11 +106,11 @@ impl HeartbeatTask {
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
common_runtime::spawn_bg(async move {
while running.load(Ordering::Acquire) {
let region_num = match region_number(&catalog_manager_clone).await {
Ok(region_num) => region_num as i64,
let (region_num, region_stats) = match region_stats(&catalog_manager_clone).await {
Ok(region_stats) => (region_stats.len() as i64, region_stats),
Err(e) => {
error!("failed to get region number, err: {e:?}");
-1
error!("failed to get region status, err: {e:?}");
(-1, vec![])
}
};
@@ -123,6 +123,7 @@ impl HeartbeatTask {
region_num,
..Default::default()
}),
region_stats,
..Default::default()
};

View File

@@ -460,7 +460,11 @@ pub(crate) async fn create_procedure_manager(
);
let object_store = new_object_store(&procedure_config.store).await?;
let manager_config = ManagerConfig { object_store };
let manager_config = ManagerConfig {
object_store,
max_retry_times: procedure_config.max_retry_times,
retry_delay: procedure_config.retry_delay,
};
Ok(Some(Arc::new(LocalManager::new(manager_config))))
}

View File

@@ -127,7 +127,7 @@ impl Instance {
DdlExpr::Alter(expr) => self.handle_alter(expr).await,
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
DdlExpr::FlushTable(_) => todo!(),
DdlExpr::FlushTable(expr) => self.handle_flush_table(expr).await,
}
}
}

View File

@@ -163,14 +163,14 @@ impl Instance {
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
CopyTable::To(copy_table) => {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
let file_name = copy_table.file_name().to_string();
table_idents_to_full_name(&copy_table.table_name, query_ctx.clone())?;
let file_name = copy_table.file_name;
let req = CopyTableRequest {
catalog_name,
schema_name,
table_name,
file_name,
connection: copy_table.connection,
};
self.sql_handler

View File

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr};
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, FlushTableExpr};
use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
use common_query::Output;
use common_telemetry::info;
use session::context::QueryContext;
use snafu::prelude::*;
use table::requests::DropTableRequest;
use table::requests::{DropTableRequest, FlushTableRequest};
use crate::error::{
AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu,
@@ -82,6 +82,24 @@ impl Instance {
.execute(SqlRequest::DropTable(req), QueryContext::arc())
.await
}
pub(crate) async fn handle_flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
let table_name = if expr.table_name.trim().is_empty() {
None
} else {
Some(expr.table_name)
};
let req = FlushTableRequest {
catalog_name: expr.catalog_name,
schema_name: expr.schema_name,
table_name,
region_number: expr.region_id,
};
self.sql_handler()
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
.await
}
}
#[cfg(test)]
@@ -136,7 +154,6 @@ mod tests {
}
#[test]
fn test_create_column_schema() {
let column_def = ColumnDef {
name: "a".to_string(),

View File

@@ -39,6 +39,7 @@ mod copy_table_from;
mod create;
mod delete;
mod drop_table;
mod flush_table;
pub(crate) mod insert;
#[derive(Debug)]
@@ -48,6 +49,7 @@ pub enum SqlRequest {
CreateDatabase(CreateDatabaseRequest),
Alter(AlterTableRequest),
DropTable(DropTableRequest),
FlushTable(FlushTableRequest),
ShowDatabases(ShowDatabases),
ShowTables(ShowTables),
DescribeTable(DescribeTable),
@@ -116,6 +118,7 @@ impl SqlHandler {
})?;
describe_table(table).context(ExecuteSqlSnafu)
}
SqlRequest::FlushTable(req) => self.flush_table(req).await,
};
if let Err(e) = &result {
error!(e; "{query_ctx}");

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::pin::Pin;
use common_query::physical_plan::SessionContext;
@@ -22,16 +23,54 @@ use datafusion::parquet::basic::{Compression, Encoding};
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::physical_plan::RecordBatchStream;
use futures::TryStreamExt;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use object_store::ObjectStore;
use snafu::ResultExt;
use table::engine::TableReference;
use table::requests::CopyTableRequest;
use url::{ParseError, Url};
use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
fn build_backend(
&self,
url: &str,
connection: HashMap<String, String>,
) -> Result<(ObjectStore, String)> {
let result = Url::parse(url);
match result {
Ok(url) => {
let host = url.host_str();
let schema = url.scheme();
let path = url.path();
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let object_store = build_s3_backend(host, "/", connection)?;
Ok((object_store, path.to_string()))
}
_ => error::UnsupportedBackendProtocolSnafu {
protocol: schema.to_string(),
}
.fail(),
}
}
Err(ParseError::RelativeUrlWithoutBase) => {
let object_store = build_fs_backend("/")?;
Ok((object_store, url.to_string()))
}
Err(err) => Err(error::Error::InvalidUrl {
url: url.to_string(),
source: err,
}),
}
}
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
@@ -52,13 +91,9 @@ impl SqlHandler {
.context(error::TableScanExecSnafu)?;
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
let accessor = Builder::default()
.root("/")
.build()
.context(error::BuildBackendSnafu)?;
let object_store = ObjectStore::new(accessor).finish();
let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
// TODO(jiachun):
// For now, COPY is implemented synchronously.
// When copying a large table, it will be blocked for a long time.

View File
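build_backend above leans on the url crate to tell object-store URLs apart from plain file paths: a bare path has no scheme, so parsing fails with RelativeUrlWithoutBase and the handler falls back to the local fs backend. A stand-alone illustration of that distinction:

use url::{ParseError, Url};

fn main() {
    // An S3 URL parses into scheme / host (bucket) / path.
    let url = Url::parse("s3://my-bucket/export/demo.parquet").unwrap();
    assert_eq!(url.scheme(), "s3");
    assert_eq!(url.host_str(), Some("my-bucket"));
    assert_eq!(url.path(), "/export/demo.parquet");

    // A plain file path has no scheme, so parsing fails with
    // RelativeUrlWithoutBase and the handler uses the fs backend instead.
    assert!(matches!(
        Url::parse("/tmp/export/demo.parquet"),
        Err(ParseError::RelativeUrlWithoutBase)
    ));
}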

@@ -34,7 +34,7 @@ use url::{ParseError, Url};
use crate::error::{self, Result};
use crate::sql::SqlHandler;
const S3_SCHEMA: &str = "S3";
pub const S3_SCHEMA: &str = "S3";
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
@@ -165,13 +165,10 @@ impl DataSource {
Source::Dir
};
let accessor = Fs::default()
.root(&path)
.build()
.context(error::BuildBackendSnafu)?;
let object_store = build_fs_backend(&path)?;
Ok(DataSource {
object_store: ObjectStore::new(accessor).finish(),
object_store,
source,
path,
regex,
@@ -184,59 +181,6 @@ impl DataSource {
}
}
fn build_s3_backend(
host: Option<&str>,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
if let Some(bucket) = host {
builder.bucket(bucket);
}
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
fn from_url(
url: Url,
regex: Option<Regex>,
@@ -257,7 +201,7 @@ impl DataSource {
};
let object_store = match schema.to_uppercase().as_str() {
S3_SCHEMA => DataSource::build_s3_backend(host, &dir, connection)?,
S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
_ => {
return error::UnsupportedBackendProtocolSnafu {
protocol: schema.to_string(),
@@ -348,6 +292,68 @@ impl DataSource {
}
}
pub fn build_s3_backend(
host: Option<&str>,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
if let Some(bucket) = host {
builder.bucket(bucket);
}
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let accessor = Fs::default()
.root(root)
.build()
.context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
#[cfg(test)]
mod tests {

View File

@@ -0,0 +1,78 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_query::Output;
use snafu::{OptionExt, ResultExt};
use table::engine::TableReference;
use table::requests::FlushTableRequest;
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
if let Some(table) = &req.table_name {
self.flush_table_inner(
&req.catalog_name,
&req.schema_name,
table,
req.region_number,
)
.await?;
} else {
let schema = self
.catalog_manager
.schema(&req.catalog_name, &req.schema_name)
.context(CatalogSnafu)?
.context(DatabaseNotFoundSnafu {
catalog: &req.catalog_name,
schema: &req.schema_name,
})?;
let all_table_names = schema.table_names().context(CatalogSnafu)?;
futures::future::join_all(all_table_names.iter().map(|table| {
self.flush_table_inner(
&req.catalog_name,
&req.schema_name,
table,
req.region_number,
)
}))
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
}
Ok(Output::AffectedRows(0))
}
async fn flush_table_inner(
&self,
catalog: &str,
schema: &str,
table: &str,
region: Option<u32>,
) -> Result<()> {
let table_ref = TableReference {
catalog,
schema,
table,
};
let full_table_name = table_ref.to_string();
let table = self.get_table(&table_ref)?;
table.flush(region).await.context(error::FlushTableSnafu {
table_name: full_table_name,
})
}
}

View File
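When no table name is given, flush_table above fans out one flush per table with join_all and then collects the Vec of results into a single Result, so any failing table fails the whole statement. A minimal sketch of that pattern; flush_one is a hypothetical stand-in for flush_table_inner:

use futures::executor::block_on;
use futures::future::join_all;

// Stand-in for flush_table_inner: pretend empty table names fail.
async fn flush_one(table: &str) -> Result<(), String> {
    if table.is_empty() {
        Err("empty table name".to_string())
    } else {
        Ok(())
    }
}

async fn flush_all(tables: &[&str]) -> Result<(), String> {
    let results = join_all(tables.iter().map(|&t| flush_one(t))).await;
    // collect() short-circuits on the first Err, like the handler above.
    results.into_iter().collect::<Result<Vec<_>, _>>()?;
    Ok(())
}

fn main() {
    assert!(block_on(flush_all(&["cpu", "memory"])).is_ok());
    assert!(block_on(flush_all(&["cpu", ""])).is_err());
}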

@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env;
use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util;
use common_telemetry::logging;
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
use query::parser::{QueryLanguageParser, QueryStatement};
@@ -797,6 +799,45 @@ async fn test_execute_copy_to() {
assert!(matches!(output, Output::AffectedRows(2)));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_to_s3() {
logging::init_default_ut_logging();
if let Ok(bucket) = env::var("GT_S3_BUCKET") {
if !bucket.is_empty() {
let instance = setup_test_instance("test_execute_copy_to_s3").await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
"#,
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
let key = env::var("GT_S3_ACCESS_KEY").unwrap();
let url =
env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
let root = uuid::Uuid::new_v4().to_string();
// exports
let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_from() {
let instance = setup_test_instance("test_execute_copy_from").await;
@@ -882,6 +923,106 @@ async fn test_execute_copy_from() {
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_from_s3() {
logging::init_default_ut_logging();
if let Ok(bucket) = env::var("GT_S3_BUCKET") {
if !bucket.is_empty() {
let instance = setup_test_instance("test_execute_copy_from_s3").await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
"#,
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
// export
let root = uuid::Uuid::new_v4().to_string();
let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
let key = env::var("GT_S3_ACCESS_KEY").unwrap();
let url =
env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
logging::info!("Copy table to s3: {}", copy_to_stmt);
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
struct Test<'a> {
sql: &'a str,
table_name: &'a str,
}
let tests = [
Test {
sql: &format!(
"Copy with_filename FROM 's3://{}/{}/export/demo.parquet_1_2'",
bucket, root
),
table_name: "with_filename",
},
Test {
sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root),
table_name: "with_path",
},
Test {
sql: &format!(
"Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')",
bucket, root
),
table_name: "with_pattern",
},
];
for test in tests {
// import
execute_sql(
&instance,
&format!(
"create table {}(host string, cpu double, memory double, ts timestamp time index);",
test.table_name
),
)
.await;
let sql = format!(
"{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')",
test.sql, key_id, key, url
);
logging::info!("Running sql: {}", sql);
let output = execute_sql(&instance, &sql).await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(
&instance,
&format!("select * from {} order by ts", test.table_name),
)
.await;
let expected = "\
+-------+------+--------+---------------------+
| host | cpu | memory | ts |
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
+-------+------+--------+---------------------+"
.to_string();
check_output_stream(output, expected).await;
}
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_create_by_procedure() {
common_telemetry::init_default_ut_logging();

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_query::Output;
@@ -64,6 +65,8 @@ impl MockInstance {
store: ObjectStoreConfig::File(FileConfig {
data_dir: procedure_dir.path().to_str().unwrap().to_string(),
}),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
});
let instance = Instance::with_mock_meta_client(&opts).await.unwrap();

View File

@@ -677,7 +677,7 @@ pub fn check_permission(
validate_param(delete.table_name(), query_ctx)?;
}
Statement::Copy(stmd) => match stmd {
CopyTable::To(copy_table_to) => validate_param(copy_table_to.table_name(), query_ctx)?,
CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
CopyTable::From(copy_table_from) => {
validate_param(&copy_table_from.table_name, query_ctx)?
}

View File

@@ -19,8 +19,8 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::{
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, InsertRequest,
TableId,
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropTableExpr, FlushTableExpr,
InsertRequest, TableId,
};
use async_trait::async_trait;
use catalog::helper::{SchemaKey, SchemaValue};
@@ -39,7 +39,7 @@ use meta_client::client::MetaClient;
use meta_client::rpc::router::DeleteRequest as MetaDeleteRequest;
use meta_client::rpc::{
CompareAndPutRequest, CreateRequest as MetaCreateRequest, Partition as MetaPartition,
RouteResponse, TableName,
RouteRequest, RouteResponse, TableName,
};
use partition::partition::{PartitionBound, PartitionDef};
use query::error::QueryExecutionSnafu;
@@ -259,6 +259,61 @@ impl DistInstance {
Ok(Output::AffectedRows(1))
}
async fn flush_table(&self, table_name: TableName, region_id: Option<u32>) -> Result<Output> {
let _ = self
.catalog_manager
.table(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
)
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: table_name.to_string(),
})?;
let route_response = self
.meta_client
.route(RouteRequest {
table_names: vec![table_name.clone()],
})
.await
.context(RequestMetaSnafu)?;
let expr = FlushTableExpr {
catalog_name: table_name.catalog_name.clone(),
schema_name: table_name.schema_name.clone(),
table_name: table_name.table_name.clone(),
region_id,
};
for table_route in &route_response.table_routes {
let should_send_rpc = table_route.region_routes.iter().any(|route| {
if let Some(region_id) = region_id {
region_id == route.region.id as u32
} else {
true
}
});
if !should_send_rpc {
continue;
}
for datanode in table_route.find_leaders() {
debug!("Flushing table {table_name} on Datanode {datanode:?}");
let client = self.datanode_clients.get_client(&datanode).await;
let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
client
.flush_table(expr.clone())
.await
.context(RequestDatanodeSnafu)?;
}
}
Ok(Output::AffectedRows(0))
}
async fn handle_statement(
&self,
stmt: Statement,

View File

@@ -57,7 +57,11 @@ impl GrpcQueryHandler for DistInstance {
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
self.drop_table(table_name).await
}
DdlExpr::FlushTable(_) => todo!(),
DdlExpr::FlushTable(expr) => {
let table_name =
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
self.flush_table(table_name, expr.region_id).await
}
}
}
}

View File

@@ -91,14 +91,15 @@ mod test {
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::{
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, InsertRequest,
QueryRequest,
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
InsertRequest, QueryRequest,
};
use catalog::helper::{TableGlobalKey, TableGlobalValue};
use common_query::Output;
use common_recordbatch::RecordBatches;
use query::parser::QueryLanguageParser;
use session::context::QueryContext;
use tests::{has_parquet_file, test_region_dir};
use super::*;
use crate::table::DistTable;
@@ -352,6 +353,108 @@ CREATE TABLE {table_name} (
test_insert_and_query_on_auto_created_table(instance).await
}
#[tokio::test(flavor = "multi_thread")]
async fn test_distributed_flush_table() {
common_telemetry::init_default_ut_logging();
let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
let data_tmp_dirs = instance.data_tmp_dirs();
let frontend = instance.frontend.as_ref();
let table_name = "my_dist_table";
let sql = format!(
r"
CREATE TABLE {table_name} (
a INT,
ts TIMESTAMP,
TIME INDEX (ts)
) PARTITION BY RANGE COLUMNS(a) (
PARTITION r0 VALUES LESS THAN (10),
PARTITION r1 VALUES LESS THAN (20),
PARTITION r2 VALUES LESS THAN (50),
PARTITION r3 VALUES LESS THAN (MAXVALUE),
)"
);
create_table(frontend, sql).await;
test_insert_and_query_on_existing_table(frontend, table_name).await;
flush_table(frontend, "greptime", "public", table_name, None).await;
// Wait for the previous task to finish
flush_table(frontend, "greptime", "public", table_name, None).await;
let table_id = 1024;
let table = instance
.frontend
.catalog_manager()
.table("greptime", "public", table_name)
.await
.unwrap()
.unwrap();
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
let TableGlobalValue { regions_id_map, .. } = table
.table_global_value(&TableGlobalKey {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: table_name.to_string(),
})
.await
.unwrap()
.unwrap();
let region_to_dn_map = regions_id_map
.iter()
.map(|(k, v)| (v[0], *k))
.collect::<HashMap<u32, u64>>();
for (region, dn) in region_to_dn_map.iter() {
// data_tmp_dirs -> dn: 1..4
let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
let region_dir = test_region_dir(
data_tmp_dir.path().to_str().unwrap(),
"greptime",
"public",
table_id,
*region,
);
has_parquet_file(&region_dir);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_standalone_flush_table() {
common_telemetry::init_default_ut_logging();
let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
let instance = &standalone.instance;
let data_tmp_dir = standalone.data_tmp_dir();
let table_name = "my_table";
let sql = format!("CREATE TABLE {table_name} (a INT, ts TIMESTAMP, TIME INDEX (ts))");
create_table(instance, sql).await;
test_insert_and_query_on_existing_table(instance, table_name).await;
let table_id = 1024;
let region_id = 0;
let region_dir = test_region_dir(
data_tmp_dir.path().to_str().unwrap(),
"greptime",
"public",
table_id,
region_id,
);
assert!(!has_parquet_file(&region_dir));
flush_table(instance, "greptime", "public", "my_table", None).await;
// Wait for the previous task to finish
flush_table(instance, "greptime", "public", "my_table", None).await;
assert!(has_parquet_file(&region_dir));
}
async fn create_table(frontend: &Instance, sql: String) {
let request = Request::Query(QueryRequest {
query: Some(Query::Sql(sql)),
@@ -360,6 +463,26 @@ CREATE TABLE {table_name} (
assert!(matches!(output, Output::AffectedRows(0)));
}
async fn flush_table(
frontend: &Instance,
catalog_name: &str,
schema_name: &str,
table_name: &str,
region_id: Option<u32>,
) {
let request = Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(FlushTableExpr {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
region_id,
})),
});
let output = query(frontend, request).await;
assert!(matches!(output, Output::AffectedRows(0)));
}
async fn test_insert_and_query_on_existing_table(instance: &Instance, table_name: &str) {
let insert = InsertRequest {
table_name: table_name.to_string(),

View File

@@ -152,6 +152,7 @@ impl Services {
let mut http_server = HttpServer::new(
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
http_options.clone(),
);
if let Some(user_provider) = user_provider.clone() {

View File

@@ -140,8 +140,11 @@ impl Table for DistTable {
Ok(Arc::new(dist_scan))
}
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
Ok(FilterPushDownType::Inexact)
fn supports_filters_pushdown(
&self,
filters: &[&Expr],
) -> table::Result<Vec<FilterPushDownType>> {
Ok(vec![FilterPushDownType::Inexact; filters.len()])
}
async fn alter(&self, context: AlterContext, request: &AlterTableRequest) -> table::Result<()> {

View File

@@ -34,6 +34,7 @@ use partition::route::TableRoutes;
use servers::grpc::GrpcServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
use servers::Mode;
use table::engine::{region_name, table_dir};
use tonic::transport::Server;
use tower::service_fn;
@@ -56,11 +57,23 @@ pub(crate) struct MockDistributedInstance {
_guards: Vec<TestGuard>,
}
impl MockDistributedInstance {
pub fn data_tmp_dirs(&self) -> Vec<&TempDir> {
self._guards.iter().map(|g| &g._data_tmp_dir).collect()
}
}
pub(crate) struct MockStandaloneInstance {
pub(crate) instance: Arc<Instance>,
_guard: TestGuard,
}
impl MockStandaloneInstance {
pub fn data_tmp_dir(&self) -> &TempDir {
&self._guard._data_tmp_dir
}
}
pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandaloneInstance {
let (opts, guard) = create_tmp_dir_and_datanode_opts(test_name);
let datanode_instance = DatanodeInstance::new(&opts).await.unwrap();
@@ -269,3 +282,29 @@ pub(crate) async fn create_distributed_instance(test_name: &str) -> MockDistribu
_guards: test_guards,
}
}
pub fn test_region_dir(
dir: &str,
catalog_name: &str,
schema_name: &str,
table_id: u32,
region_id: u32,
) -> String {
let table_dir = table_dir(catalog_name, schema_name, table_id);
let region_name = region_name(table_id, region_id);
format!("{}/{}/{}", dir, table_dir, region_name)
}
pub fn has_parquet_file(sst_dir: &str) -> bool {
for entry in std::fs::read_dir(sst_dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
if !path.is_dir() {
assert_eq!("parquet", path.extension().unwrap());
return true;
}
}
false
}

View File

@@ -22,6 +22,7 @@ use api::v1::meta::store_server::StoreServer;
use etcd_client::Client;
use snafu::ResultExt;
use tokio::net::TcpListener;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio_stream::wrappers::TcpListenerStream;
use tonic::transport::server::Router;
@@ -39,22 +40,70 @@ use crate::service::store::kv::ResettableKvStoreRef;
use crate::service::store::memory::MemStore;
use crate::{error, Result};
// Bootstrap the rpc server to serve incoming requests
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
let meta_srv = make_meta_srv(opts.clone()).await?;
bootstrap_meta_srv_with_router(opts, router(meta_srv)).await
#[derive(Clone)]
pub struct MetaSrvInstance {
meta_srv: MetaSrv,
opts: MetaSrvOptions,
signal_sender: Option<Sender<()>>,
}
pub async fn bootstrap_meta_srv_with_router(opts: MetaSrvOptions, router: Router) -> Result<()> {
let listener = TcpListener::bind(&opts.bind_addr)
impl MetaSrvInstance {
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
let meta_srv = build_meta_srv(&opts).await?;
Ok(MetaSrvInstance {
meta_srv,
opts,
signal_sender: None,
})
}
pub async fn start(&mut self) -> Result<()> {
self.meta_srv.start().await;
let (tx, mut rx) = mpsc::channel::<()>(1);
self.signal_sender = Some(tx);
bootstrap_meta_srv_with_router(
&self.opts.bind_addr,
router(self.meta_srv.clone()),
&mut rx,
)
.await?;
Ok(())
}
pub async fn shutdown(&self) -> Result<()> {
if let Some(signal) = &self.signal_sender {
signal
.send(())
.await
.context(error::SendShutdownSignalSnafu)?;
}
self.meta_srv.shutdown();
Ok(())
}
}
pub async fn bootstrap_meta_srv_with_router(
bind_addr: &str,
router: Router,
signal: &mut Receiver<()>,
) -> Result<()> {
let listener = TcpListener::bind(bind_addr)
.await
.context(error::TcpBindSnafu {
addr: &opts.bind_addr,
})?;
.context(error::TcpBindSnafu { addr: bind_addr })?;
let listener = TcpListenerStream::new(listener);
router
.serve_with_incoming(listener)
.serve_with_incoming_shutdown(listener, async {
signal.recv().await;
})
.await
.context(error::StartGrpcSnafu)?;
@@ -72,7 +121,7 @@ pub fn router(meta_srv: MetaSrv) -> Router {
.add_service(admin::make_admin_service(meta_srv))
}
pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
pub async fn build_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
let (kv_store, election, lock) = if opts.use_memory_store {
(Arc::new(MemStore::new()) as _, None, None)
} else {
@@ -107,7 +156,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
};
let meta_srv = MetaSrvBuilder::new()
.options(opts)
.options(opts.clone())
.kv_store(kv_store)
.in_memory(in_memory)
.selector(selector)
@@ -117,6 +166,12 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
.build()
.await;
Ok(meta_srv)
}
pub async fn make_meta_srv(opts: &MetaSrvOptions) -> Result<MetaSrv> {
let meta_srv = build_meta_srv(opts).await?;
meta_srv.start().await;
Ok(meta_srv)

View File
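MetaSrvInstance above threads shutdown through a one-slot mpsc channel: start() keeps the Sender and hands the Receiver to serve_with_incoming_shutdown, and shutdown() sends a unit message to stop serving. A minimal sketch of that handshake, with a spawned task standing in for the tonic router (assumes tokio with the "full" feature set):

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<()>(1);

    // Stand-in for serve_with_incoming_shutdown: serve until one message
    // (or the channel closing) is observed on the receiver.
    let server = tokio::spawn(async move {
        rx.recv().await;
        println!("meta server stopped serving");
    });

    // Stand-in for MetaSrvInstance::shutdown().
    tx.send(()).await.expect("server task already ended");
    server.await.unwrap();
}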

@@ -15,12 +15,16 @@
use std::string::FromUtf8Error;
use common_error::prelude::*;
use tokio::sync::mpsc::error::SendError;
use tonic::codegen::http;
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to send shutdown signal"))]
SendShutdownSignal { source: SendError<()> },
#[snafu(display("Error stream request next is None"))]
StreamNone { backtrace: Backtrace },
@@ -312,6 +316,7 @@ impl ErrorExt for Error {
| Error::LeaseGrant { .. }
| Error::LockNotConfig { .. }
| Error::ExceededRetryLimit { .. }
| Error::SendShutdownSignal { .. }
| Error::StartGrpc { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::MissingRequiredParameter { .. }

View File

@@ -31,13 +31,14 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{
ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder, ColumnId,
CreateOptions, EngineContext as StorageEngineContext, OpenOptions, Region,
RegionDescriptorBuilder, RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
RegionDescriptorBuilder, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
};
use table::engine::{
region_id, region_name, table_dir, EngineContext, TableEngine, TableEngineProcedure,
TableReference,
};
use table::engine::{EngineContext, TableEngine, TableEngineProcedure, TableReference};
use table::error::TableOperationSnafu;
use table::metadata::{
TableId, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion,
};
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion};
use table::requests::{
AlterKind, AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
};
@@ -59,22 +60,6 @@ pub const MITO_ENGINE: &str = "mito";
pub const INIT_COLUMN_ID: ColumnId = 0;
const INIT_TABLE_VERSION: TableVersion = 0;
/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
fn region_name(table_id: TableId, n: u32) -> String {
format!("{table_id}_{n:010}")
}
#[inline]
fn region_id(table_id: TableId, n: u32) -> RegionId {
(u64::from(table_id) << 32) | u64::from(n)
}
#[inline]
fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> String {
format!("{catalog_name}/{schema_name}/{table_id}/")
}
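Since these helpers now live in table::engine and are reused by the flush tests later in this diff, here is a small worked sketch of what they produce; the values are hypothetical and follow directly from the format strings and the bit layout above:

// For table id 42 and region number 1, assuming the helpers are imported
// from `table::engine` as elsewhere in this diff:
#[test]
fn region_naming_example() {
    assert_eq!(region_name(42, 1), "42_0000000001"); // "{table_id}_{n:010}"
    assert_eq!(region_id(42, 1), (42u64 << 32) | 1); // table id in the high 32 bits
    assert_eq!(table_dir("greptime", "public", 42), "greptime/public/42/");
}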
/// [TableEngine] implementation.
///
/// About mito <https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo>.

View File

@@ -25,6 +25,7 @@ use store_api::storage::{
ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
StorageEngine,
};
use table::engine::{region_id, table_dir};
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
use table::requests::CreateTableRequest;
@@ -146,7 +147,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
/// Creates regions for the table.
async fn on_create_regions(&mut self) -> Result<Status> {
let engine_ctx = EngineContext::default();
let table_dir = engine::table_dir(
let table_dir = table_dir(
&self.data.request.catalog_name,
&self.data.request.schema_name,
self.data.request.id,
@@ -203,7 +204,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
}
// We need to create that region.
let region_id = engine::region_id(self.data.request.id, *number);
let region_id = region_id(self.data.request.id, *number);
let region_desc = RegionDescriptorBuilder::default()
.id(region_id)
.name(region_name.clone())
@@ -234,7 +235,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
/// Writes metadata to the table manifest.
async fn on_write_table_manifest(&mut self) -> Result<Status> {
let table_dir = engine::table_dir(
let table_dir = table_dir(
&self.data.request.catalog_name,
&self.data.request.schema_name,
self.data.request.id,

View File

@@ -31,14 +31,28 @@ use storage::region::RegionImpl;
use storage::EngineImpl;
use store_api::manifest::Manifest;
use store_api::storage::ReadContext;
use table::requests::{AddColumnRequest, AlterKind, DeleteRequest, TableOptions};
use table::requests::{
AddColumnRequest, AlterKind, DeleteRequest, FlushTableRequest, TableOptions,
};
use super::*;
use crate::table::test_util;
use crate::table::test_util::{
new_insert_request, schema_for_test, TestEngineComponents, TABLE_NAME,
self, new_insert_request, schema_for_test, setup_table, TestEngineComponents, TABLE_NAME,
};
pub fn has_parquet_file(sst_dir: &str) -> bool {
for entry in std::fs::read_dir(sst_dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
if !path.is_dir() {
assert_eq!("parquet", path.extension().unwrap());
return true;
}
}
false
}
async fn setup_table_with_column_default_constraint() -> (TempDir, String, TableRef) {
let table_name = "test_default_constraint";
let column_schemas = vec![
@@ -752,3 +766,76 @@ async fn test_table_delete_rows() {
+-------+-----+--------+-------------------------+"
);
}
#[tokio::test]
async fn test_flush_table_all_regions() {
let TestEngineComponents {
table_ref: table,
dir,
..
} = test_util::setup_test_engine_and_table().await;
setup_table(table.clone()).await;
let table_id = 1u32;
let region_name = region_name(table_id, 0);
let table_info = table.table_info();
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
let region_dir = format!(
"{}/{}/{}",
dir.path().to_str().unwrap(),
table_dir,
region_name
);
assert!(!has_parquet_file(&region_dir));
// Trigger a flush of all regions
table.flush(None).await.unwrap();
// Trigger again and wait for the previous flush task to finish
table.flush(None).await.unwrap();
assert!(has_parquet_file(&region_dir));
}
#[tokio::test]
async fn test_flush_table_with_region_id() {
let TestEngineComponents {
table_ref: table,
dir,
..
} = test_util::setup_test_engine_and_table().await;
setup_table(table.clone()).await;
let table_id = 1u32;
let region_name = region_name(table_id, 0);
let table_info = table.table_info();
let table_dir = table_dir(&table_info.catalog_name, &table_info.schema_name, table_id);
let region_dir = format!(
"{}/{}/{}",
dir.path().to_str().unwrap(),
table_dir,
region_name
);
assert!(!has_parquet_file(&region_dir));
let req = FlushTableRequest {
region_number: Some(0),
..Default::default()
};
// Trigger a flush of region 0 only
table.flush(req.region_number).await.unwrap();
// Trigger again and wait for the previous flush task to finish
table.flush(req.region_number).await.unwrap();
assert!(has_parquet_file(&region_dir));
}

View File

@@ -47,7 +47,7 @@ use table::requests::{
AddColumnRequest, AlterKind, AlterTableRequest, DeleteRequest, InsertRequest,
};
use table::table::scan::SimpleTableScan;
use table::table::{AlterContext, Table};
use table::table::{AlterContext, RegionStat, Table};
use tokio::sync::Mutex;
use crate::error;
@@ -208,8 +208,8 @@ impl<R: Region> Table for MitoTable<R> {
Ok(Arc::new(SimpleTableScan::new(stream)))
}
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::error::Result<FilterPushDownType> {
Ok(FilterPushDownType::Inexact)
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> TableResult<Vec<FilterPushDownType>> {
Ok(vec![FilterPushDownType::Inexact; filters.len()])
}
/// Alter table changes the schemas of the table.
@@ -323,6 +323,25 @@ impl<R: Region> Table for MitoTable<R> {
Ok(rows_deleted)
}
async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
if let Some(region_number) = region_number {
if let Some(region) = self.regions.get(&region_number) {
region
.flush()
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
} else {
futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
Ok(())
}
async fn close(&self) -> TableResult<()> {
futures::future::try_join_all(self.regions.values().map(|region| region.close()))
.await
@@ -331,6 +350,17 @@ impl<R: Region> Table for MitoTable<R> {
Ok(())
}
fn region_stats(&self) -> TableResult<Vec<RegionStat>> {
Ok(self
.regions
.values()
.map(|region| RegionStat {
region_id: region.id(),
disk_usage_bytes: region.disk_usage_bytes(),
})
.collect())
}
}
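A hypothetical consumer of the new region_stats API, for example to aggregate a table's total disk usage; this is a sketch under that assumption, not code from this diff:

use table::table::Table;

// Sum the reported disk usage across all regions of a table.
fn total_disk_usage_bytes(table: &dyn Table) -> u64 {
    table
        .region_stats()
        .map(|stats| stats.iter().map(|stat| stat.disk_usage_bytes).sum())
        .unwrap_or(0)
}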
struct ChunkStream {

View File

@@ -20,7 +20,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::VectorRef;
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector, VectorRef};
use log_store::NoopLogStore;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
@@ -30,7 +30,7 @@ use storage::EngineImpl;
use table::engine::{EngineContext, TableEngine};
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
use table::requests::{CreateTableRequest, InsertRequest, TableOptions};
use table::TableRef;
use table::{Table, TableRef};
use crate::config::EngineConfig;
use crate::engine::{MitoEngine, MITO_ENGINE};
@@ -178,3 +178,19 @@ pub async fn setup_mock_engine_and_table(
(mock_engine, table_engine, table, object_store, dir)
}
pub async fn setup_table(table: Arc<dyn Table>) {
let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
columns_values.insert("host".to_string(), hosts.clone());
columns_values.insert("cpu".to_string(), cpus.clone());
columns_values.insert("memory".to_string(), memories.clone());
columns_values.insert("ts".to_string(), tss.clone());
let insert_req = new_insert_request("demo".to_string(), columns_values);
assert_eq!(4, table.insert(insert_req).await.unwrap());
}

View File

@@ -200,6 +200,10 @@ impl Region for MockRegion {
fn disk_usage_bytes(&self) -> u64 {
0
}
async fn flush(&self) -> Result<()> {
unimplemented!()
}
}
impl MockRegionInner {

View File

@@ -23,7 +23,7 @@ use datafusion::arrow::datatypes::{DataType, TimeUnit};
use datafusion::common::{DFField, DFSchema, DFSchemaRef, Result as DataFusionResult, Statistics};
use datafusion::error::DataFusionError;
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -37,7 +37,7 @@ use futures::Stream;
use crate::extension_plan::Millisecond;
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct EmptyMetric {
start: Millisecond,
end: Millisecond,
@@ -86,9 +86,9 @@ impl EmptyMetric {
}
}
impl UserDefinedLogicalNode for EmptyMetric {
fn as_any(&self) -> &dyn Any {
self as _
impl UserDefinedLogicalNodeCore for EmptyMetric {
fn name(&self) -> &str {
"EmptyMetric"
}
fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -111,12 +111,8 @@ impl UserDefinedLogicalNode for EmptyMetric {
)
}
fn from_template(
&self,
_exprs: &[datafusion::prelude::Expr],
_inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode> {
Arc::new(self.clone())
fn from_template(&self, _expr: &[Expr], _inputs: &[LogicalPlan]) -> Self {
self.clone()
}
}
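A note that applies to this and the similar plan nodes below: UserDefinedLogicalNodeCore is the sized companion trait, and the DataFusion release targeted here is assumed to supply the object-safe UserDefinedLogicalNode implementation (as_any, dyn_eq, dyn_hash) for any type that implements it. That is why from_template can now return Self instead of Arc<dyn UserDefinedLogicalNode>, and why these structs pick up PartialEq, Eq and Hash derives.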

View File

@@ -24,7 +24,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::DFSchemaRef;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -42,7 +42,7 @@ use crate::extension_plan::Millisecond;
/// This plan aligns the input time series: for every timestamp between `start` and
/// `end` with step `interval`, it searches back within the `lookback` range when no
/// sample exists exactly at that timestamp.
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct InstantManipulate {
start: Millisecond,
end: Millisecond,
@@ -52,9 +52,9 @@ pub struct InstantManipulate {
input: LogicalPlan,
}
impl UserDefinedLogicalNode for InstantManipulate {
fn as_any(&self) -> &dyn Any {
self as _
impl UserDefinedLogicalNodeCore for InstantManipulate {
fn name(&self) -> &str {
"InstantManipulate"
}
fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -77,21 +77,17 @@ impl UserDefinedLogicalNode for InstantManipulate {
)
}
fn from_template(
&self,
_exprs: &[Expr],
inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode> {
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
assert!(!inputs.is_empty());
Arc::new(Self {
Self {
start: self.start,
end: self.end,
lookback_delta: self.lookback_delta,
interval: self.interval,
time_index_column: self.time_index_column.clone(),
input: inputs[0].clone(),
})
}
}
}

View File

@@ -22,7 +22,7 @@ use datafusion::arrow::compute;
use datafusion::common::{DFSchemaRef, Result as DataFusionResult, Statistics};
use datafusion::error::DataFusionError;
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -43,7 +43,7 @@ use crate::extension_plan::Millisecond;
/// - bias sample's timestamp by offset
/// - sort the record batch based on timestamp column
/// - remove NaN values
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SeriesNormalize {
offset: Millisecond,
time_index_column_name: String,
@@ -51,9 +51,9 @@ pub struct SeriesNormalize {
input: LogicalPlan,
}
impl UserDefinedLogicalNode for SeriesNormalize {
fn as_any(&self) -> &dyn Any {
self as _
impl UserDefinedLogicalNodeCore for SeriesNormalize {
fn name(&self) -> &str {
"SeriesNormalize"
}
fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -76,18 +76,14 @@ impl UserDefinedLogicalNode for SeriesNormalize {
)
}
fn from_template(
&self,
_exprs: &[datafusion::logical_expr::Expr],
inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode> {
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
assert!(!inputs.is_empty());
Arc::new(Self {
Self {
offset: self.offset,
time_index_column_name: self.time_index_column_name.clone(),
input: inputs[0].clone(),
})
}
}
}

View File

@@ -26,7 +26,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::{DFField, DFSchema, DFSchemaRef};
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -42,7 +42,7 @@ use crate::range_array::RangeArray;
///
/// This plan will "fold" time index and value columns into [RangeArray]s, and truncate
/// other columns to the same length as the "folded" [RangeArray] column.
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct RangeManipulate {
start: Millisecond,
end: Millisecond,
@@ -137,9 +137,9 @@ impl RangeManipulate {
}
}
impl UserDefinedLogicalNode for RangeManipulate {
fn as_any(&self) -> &dyn Any {
self as _
impl UserDefinedLogicalNodeCore for RangeManipulate {
fn name(&self) -> &str {
"RangeManipulate"
}
fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -162,14 +162,10 @@ impl UserDefinedLogicalNode for RangeManipulate {
)
}
fn from_template(
&self,
_exprs: &[Expr],
inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode> {
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
assert!(!inputs.is_empty());
Arc::new(Self {
Self {
start: self.start,
end: self.end,
interval: self.interval,
@@ -178,7 +174,7 @@ impl UserDefinedLogicalNode for RangeManipulate {
value_columns: self.value_columns.clone(),
input: inputs[0].clone(),
output_schema: self.output_schema.clone(),
})
}
}
}

View File

@@ -23,7 +23,7 @@ use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::DFSchemaRef;
use datafusion::error::Result as DataFusionResult;
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
@@ -33,15 +33,15 @@ use datafusion::physical_plan::{
use datatypes::arrow::compute;
use futures::{ready, Stream, StreamExt};
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SeriesDivide {
tag_columns: Vec<String>,
input: LogicalPlan,
}
impl UserDefinedLogicalNode for SeriesDivide {
fn as_any(&self) -> &dyn Any {
self as _
impl UserDefinedLogicalNodeCore for SeriesDivide {
fn name(&self) -> &str {
"SeriesDivide"
}
fn inputs(&self) -> Vec<&LogicalPlan> {
@@ -60,17 +60,13 @@ impl UserDefinedLogicalNode for SeriesDivide {
write!(f, "PromSeriesDivide: tags={:?}", self.tag_columns)
}
fn from_template(
&self,
_exprs: &[Expr],
inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode> {
fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
assert!(!inputs.is_empty());
Arc::new(Self {
Self {
tag_columns: self.tag_columns.clone(),
input: inputs[0].clone(),
})
}
}
}

View File

@@ -957,7 +957,12 @@ impl PromPlanner {
.tag_columns
.iter()
.chain(self.ctx.time_index_column.iter())
.map(|col| Ok(DfExpr::Column(Column::from(col))));
.map(|col| {
Ok(DfExpr::Column(Column::new(
self.ctx.table_name.clone(),
col,
)))
});
// build computation exprs
let result_value_columns = self
@@ -1485,7 +1490,7 @@ mod test {
.unwrap();
let expected = String::from(
"Projection: lhs.tag_0, lhs.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
"Projection: some_metric.tag_0, some_metric.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
\n Inner Join: lhs.tag_0 = some_metric.tag_0, lhs.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\

View File

@@ -34,6 +34,8 @@ use datatypes::arrow::datatypes::DataType;
pub struct TypeConversionRule;
impl OptimizerRule for TypeConversionRule {
// TODO(ruihang): fix this warning
#[allow(deprecated)]
fn try_optimize(
&self,
plan: &LogicalPlan,

View File

@@ -157,7 +157,7 @@ mod test {
distinct: false, \
top: None, \
projection: \
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None })], \
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None, opt_replace: None })], \
into: None, \
from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: \"t1\", quote_style: None }]\
), \

View File

@@ -70,8 +70,11 @@ impl Table for MemTableWrapper {
self.inner.scan(projection, filters, limit).await
}
fn supports_filter_pushdown(&self, _filter: &Expr) -> table::Result<FilterPushDownType> {
Ok(FilterPushDownType::Exact)
fn supports_filters_pushdown(
&self,
filters: &[&Expr],
) -> table::Result<Vec<FilterPushDownType>> {
Ok(vec![FilterPushDownType::Exact; filters.len()])
}
}

View File

@@ -379,7 +379,8 @@ import greptime as gt
@copr(args=["number"], returns = ["number"], sql = "select * from numbers")
def test(number) -> vector[u32]:
return query.sql("select * from numbers")[0][0]
from greptime import query
return query().sql("select * from numbers")[0][0]
"#;
let script = script_engine
.compile(script, CompileContext::default())
@@ -438,7 +439,8 @@ from greptime import col
@copr(args=["number"], returns = ["number"], sql = "select * from numbers")
def test(number) -> vector[u32]:
return dataframe.filter(col("number")==col("number")).collect()[0][0]
from greptime import dataframe
return dataframe().filter(col("number")==col("number")).collect()[0][0]
"#;
let script = script_engine
.compile(script, CompileContext::default())

View File

@@ -140,7 +140,7 @@ impl Coprocessor {
cols.len() == names.len() && names.len() == anno.len(),
OtherSnafu {
reason: format!(
"Unmatched length for cols({}), names({}) and anno({})",
"Unmatched length for cols({}), names({}) and annotation({})",
cols.len(),
names.len(),
anno.len()
@@ -335,7 +335,7 @@ pub fn exec_coprocessor(script: &str, rb: &Option<RecordBatch>) -> Result<Record
#[cfg_attr(feature = "pyo3_backend", pyo3class(name = "query_engine"))]
#[rspyclass(module = false, name = "query_engine")]
#[derive(Debug, PyPayload)]
#[derive(Debug, PyPayload, Clone)]
pub struct PyQueryEngine {
inner: QueryEngineWeakRef,
}

View File

@@ -184,7 +184,7 @@ fn eval_rspy(case: CodeBlockTestCase) {
#[cfg(feature = "pyo3_backend")]
fn eval_pyo3(case: CodeBlockTestCase) {
init_cpython_interpreter();
init_cpython_interpreter().unwrap();
Python::with_gil(|py| {
let locals = {
let locals_dict = PyDict::new(py);

View File

@@ -38,6 +38,55 @@ pub(super) fn generate_copr_intgrate_tests() -> Vec<CoprTestCase> {
vec![
CoprTestCase {
script: r#"
from greptime import vector
@copr(returns=["value"])
def boolean_array() -> vector[f64]:
v = vector([1.0, 2.0, 3.0])
# This returns a vector([2.0])
return v[(v > 1) & (v < 3)]
"#
.to_string(),
expect: Some(ronish!("value": vector!(Float64Vector, [2.0f64]))),
},
#[cfg(feature = "pyo3_backend")]
CoprTestCase {
script: r#"
@copr(returns=["value"], backend="pyo3")
def boolean_array() -> vector[f64]:
from greptime import vector
from greptime import query, dataframe
try:
print("query()=", query())
except KeyError as e:
print("query()=", e)
try:
print("dataframe()=", dataframe())
except KeyError as e:
print("dataframe()=", e)
v = vector([1.0, 2.0, 3.0])
# This returns a vector([2.0])
return v[(v > 1) & (v < 3)]
"#
.to_string(),
expect: Some(ronish!("value": vector!(Float64Vector, [2.0f64]))),
},
#[cfg(feature = "pyo3_backend")]
CoprTestCase {
script: r#"
@copr(returns=["value"], backend="pyo3")
def boolean_array() -> vector[f64]:
from greptime import vector
v = vector([1.0, 2.0, 3.0])
# This returns a vector([2.0])
return v[(v > 1) & (v < 3)]
"#
.to_string(),
expect: Some(ronish!("value": vector!(Float64Vector, [2.0f64]))),
},
CoprTestCase {
script: r#"
@copr(args=["number", "number"],
returns=["value"],
sql="select number from numbers limit 5", backend="rspy")
@@ -107,9 +156,9 @@ def answer() -> vector[i64]:
script: r#"
@copr(args=[], returns = ["number"], sql = "select * from numbers", backend="rspy")
def answer() -> vector[i64]:
from greptime import vector, col, lit
from greptime import vector, col, lit, dataframe
expr_0 = (col("number")<lit(3)) & (col("number")>0)
ret = dataframe.select([col("number")]).filter(expr_0).collect()[0][0]
ret = dataframe().select([col("number")]).filter(expr_0).collect()[0][0]
return ret
"#
.to_string(),
@@ -120,10 +169,10 @@ def answer() -> vector[i64]:
script: r#"
@copr(args=[], returns = ["number"], sql = "select * from numbers", backend="pyo3")
def answer() -> vector[i64]:
from greptime import vector, col, lit
from greptime import vector, col, lit, dataframe
# Bitwise operators bind tighter than comparison operators, hence the parentheses
expr_0 = (col("number")<lit(3)) & (col("number")>0)
ret = dataframe.select([col("number")]).filter(expr_0).collect()[0][0]
ret = dataframe().select([col("number")]).filter(expr_0).collect()[0][0]
return ret
"#
.to_string(),

View File

@@ -133,7 +133,7 @@ fn get_test_cases() -> Vec<TestCase> {
}
#[cfg(feature = "pyo3_backend")]
fn eval_pyo3(testcase: TestCase, locals: HashMap<String, PyVector>) {
init_cpython_interpreter();
init_cpython_interpreter().unwrap();
Python::with_gil(|py| {
let locals = {
let locals_dict = PyDict::new(py);

View File

@@ -20,10 +20,13 @@ use datafusion::physical_plan::expressions;
use datafusion_expr::ColumnarValue;
use datafusion_physical_expr::{math_expressions, AggregateExpr};
use datatypes::vectors::VectorRef;
use pyo3::exceptions::PyValueError;
use pyo3::exceptions::{PyKeyError, PyValueError};
use pyo3::prelude::*;
use pyo3::types::PyDict;
use super::dataframe_impl::PyDataFrame;
use super::utils::scalar_value_to_py_any;
use crate::python::ffi_types::copr::PyQueryEngine;
use crate::python::ffi_types::utils::all_to_f64;
use crate::python::ffi_types::PyVector;
use crate::python::pyo3::dataframe_impl::{col, lit};
@@ -58,9 +61,12 @@ macro_rules! batch_import {
#[pyo3(name = "greptime")]
pub(crate) fn greptime_builtins(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_class::<PyVector>()?;
use self::query_engine;
batch_import!(
m,
[
dataframe,
query_engine,
lit,
col,
pow,
@@ -112,6 +118,34 @@ pub(crate) fn greptime_builtins(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
Ok(())
}
fn get_globals(py: Python) -> PyResult<&PyDict> {
// TODO(discord9): check if this is sound(in python)
let py_main = PyModule::import(py, "__main__")?;
let globals = py_main.dict();
Ok(globals)
}
#[pyfunction]
fn dataframe(py: Python) -> PyResult<PyDataFrame> {
let globals = get_globals(py)?;
let df = globals
.get_item("__dataframe__")
.ok_or(PyKeyError::new_err("No __dataframe__ variable is found"))?
.extract::<PyDataFrame>()?;
Ok(df)
}
#[pyfunction]
#[pyo3(name = "query")]
fn query_engine(py: Python) -> PyResult<PyQueryEngine> {
let globals = get_globals(py)?;
let query = globals
.get_item("__query__")
.ok_or(PyKeyError::new_err("No __query__ variable is found"))?
.extract::<PyQueryEngine>()?;
Ok(query)
}
fn eval_func(py: Python<'_>, name: &str, v: &[&PyObject]) -> PyResult<PyVector> {
let v = to_array_of_py_vec(py, v)?;
py.allow_threads(|| {

View File

@@ -24,7 +24,6 @@ use snafu::{ensure, Backtrace, GenerateImplicitData, ResultExt};
use crate::python::error::{self, NewRecordBatchSnafu, OtherSnafu, Result};
use crate::python::ffi_types::copr::PyQueryEngine;
use crate::python::ffi_types::{check_args_anno_real_type, select_from_rb, Coprocessor, PyVector};
use crate::python::pyo3::builtins::greptime_builtins;
use crate::python::pyo3::dataframe_impl::PyDataFrame;
use crate::python::pyo3::utils::{init_cpython_interpreter, pyo3_obj_try_to_typed_val};
@@ -57,6 +56,7 @@ impl PyQueryEngine {
}
// TODO: put this into greptime module
}
/// Execute a `Coprocessor` with given `RecordBatch`
pub(crate) fn pyo3_exec_parsed(
copr: &Coprocessor,
@@ -73,7 +73,7 @@ pub(crate) fn pyo3_exec_parsed(
Vec::new()
};
// Just in case cpython is not inited
init_cpython_interpreter();
init_cpython_interpreter().unwrap();
Python::with_gil(|py| -> Result<_> {
let mut cols = (|| -> PyResult<_> {
let dummy_decorator = "
@@ -86,7 +86,6 @@ def copr(*dummy, **kwdummy):
return func
return inner
coprocessor = copr
from greptime import vector
";
let gen_call = format!("\n_return_from_coprocessor = {}(*_args_for_coprocessor, **_kwargs_for_coprocessor)", copr.name);
let script = format!("{}{}{}", dummy_decorator, copr.script, gen_call);
@@ -109,14 +108,11 @@ from greptime import vector
let globals = py_main.dict();
let locals = PyDict::new(py);
let greptime = PyModule::new(py, "greptime")?;
greptime_builtins(py, greptime)?;
locals.set_item("greptime", greptime)?;
if let Some(engine) = &copr.query_engine {
let query_engine = PyQueryEngine::from_weakref(engine.clone());
let query_engine = PyCell::new(py, query_engine)?;
globals.set_item("query", query_engine)?;
globals.set_item("__query__", query_engine)?;
}
// TODO(discord9): find out why `dataframe` is not in scope
@@ -129,12 +125,12 @@ from greptime import vector
)
)?;
let dataframe = PyCell::new(py, dataframe)?;
globals.set_item("dataframe", dataframe)?;
globals.set_item("__dataframe__", dataframe)?;
}
locals.set_item("_args_for_coprocessor", args)?;
locals.set_item("_kwargs_for_coprocessor", kwargs)?;
// `greptime` is already imported when the interpreter is initialized, so there is no need to set it here
// TODO(discord9): find a better way to set `dataframe` and `query` in scope, or set them into a module (the latter might be impossible and not idiomatic even in Python)
// set `dataframe` and `query` in scope, or set them into a module
@@ -219,10 +215,10 @@ mod copr_test {
@copr(args=["cpu", "mem"], returns=["ref"], backend="pyo3")
def a(cpu, mem, **kwargs):
import greptime as gt
from greptime import vector, log2, sum, pow, col, lit
from greptime import vector, log2, sum, pow, col, lit, dataframe
for k, v in kwargs.items():
print("%s == %s" % (k, v))
print(dataframe.select([col("cpu")<lit(0.3)]).collect())
print(dataframe().select([col("cpu")<lit(0.3)]).collect())
return (0.5 < cpu) & ~( cpu >= 0.75)
"#;
let cpu_array = Float32Vector::from_slice([0.9f32, 0.8, 0.7, 0.3]);

View File

@@ -26,6 +26,8 @@ use crate::python::ffi_types::PyVector;
use crate::python::pyo3::utils::pyo3_obj_try_to_typed_scalar_value;
use crate::python::utils::block_on_async;
type PyExprRef = Py<PyExpr>;
#[derive(Debug, Clone)]
#[pyclass]
pub(crate) struct PyDataFrame {
inner: DfDataFrame,
@@ -47,6 +49,9 @@ impl PyDataFrame {
#[pymethods]
impl PyDataFrame {
fn __call__(&self) -> PyResult<Self> {
Ok(self.clone())
}
fn select_columns(&self, columns: Vec<String>) -> PyResult<Self> {
Ok(self
.inner
@@ -252,6 +257,9 @@ pub(crate) fn col(name: String) -> PyExpr {
#[pymethods]
impl PyExpr {
fn __call__(&self) -> PyResult<Self> {
Ok(self.clone())
}
fn __richcmp__(&self, py: Python<'_>, other: PyObject, op: CompareOp) -> PyResult<Self> {
let other = other.extract::<Self>(py).or_else(|_| lit(py, other))?;
let op = match op {

View File

@@ -36,7 +36,9 @@ static START_PYO3: Lazy<Mutex<bool>> = Lazy::new(|| Mutex::new(false));
pub(crate) fn to_py_err(err: impl ToString) -> PyErr {
PyArrowException::new_err(err.to_string())
}
pub(crate) fn init_cpython_interpreter() {
/// Init the CPython interpreter with the `greptime` builtins; if it is already initialized, do nothing
pub(crate) fn init_cpython_interpreter() -> PyResult<()> {
let mut start = START_PYO3.lock().unwrap();
if !*start {
pyo3::append_to_inittab!(greptime_builtins);
@@ -44,6 +46,7 @@ pub(crate) fn init_cpython_interpreter() {
*start = true;
info!("Started CPython Interpreter");
}
Ok(())
}
pub fn val_to_py_any(py: Python<'_>, val: Value) -> PyResult<PyObject> {

View File

@@ -21,11 +21,12 @@ use datatypes::arrow::array::{Array, ArrayRef};
use datatypes::arrow::datatypes::DataType as ArrowDataType;
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::vectors::Helper;
use pyo3::exceptions::PyValueError;
use pyo3::exceptions::{PyIndexError, PyRuntimeError, PyValueError};
use pyo3::prelude::*;
use pyo3::pyclass::CompareOp;
use pyo3::types::{PyBool, PyFloat, PyInt, PyList, PyString, PyType};
use super::utils::val_to_py_any;
use crate::python::ffi_types::vector::{arrow_rtruediv, wrap_bool_result, wrap_result, PyVector};
use crate::python::pyo3::utils::{pyo3_obj_try_to_typed_val, to_py_err};
@@ -278,6 +279,45 @@ impl PyVector {
let v = Helper::try_into_vector(array).map_err(to_py_err)?;
Ok(v.into())
}
/// PyO3's Magic Method for slicing and indexing
fn __getitem__(&self, py: Python, needle: PyObject) -> PyResult<PyObject> {
if let Ok(needle) = needle.extract::<PyVector>(py) {
let mask = needle.to_arrow_array();
let mask = mask
.as_any()
.downcast_ref::<BooleanArray>()
.ok_or_else(|| {
PyValueError::new_err(format!(
"a boolean array is required for mask-based slicing, found {mask:?}"
))
})?;
let result = compute::filter(&self.to_arrow_array(), mask)
.map_err(|err| PyRuntimeError::new_err(format!("Arrow Error: {err:#?}")))?;
let ret = Helper::try_into_vector(result.clone()).map_err(|e| {
PyRuntimeError::new_err(format!("Can't cast result into vector, err: {e:?}"))
})?;
let ret = Self::from(ret).into_py(py);
Ok(ret)
} else if let Ok(index) = needle.extract::<isize>(py) {
// deal with negative index
let len = self.len() as isize;
let index = if index < 0 { len + index } else { index };
if index < 0 || index >= len {
return Err(PyIndexError::new_err(format!(
"index out of bounds: index {index}, len {len}"
)));
}
let val = self.as_vector_ref().get(index as usize);
val_to_py_any(py, val)
} else {
Err(PyValueError::new_err(format!(
"{needle:?} is neither a vector nor an int, so it can't be used for slicing or indexing"
)))
}
}
}
#[cfg(test)]
@@ -317,7 +357,7 @@ mod test {
}
#[test]
fn test_py_vector_api() {
init_cpython_interpreter();
init_cpython_interpreter().unwrap();
Python::with_gil(|py| {
let module = PyModule::new(py, "gt").unwrap();
module.add_class::<PyVector>().unwrap();

View File

@@ -306,13 +306,38 @@ pub(crate) mod greptime_builtin {
all_to_f64, eval_aggr_fn, from_df_err, try_into_columnar_value, try_into_py_obj,
type_cast_error,
};
use crate::python::ffi_types::copr::PyQueryEngine;
use crate::python::ffi_types::vector::val_to_pyobj;
use crate::python::ffi_types::PyVector;
use crate::python::rspython::dataframe_impl::data_frame::{PyExpr, PyExprRef};
use crate::python::rspython::dataframe_impl::data_frame::{PyDataFrame, PyExpr, PyExprRef};
use crate::python::rspython::utils::{
is_instance, py_obj_to_value, py_obj_to_vec, PyVectorRef,
};
/// get `__dataframe__` from globals and return it
/// TODO(discord9): this is a terrible hack, we should find a better way to get `__dataframe__`
#[pyfunction]
fn dataframe(vm: &VirtualMachine) -> PyResult<PyDataFrame> {
let df = vm.current_globals().get_item("__dataframe__", vm)?;
let df = df
.payload::<PyDataFrame>()
.ok_or_else(|| vm.new_type_error(format!("object {:?} is not a DataFrame", df)))?;
let df = df.clone();
Ok(df)
}
/// get `__query__` from globals and return it
/// TODO(discord9): this is a terrible hack, we should find a better way to get `__query__`
#[pyfunction]
fn query(vm: &VirtualMachine) -> PyResult<PyQueryEngine> {
let query_engine = vm.current_globals().get_item("__query__", vm)?;
let query_engine = query_engine.payload::<PyQueryEngine>().ok_or_else(|| {
vm.new_type_error(format!("object {:?} is not a QueryEngine", query_engine))
})?;
let query_engine = query_engine.clone();
Ok(query_engine)
}
#[pyfunction]
fn vector(args: OptionalArg<PyObjectRef>, vm: &VirtualMachine) -> PyResult<PyVector> {
PyVector::new(args, vm)

View File

@@ -81,12 +81,13 @@ fn set_items_in_scope(
fn set_query_engine_in_scope(
scope: &Scope,
vm: &VirtualMachine,
name: &str,
query_engine: PyQueryEngine,
) -> Result<()> {
scope
.locals
.as_object()
.set_item("query", query_engine.to_pyobject(vm), vm)
.set_item(name, query_engine.to_pyobject(vm), vm)
.map_err(|e| format_py_error(e, vm))
}
@@ -101,7 +102,7 @@ pub(crate) fn exec_with_cached_vm(
// set arguments with given name and values
let scope = vm.new_scope_with_builtins();
if let Some(rb) = rb {
set_dataframe_in_scope(&scope, vm, "dataframe", rb)?;
set_dataframe_in_scope(&scope, vm, "__dataframe__", rb)?;
}
if let Some(arg_names) = &copr.deco_args.arg_names {
@@ -113,7 +114,7 @@ pub(crate) fn exec_with_cached_vm(
let query_engine = PyQueryEngine::from_weakref(engine.clone());
// put a object named with query of class PyQueryEngine in scope
set_query_engine_in_scope(&scope, vm, query_engine)?;
set_query_engine_in_scope(&scope, vm, "__query__", query_engine)?;
}
if let Some(kwarg) = &copr.kwarg {

View File

@@ -38,7 +38,7 @@ pub(crate) mod data_frame {
use crate::python::rspython::builtins::greptime_builtin::lit;
use crate::python::utils::block_on_async;
#[rspyclass(module = "data_frame", name = "DataFrame")]
#[derive(PyPayload, Debug)]
#[derive(PyPayload, Debug, Clone)]
pub struct PyDataFrame {
pub inner: DfDataFrame,
}

View File

@@ -287,7 +287,7 @@ def a(cpu, mem):
abc = vector([v[0] > v[1] for v in zip(cpu, mem)])
fed = cpu.filter(abc)
ref = log2(fed/prev(fed))
return (0.5 < cpu) & ~( cpu >= 0.75)
return cpu[(cpu > 0.5) & ~( cpu >= 0.75)]
"#;
let cpu_array = Float32Vector::from_slice([0.9f32, 0.8, 0.7, 0.3]);
let mem_array = Float64Vector::from_slice([0.1f64, 0.2, 0.3, 0.4]);

View File

@@ -559,11 +559,11 @@ def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
// constant column(int)
name: "test_data_frame",
code: r#"
from greptime import col
from greptime import col, dataframe
@copr(args=["cpu", "mem"], returns=["perf", "what"])
def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
vector[f32]):
ret = dataframe.select([col("cpu"), col("mem")]).collect()[0]
ret = dataframe().select([col("cpu"), col("mem")]).collect()[0]
return ret[0], ret[1]
"#,
predicate: ExecIsOk(
@@ -593,11 +593,11 @@ def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
// constant column(int)
name: "test_data_frame",
code: r#"
from greptime import col
from greptime import col, dataframe
@copr(args=["cpu", "mem"], returns=["perf", "what"])
def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
vector[f32]):
ret = dataframe.filter(col("cpu")>col("mem")).collect()[0]
ret = dataframe().filter(col("cpu")>col("mem")).collect()[0]
return ret[0], ret[1]
"#,
predicate: ExecIsOk(

View File

@@ -263,8 +263,12 @@ pub enum Error {
#[snafu(backtrace)]
source: common_mem_prof::error::Error,
},
#[snafu(display("Invalid prepare statement: {}", err_msg))]
InvalidPrepareStatement { err_msg: String },
#[snafu(display("Invalid flush argument: {}", err_msg))]
InvalidFlushArgument { err_msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -327,6 +331,7 @@ impl ErrorExt for Error {
DatabaseNotFound { .. } => StatusCode::DatabaseNotFound,
#[cfg(feature = "mem-prof")]
DumpProfileData { source, .. } => source.status_code(),
InvalidFlushArgument { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -19,6 +19,7 @@ pub mod opentsdb;
pub mod prometheus;
pub mod script;
mod admin;
#[cfg(feature = "mem-prof")]
pub mod mem_prof;
@@ -56,6 +57,8 @@ use self::authorize::HttpAuth;
use self::influxdb::{influxdb_health, influxdb_ping, influxdb_write};
use crate::auth::UserProviderRef;
use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu};
use crate::http::admin::flush;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::query_handler::{
InfluxdbLineProtocolHandlerRef, OpentsdbProtocolHandlerRef, PrometheusProtocolHandlerRef,
@@ -96,6 +99,7 @@ pub static PUBLIC_APIS: [&str; 2] = ["/v1/influxdb/ping", "/v1/influxdb/health"]
pub struct HttpServer {
sql_handler: ServerSqlQueryHandlerRef,
grpc_handler: ServerGrpcQueryHandlerRef,
options: HttpOptions,
influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>,
opentsdb_handler: Option<OpentsdbProtocolHandlerRef>,
@@ -349,9 +353,14 @@ pub struct ApiState {
}
impl HttpServer {
pub fn new(sql_handler: ServerSqlQueryHandlerRef, options: HttpOptions) -> Self {
pub fn new(
sql_handler: ServerSqlQueryHandlerRef,
grpc_handler: ServerGrpcQueryHandlerRef,
options: HttpOptions,
) -> Self {
Self {
sql_handler,
grpc_handler,
options,
opentsdb_handler: None,
influxdb_handler: None,
@@ -426,6 +435,10 @@ impl HttpServer {
.layer(Extension(api));
let mut router = Router::new().nest(&format!("/{HTTP_API_VERSION}"), sql_router);
router = router.nest(
&format!("/{HTTP_API_VERSION}/admin"),
self.route_admin(self.grpc_handler.clone()),
);
if let Some(opentsdb_handler) = self.opentsdb_handler.clone() {
router = router.nest(
@@ -517,6 +530,12 @@ impl HttpServer {
.route("/api/put", routing::post(opentsdb::put))
.with_state(opentsdb_handler)
}
fn route_admin<S>(&self, grpc_handler: ServerGrpcQueryHandlerRef) -> Router<S> {
Router::new()
.route("/flush", routing::post(flush))
.with_state(grpc_handler)
}
}
pub const HTTP_SERVER: &str = "HTTP_SERVER";
@@ -578,6 +597,7 @@ mod test {
use std::future::pending;
use std::sync::Arc;
use api::v1::greptime_request::Request;
use axum::handler::Handler;
use axum::http::StatusCode;
use axum::routing::get;
@@ -592,12 +612,26 @@ mod test {
use super::*;
use crate::error::Error;
use crate::query_handler::grpc::{GrpcQueryHandler, ServerGrpcQueryHandlerAdaptor};
use crate::query_handler::sql::{ServerSqlQueryHandlerAdaptor, SqlQueryHandler};
struct DummyInstance {
_tx: mpsc::Sender<(String, Vec<u8>)>,
}
#[async_trait]
impl GrpcQueryHandler for DummyInstance {
type Error = Error;
async fn do_query(
&self,
_query: Request,
_ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error> {
unimplemented!()
}
}
#[async_trait]
impl SqlQueryHandler for DummyInstance {
type Error = Error;
@@ -637,8 +671,10 @@ mod test {
fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
let instance = Arc::new(DummyInstance { _tx: tx });
let instance = ServerSqlQueryHandlerAdaptor::arc(instance);
let server = HttpServer::new(instance, HttpOptions::default());
let sql_instance = ServerSqlQueryHandlerAdaptor::arc(instance.clone());
let grpc_instance = ServerGrpcQueryHandlerAdaptor::arc(instance);
let server = HttpServer::new(sql_instance, grpc_instance, HttpOptions::default());
server.make_app().route(
"/test/timeout",
get(forever.layer(

View File

@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use api::v1::ddl_request::Expr;
use api::v1::greptime_request::Request;
use api::v1::{DdlRequest, FlushTableExpr};
use axum::extract::{Query, RawBody, State};
use axum::http::StatusCode as HttpStatusCode;
use axum::Json;
use session::context::QueryContext;
use snafu::OptionExt;
use crate::error;
use crate::error::Result;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
#[axum_macros::debug_handler]
pub async fn flush(
State(grpc_handler): State<ServerGrpcQueryHandlerRef>,
Query(params): Query<HashMap<String, String>>,
RawBody(_): RawBody,
) -> Result<(HttpStatusCode, Json<String>)> {
let catalog_name = params
.get("catalog_name")
.cloned()
.unwrap_or("greptime".to_string());
let schema_name =
params
.get("schema_name")
.cloned()
.context(error::InvalidFlushArgumentSnafu {
err_msg: "schema_name is not present",
})?;
// If the table name is not present, flush all tables in the schema
let table_name = params.get("table_name").cloned().unwrap_or_default();
let region_id: Option<u32> = params
.get("region")
.map(|v| v.parse())
.transpose()
.ok()
.flatten();
let request = Request::Ddl(DdlRequest {
expr: Some(Expr::FlushTable(FlushTableExpr {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
region_id,
})),
});
grpc_handler.do_query(request, QueryContext::arc()).await?;
Ok((HttpStatusCode::OK, Json::from("hello, world".to_string())))
}
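The handler above is mounted under the admin router added earlier ("/{HTTP_API_VERSION}/admin/flush"), so triggering a flush only takes an HTTP POST with the query parameters it reads. A hedged usage sketch; the base URL, the "v1" prefix and the use of reqwest are illustrative assumptions rather than facts from this diff:

// POST /v1/admin/flush?schema_name=public&table_name=monitor&region=0
// `schema_name` is required; omit `table_name` to flush every table in the schema,
// and omit `region` to flush all regions of the chosen table.
async fn trigger_flush() -> Result<(), Box<dyn std::error::Error>> {
    let url =
        "http://127.0.0.1:4000/v1/admin/flush?schema_name=public&table_name=monitor&region=0";
    let resp = reqwest::Client::new().post(url).send().await?;
    assert!(resp.status().is_success());
    Ok(())
}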

View File

@@ -43,6 +43,7 @@ pub async fn sql(
Form(form_params): Form<SqlQuery>,
) -> Json<JsonResponse> {
let sql_handler = &state.sql_handler;
let start = Instant::now();
let sql = query_params.sql.or(form_params.sql);
let db = query_params.db.or(form_params.db);

View File

@@ -17,11 +17,12 @@ use axum_test_helper::TestClient;
use servers::http::{HttpOptions, HttpServer};
use table::test_util::MemTable;
use crate::create_testing_sql_query_handler;
use crate::{create_testing_grpc_query_handler, create_testing_sql_query_handler};
fn make_test_app() -> Router {
let server = HttpServer::new(
create_testing_sql_query_handler(MemTable::default_numbers_table()),
create_testing_grpc_query_handler(MemTable::default_numbers_table()),
HttpOptions::default(),
);
server.make_app()

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::v1::greptime_request::Request;
use api::v1::InsertRequest;
use async_trait::async_trait;
use axum::{http, Router};
@@ -24,6 +25,7 @@ use query::parser::PromQuery;
use servers::error::{Error, Result};
use servers::http::{HttpOptions, HttpServer};
use servers::influxdb::InfluxdbRequest;
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::InfluxdbLineProtocolHandler;
use session::context::QueryContextRef;
@@ -35,6 +37,19 @@ struct DummyInstance {
tx: Arc<mpsc::Sender<(String, String)>>,
}
#[async_trait]
impl GrpcQueryHandler for DummyInstance {
type Error = Error;
async fn do_query(
&self,
_query: Request,
_ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error> {
unimplemented!()
}
}
#[async_trait]
impl InfluxdbLineProtocolHandler for DummyInstance {
async fn exec(&self, request: &InfluxdbRequest, ctx: QueryContextRef) -> Result<()> {
@@ -79,7 +94,7 @@ impl SqlQueryHandler for DummyInstance {
fn make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>) -> Router {
let instance = Arc::new(DummyInstance { tx });
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
let mut user_provider = MockUserProvider::default();
if let Some(name) = db_name {
user_provider.set_authorization_info(DatabaseAuthInfo {

View File

@@ -14,6 +14,7 @@
use std::sync::Arc;
use api::v1::greptime_request::Request;
use async_trait::async_trait;
use axum::Router;
use axum_test_helper::TestClient;
@@ -23,6 +24,7 @@ use query::parser::PromQuery;
use servers::error::{self, Result};
use servers::http::{HttpOptions, HttpServer};
use servers::opentsdb::codec::DataPoint;
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::OpentsdbProtocolHandler;
use session::context::QueryContextRef;
@@ -32,6 +34,19 @@ struct DummyInstance {
tx: mpsc::Sender<String>,
}
#[async_trait]
impl GrpcQueryHandler for DummyInstance {
type Error = crate::Error;
async fn do_query(
&self,
_query: Request,
_ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error> {
unimplemented!()
}
}
#[async_trait]
impl OpentsdbProtocolHandler for DummyInstance {
async fn exec(&self, data_point: &DataPoint, _ctx: QueryContextRef) -> Result<()> {
@@ -77,7 +92,7 @@ impl SqlQueryHandler for DummyInstance {
fn make_test_app(tx: mpsc::Sender<String>) -> Router {
let instance = Arc::new(DummyInstance { tx });
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
server.set_opentsdb_handler(instance);
server.make_app()
}

View File

@@ -17,6 +17,7 @@ use std::sync::Arc;
use api::prometheus::remote::{
LabelMatcher, Query, QueryResult, ReadRequest, ReadResponse, WriteRequest,
};
use api::v1::greptime_request::Request;
use async_trait::async_trait;
use axum::Router;
use axum_test_helper::TestClient;
@@ -28,6 +29,7 @@ use servers::error::{Error, Result};
use servers::http::{HttpOptions, HttpServer};
use servers::prometheus;
use servers::prometheus::{snappy_compress, Metrics};
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse};
use session::context::QueryContextRef;
@@ -37,6 +39,19 @@ struct DummyInstance {
tx: mpsc::Sender<(String, Vec<u8>)>,
}
#[async_trait]
impl GrpcQueryHandler for DummyInstance {
type Error = Error;
async fn do_query(
&self,
_query: Request,
_ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error> {
unimplemented!()
}
}
#[async_trait]
impl PrometheusProtocolHandler for DummyInstance {
async fn write(&self, request: WriteRequest, ctx: QueryContextRef) -> Result<()> {
@@ -102,7 +117,7 @@ impl SqlQueryHandler for DummyInstance {
fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
let instance = Arc::new(DummyInstance { tx });
let mut server = HttpServer::new(instance.clone(), HttpOptions::default());
let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default());
server.set_prom_handler(instance);
server.make_app()
}

View File

@@ -47,7 +47,7 @@ mod py_script;
const LOCALHOST_WITH_0: &str = "127.0.0.1:0";
struct DummyInstance {
pub struct DummyInstance {
query_engine: QueryEngineRef,
py_engine: Arc<PyEngine>,
scripts: RwLock<HashMap<String, Arc<PyScript>>>,

View File

@@ -130,8 +130,24 @@ impl<'a> ParserContext<'a> {
}
}
let connection_options = self
.parser
.parse_options(Keyword::CONNECTION)
.context(error::SyntaxSnafu { sql: self.sql })?;
let connection = connection_options
.into_iter()
.filter_map(|option| {
if let Some(v) = ParserContext::parse_option_string(option.value) {
Some((option.name.value.to_uppercase(), v))
} else {
None
}
})
.collect();
Ok(CopyTable::To(CopyTableTo::new(
table_name, file_name, format,
table_name, file_name, format, connection,
)))
}
@@ -167,7 +183,7 @@ mod tests {
match statement {
Statement::Copy(CopyTable::To(copy_table)) => {
let (catalog, schema, table) =
if let [catalog, schema, table] = &copy_table.table_name().0[..] {
if let [catalog, schema, table] = &copy_table.table_name.0[..] {
(
catalog.value.clone(),
schema.value.clone(),
@@ -181,11 +197,11 @@ mod tests {
assert_eq!("schema0", schema);
assert_eq!("tbl", table);
let file_name = copy_table.file_name();
let file_name = copy_table.file_name;
assert_eq!("tbl_file.parquet", file_name);
let format = copy_table.format();
assert_eq!(Format::Parquet, *format);
let format = copy_table.format;
assert_eq!(Format::Parquet, format);
}
_ => unreachable!(),
}
@@ -275,6 +291,44 @@ mod tests {
}
}
#[test]
fn test_parse_copy_table_to() {
struct Test<'a> {
sql: &'a str,
expected_connection: HashMap<String, String>,
}
let tests = [
Test {
sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' ",
expected_connection: HashMap::new(),
},
Test {
sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' CONNECTION (FOO='Bar', ONE='two')",
expected_connection: [("FOO","Bar"),("ONE","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect()
},
Test {
sql:"COPY catalog0.schema0.tbl TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')",
expected_connection: [("FOO","Bar"),("ONE","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect()
},
];
for test in tests {
let mut result =
ParserContext::create_with_dialect(test.sql, &GenericDialect {}).unwrap();
assert_eq!(1, result.len());
let statement = result.remove(0);
assert_matches!(statement, Statement::Copy { .. });
match statement {
Statement::Copy(CopyTable::To(copy_table)) => {
assert_eq!(copy_table.connection.clone(), test.expected_connection);
}
_ => unreachable!(),
}
}
}
#[test]
fn test_parse_copy_table_with_unsupopoted_format() {
let results = [

View File

@@ -26,31 +26,26 @@ pub enum CopyTable {
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CopyTableTo {
table_name: ObjectName,
file_name: String,
format: Format,
pub table_name: ObjectName,
pub file_name: String,
pub format: Format,
pub connection: HashMap<String, String>,
}
impl CopyTableTo {
pub(crate) fn new(table_name: ObjectName, file_name: String, format: Format) -> Self {
pub(crate) fn new(
table_name: ObjectName,
file_name: String,
format: Format,
connection: HashMap<String, String>,
) -> Self {
Self {
table_name,
file_name,
format,
connection,
}
}
pub fn table_name(&self) -> &ObjectName {
&self.table_name
}
pub fn file_name(&self) -> &str {
&self.file_name
}
pub fn format(&self) -> &Format {
&self.format
}
}
// TODO: To combine struct CopyTableFrom and CopyTableTo

View File

@@ -135,6 +135,10 @@ impl<S: LogStore> Region for RegionImpl<S> {
.map(|level_ssts| level_ssts.files().map(|sst| sst.file_size()).sum::<u64>())
.sum()
}
async fn flush(&self) -> Result<()> {
self.inner.flush().await
}
}
/// Storage related config for region.
@@ -560,4 +564,18 @@ impl<S: LogStore> RegionInner<S> {
async fn close(&self) -> Result<()> {
self.writer.close().await
}
async fn flush(&self) -> Result<()> {
let writer_ctx = WriterContext {
shared: &self.shared,
flush_strategy: &self.flush_strategy,
flush_scheduler: &self.flush_scheduler,
compaction_scheduler: &self.compaction_scheduler,
sst_layer: &self.sst_layer,
wal: &self.wal,
writer: &self.writer,
manifest: &self.manifest,
};
self.writer.flush(writer_ctx).await
}
}

View File

@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use store_api::storage::{OpenOptions, WriteResponse};
use store_api::storage::{OpenOptions, Region, WriteResponse};
use crate::engine;
use crate::flush::FlushStrategyRef;
@@ -94,6 +94,10 @@ impl FlushTester {
async fn wait_flush_done(&self) {
self.base().region.wait_flush_done().await.unwrap();
}
async fn flush(&self) {
self.base().region.flush().await.unwrap();
}
}
#[tokio::test]
@@ -124,6 +128,30 @@ async fn test_flush_and_stall() {
assert!(has_parquet_file(&sst_dir));
}
#[tokio::test]
async fn test_manual_flush() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("manual_flush");
let store_dir = dir.path().to_str().unwrap();
let flush_switch = Arc::new(FlushSwitch::default());
let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
let data = [(1000, Some(100))];
// Put one element so we have content to flush.
tester.put(&data).await;
// No parquet file should be flushed.
let sst_dir = format!("{}/{}", store_dir, engine::region_sst_dir("", REGION_NAME));
assert!(!has_parquet_file(&sst_dir));
tester.flush().await;
tester.wait_flush_done().await;
assert!(has_parquet_file(&sst_dir));
}
#[tokio::test]
async fn test_flush_empty() {
let dir = create_temp_dir("flush-empty");

View File

@@ -260,6 +260,15 @@ impl RegionWriter {
Ok(())
}
/// Manually trigger a flush task
pub async fn flush<S: LogStore>(&self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
let mut inner = self.inner.lock().await;
ensure!(!inner.is_closed(), error::ClosedRegionSnafu);
inner.manual_flush(writer_ctx).await
}
/// Cancel flush task if any
async fn cancel_flush(&self) -> Result<()> {
let mut inner = self.inner.lock().await;
@@ -680,6 +689,11 @@ impl WriterInner {
Some(schedule_compaction_cb)
}
async fn manual_flush<S: LogStore>(&mut self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
self.trigger_flush(&writer_ctx).await?;
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.closed

View File

@@ -76,6 +76,8 @@ pub trait Region: Send + Sync + Clone + std::fmt::Debug + 'static {
async fn close(&self) -> Result<(), Self::Error>;
fn disk_usage_bytes(&self) -> u64;
async fn flush(&self) -> Result<(), Self::Error>;
}
/// Context for write operations.

Some files were not shown because too many files have changed in this diff.