Compare commits


48 Commits

Author SHA1 Message Date
dennis zhuang
b4fc8c5b78 refactor: make sql function in scripts return a list of column vectors (#1243) 2023-03-27 08:50:19 +08:00
Lei, HUANG
6f81717866 fix: skip empty parquet (#1236)
* fix: returns None if parquet file does not contain any rows

* fix: skip empty parquet file

* chore: add doc

* rebase develop

* fix: use flatten instead of filter_map with identity
2023-03-26 09:39:15 +08:00
Lei, HUANG
77f9383daf fix: allow larger compaction window to reduce parallel task num (#1223)
fix: unit tests
2023-03-24 17:12:13 +08:00
discord9
c788b7fc26 feat: slicing PyVector&Create DataFrame from sql (#1190)
* chore: some typos

* feat: slicing for pyo3 vector

* feat: slice tests

* feat: from_sql

* feat: from_sql for dataframe

* test: df tests

* feat: `from_sql` for rspython

* test: tweak a bit

* test: and CR advices

* typos: ordered points

* chore: update error msg

* test: add more `slicing` testcase
2023-03-24 15:37:45 +08:00
LFC
0f160a73be feat: metasrv collects datanode heartbeats for region failure detection (#1214)
* feat: metasrv collects datanode heartbeats for region failure detection

* chore: change visibility

* fix: fragile tests

* Update src/meta-srv/src/handler/persist_stats_handler.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/meta-srv/src/handler/failure_handler.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* fix: resolve PR comments

* fix: resolve PR comments

* fix: resolve PR comments

---------

Co-authored-by: shuiyisong <xixing.sys@gmail.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-03-24 04:28:34 +00:00
LFC
92963b9614 feat: execute "delete" in query engine (in the form of "LogicalPlan") (#1222)
fix: execute "delete" in query engine (in the form of "LogicalPlan")
2023-03-24 12:11:58 +08:00
Yingwen
f1139fba59 fix: Holds FileHandle in ParquetReader to avoid the purger purges it (#1224) 2023-03-23 14:24:25 +00:00
Ruihang Xia
4e552245b1 fix: range func tests (#1221)
* remove ignore on range fn tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* placeholder for changes, deriv and resets

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-23 17:33:11 +08:00
Ruihang Xia
3126bbc1c7 docs: use CDN for logos (#1219)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-23 11:39:24 +08:00
LFC
b77b561bc8 refactor: execute insert with select in query engine (#1181)
* refactor: execute insert with select in query engine

* fix: resolve PR comments
2023-03-23 10:38:26 +08:00
dennis zhuang
501faad8ab chore: rename params in flush api (#1213) 2023-03-22 14:07:23 +08:00
Eugene Tolbakov
5397a9bbe6 feat(to_unixtime): add initial implementation (#1186)
* feat(to_unixtime): add initial implementation

* feat(to_unixtime): use Timestamp for conversion

* feat(to_unixtime):  implement conversion to Result<VectorRef>

* feat(to_unixtime): make unit test pass

* feat(to_unixtime): preserve None for invalid timestamps

* feat(to_unixtime): address code review suggestions

* feat(to_unixtime): add an sqlness test

* feat(to_unixtime): adjust the assertion for the sqlness test

* Update tests/cases/standalone/common/select/dummy.sql

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-21 12:41:07 +00:00
Ruihang Xia
f351ee7042 docs: update document string and site (#1211)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-21 07:01:08 +00:00
Ruihang Xia
e0493e0b8f feat: flush all tables on shutdown (#1185)
* feat: impl flush on shutdown (#14)

* feat: impl flush on shutdown

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* powerful if-else!

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* retrieve table handler from schema provider

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* feat: impl flush on shutdown

* feat: impl flush on shutdown

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* powerful if-else!

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* retrieve table handler from schema provider

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* Update src/datanode/src/instance.rs

Co-authored-by: dennis zhuang <killme2008@gmail.com>

* fix: uncommitted merge change

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: dennis zhuang <killme2008@gmail.com>
2023-03-21 14:36:30 +08:00
LFC
b2a09c888a feat: phi accrual failure detector (#1200) 2023-03-21 11:47:47 +08:00
LFC
af101480b3 feat: add gRPC reflection service (#1208)
* feat: add gRPC reflection service

* feat: add gRPC reflection service
2023-03-21 11:23:29 +08:00
Weny Xu
b8f7f603cf test: add copy clause sqlness tests (#1198) 2023-03-21 11:22:26 +08:00
dennis zhuang
8fb97ea1d8 fix: losing region numbers after altering table (#1209) 2023-03-21 11:19:43 +08:00
discord9
21ce9c1163 docs: more explain in readme (#1195)
* docs: more explain in readme

* fix: typos

* fix: CR advices
2023-03-20 21:56:34 +08:00
Ruihang Xia
0a22375ac1 fix: nyc-taxi bench suite (#1204)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-20 21:53:01 +08:00
fys
0596d20a3b fix: can not create table in the local distributed environment (#1207)
fix: create table in local distribute env
2023-03-20 20:12:35 +08:00
Weny Xu
e19c8fa2b6 refactor: combine Copy To and Copy From (#1197)
* refactor: combine Copy To and Copy From

* Apply suggestions from code review

Co-authored-by: LFC <bayinamine@gmail.com>

* Apply suggestions from code review

Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>

---------

Co-authored-by: LFC <bayinamine@gmail.com>
Co-authored-by: Lei, HUANG <6406592+v0y4g3r@users.noreply.github.com>
2023-03-20 19:23:25 +08:00
LFC
ad886f5b3e feat: GRPC client stream interface for insertion (#1206)
* feat: GRPC client stream interface for insertion

* feat: GRPC client stream interface for insertion
2023-03-20 18:45:37 +08:00
LFC
f6669a8201 feat: add GRPC unary call service to our GreptimeDB (#1196)
* feat: add GRPC unary call service to our GreptimeDB
2023-03-20 14:27:32 +08:00
Yingwen
ad5c47185d feat: wait flush until the flush is done (#1188)
* feat: Add wait argument to flush

* test(storage): Fix flush tests
2023-03-20 11:25:19 +08:00
zyy17
64441616db ci: refactor compile-python.sh and use the python310 to build amd64 binary (#1199) 2023-03-18 16:16:15 +08:00
zyy17
09491d6aee ci: release the standalone binaries with pyo3 and install python utils in images (#1194)
* ci: install python3 and python3-dev in CI Dockerfile

* ci: release the standalone binaries with pyo3 support for multiple platforms

* refactor: install pip and pyarrow

* refactor: specify the python version
2023-03-17 15:42:13 +08:00
Weny Xu
7cfa30b2ab feat: add shutdown for standalone and metasrv (#1174) 2023-03-17 11:35:17 +08:00
Ning Sun
a7676d8860 refactor: port div_ceil from stdlib to avoid unstable features (#1191)
* refactor: use float div&ceil to avoid unstable features

* refactor: port div_ceil from rust stdlib
2023-03-16 22:55:35 +08:00
zyy17
62e2a60b7b ci: release artifacts after binary and container is ready (#1192)
ci: release artifacts before binary and container is ready
2023-03-16 09:20:03 +00:00
zyy17
128c5cabe1 ci: disable run tests temporarily (#1187) 2023-03-16 14:12:19 +08:00
Yingwen
9a001d3392 chore(datanode): derive serde default for Wal/CompactionConfig (#1173) 2023-03-16 11:56:28 +08:00
Weny Xu
facdda4d9f feat: implement CONNECTION clause of Copy To (#1163)
* feat: implement CONNECTION clause of Copy To

* test: add tests for s3 backend

* Apply suggestions from code review

Co-authored-by: Yingwen <realevenyag@gmail.com>

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-16 11:36:38 +08:00
Lei, HUANG
17eb99bc52 feat: allow manual table flush through HTTP API (#1184) 2023-03-15 20:15:34 +08:00
Xieqijun
cd8be77968 feat(procedure): Max retry time (#1095)
* feat: procedure config

* fix: modify config

* feat: add retry logic

* feat: add error

* feat: add it

* feat: add it

* feat: add it

* feat: rm retry from runner

* feat: use backon

* feat: add retry_interval

* feat: add retry_interval

* fix: conflict

* fix: cr

* feat: add retry error and id

* feat: rename

* refactor: execute

* feat: use config dir

* fix: cr

* fix: cr

* fix: fmt

* fix: fmt

* fix: pr

* fix: it

* fix: rm unless cmd params

* feat: add toml

* fix: ut

* feat: add rolling back

* fix: cr

* fix: cr

* fix: cr

* fix: ci

* fix: ci

* fix: ci

* chore: Apply suggestions from code review

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-15 08:28:08 +00:00
Eugene Tolbakov
b530ac9e60 chore(from_unixtime): remove UDF from_unixtime (#1179)
* chore(from_unixtime): remove UDF from_unixtime

* chore(from_unixtime): restore timestamp.rs for further usage

* chore(from_unixtime): address fmt issue
2023-03-15 16:27:09 +08:00
zyy17
76f1a79f1b ci: set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
Signed-off-by: zyy17 <zyylsxm@gmail.com>
2023-03-15 15:41:36 +08:00
LFC
4705245d60 docs: region failover RFC (#1139)
* docs: region failover RFC

* fix: resolve PR comments
2023-03-15 15:21:58 +08:00
Zheming Li
f712f978cf feat: Report disk usage stats to metasrv thru heartbeat (#1167)
* feat: Report disk usage stats to metasrv thru heartbeat

Signed-off-by: Zheming Li <nkdudu@126.com>

* Update src/catalog/src/error.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/catalog/src/lib.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

* Update src/mito/src/table.rs

Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>

---------

Signed-off-by: Zheming Li <nkdudu@126.com>
Co-authored-by: fys <40801205+Fengys123@users.noreply.github.com>
2023-03-15 03:11:32 +00:00
discord9
cbf64e65b9 refactor: put dataframe & query into greptime module (#1172)
* feat: impl getitem for `vector`

* feat: mv `query`&`dataframe` into `greptime` for PyO3

* refactor: allow call dataframe&query

* refactor: pyo3 query&dataframe

* chore: CR advices
2023-03-15 11:01:43 +08:00
zyy17
242ce5c2aa ci: add pyo3 options for mac (#1178) 2023-03-14 13:51:58 +00:00
Ruihang Xia
e8d2e82335 fix: ambiguous column reference (#1177)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 13:18:43 +00:00
zyy17
0086cc2d3d fix: export 'PYO3_CROSS_LIB_DIR' when cargo build for aarch64-linux and refactor matrix opts (#1171) 2023-03-14 15:35:29 +08:00
Weny Xu
cdc111b607 refactor: make the cmd hold the application instance (#1159) 2023-03-14 15:18:50 +08:00
zyy17
81ca1d8399 refactor: add the separate GitHub Action job to push the image to the UCloud registry (#1170) 2023-03-14 11:35:18 +08:00
LFC
8d3999df5f fix: failed to run subquery wrapped in two parentheses (#1157) 2023-03-14 10:59:43 +08:00
discord9
a60788e92e fix: use correct env var (#1166)
* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend
2023-03-14 10:57:13 +08:00
Weny Xu
296c6dfcbf feat: implement table flush (#1121)
* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-13 20:10:37 +08:00
170 changed files with 4680 additions and 2544 deletions

View File

@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id

View File

@@ -1,42 +1,42 @@
# on:
# push:
# branches:
# - develop
# paths-ignore:
# - 'docs/**'
# - 'config/**'
# - '**.md'
# - '.dockerignore'
# - 'docker/**'
# - '.gitignore'
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
# name: Build API docs
name: Build API docs
# env:
# RUST_TOOLCHAIN: nightly-2023-02-26
env:
RUST_TOOLCHAIN: nightly-2023-02-26
# jobs:
# apidoc:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - run: cargo doc --workspace --no-deps --document-private-items
# - run: |
# cat <<EOF > target/doc/index.html
# <!DOCTYPE html>
# <html>
# <head>
# <meta http-equiv="refresh" content="0; url='greptime/'" />
# </head>
# <body></body></html>
# EOF
# - name: Publish dist directory
# uses: JamesIves/github-pages-deploy-action@v4
# with:
# folder: target/doc
jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc

View File

@@ -213,11 +213,10 @@ jobs:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
# - name: Install cargo-llvm-cov
# uses: taiki-e/install-action@cargo-llvm-cov
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo nextest run -F pyo3_backend
# run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1

View File

@@ -2,9 +2,9 @@ on:
push:
tags:
- "v*.*.*"
# schedule:
# # At 00:00 on Monday.
# - cron: '0 0 * * 1'
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
workflow_dispatch:
name: Release
@@ -18,6 +18,9 @@ env:
CARGO_PROFILE: nightly
## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
jobs:
build:
name: Build binary
@@ -29,23 +32,41 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
# opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: true
# opts: "-F pyo3_backend"
# - arch: aarch64-apple-darwin
# os: macos-latest
# file: greptime-darwin-arm64
# continue-on-error: true
# - arch: x86_64-apple-darwin
# os: macos-latest
# file: greptime-darwin-amd64
# continue-on-error: true
continue-on-error: false
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb-edge'
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -100,11 +121,12 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -116,19 +138,54 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=python3
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
@@ -150,11 +207,100 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
release:
name: Release artifacts
docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb-edge'
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64-pyo3.tgz
rm greptime-linux-amd64-pyo3.tgz
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64-pyo3.tgz
rm greptime-linux-arm64-pyo3.tgz
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -193,142 +339,48 @@ jobs:
files: |
**/greptime-*
# docker:
# name: Build docker image
# needs: [build]
# runs-on: ubuntu-latest
# if: github.repository == 'GreptimeTeam/greptimedb'
# steps:
# - name: Checkout sources
# uses: actions/checkout@v3
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
# - name: Login to UCloud Container Registry
# uses: docker/login-action@v2
# with:
# registry: uhub.service.ucloud.cn
# username: ${{ secrets.UCLOUD_USERNAME }}
# password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
# - name: Login to Dockerhub
# uses: docker/login-action@v2
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
# shell: bash
# if: github.event_name == 'schedule'
# run: |
# buildTime=`date "+%Y%m%d"`
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
# shell: bash
# if: github.event_name != 'schedule'
# run: |
# VERSION=${{ github.ref_name }}
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v2
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
# - name: Set up buildx
# uses: docker/setup-buildx-action@v2
# - name: Download amd64 binary
# uses: actions/download-artifact@v3
# with:
# name: greptime-linux-amd64
# path: amd64
# - name: Unzip the amd64 artifacts
# run: |
# cd amd64
# tar xvf greptime-linux-amd64.tgz
# rm greptime-linux-amd64.tgz
# - name: Download arm64 binary
# id: download-arm64
# uses: actions/download-artifact@v3
# with:
# name: greptime-linux-arm64
# path: arm64
# - name: Unzip the arm64 artifacts
# id: unzip-arm64
# if: success() || steps.download-arm64.conclusion == 'success'
# run: |
# cd arm64
# tar xvf greptime-linux-arm64.tgz
# rm greptime-linux-arm64.tgz
# - name: Build and push all
# uses: docker/build-push-action@v3
# if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
# with:
# context: .
# file: ./docker/ci/Dockerfile
# push: true
# platforms: linux/amd64,linux/arm64
# tags: |
# greptime/greptimedb:latest
# greptime/greptimedb:${{ env.IMAGE_TAG }}
# - name: Build and push amd64 only
# uses: docker/build-push-action@v3
# if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
# with:
# context: .
# file: ./docker/ci/Dockerfile
# push: true
# platforms: linux/amd64
# tags: |
# greptime/greptimedb:latest
# greptime/greptimedb:${{ env.IMAGE_TAG }}
# docker-push-uhub:
# name: Push docker image to UCloud Container Registry
# needs: [docker]
# runs-on: ubuntu-latest
# if: github.repository == 'GreptimeTeam/greptimedb'
# # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
# continue-on-error: true
# steps:
# - name: Checkout sources
# uses: actions/checkout@v3
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Login to UCloud Container Registry
# uses: docker/login-action@v2
# with:
# registry: uhub.service.ucloud.cn
# username: ${{ secrets.UCLOUD_USERNAME }}
# password: ${{ secrets.UCLOUD_PASSWORD }}
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
# shell: bash
# if: github.event_name == 'schedule'
# run: |
# buildTime=`date "+%Y%m%d"`
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
# shell: bash
# if: github.event_name != 'schedule'
# run: |
# VERSION=${{ github.ref_name }}
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
# - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
# run: |
# docker buildx imagetools create \
# --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
# --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
# greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image based on the source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}

Cargo.lock generated
View File

@@ -135,7 +135,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arrow-flight",
"common-base",
@@ -754,7 +754,7 @@ dependencies = [
[[package]]
name = "benchmarks"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arrow",
"clap 4.1.8",
@@ -1088,7 +1088,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"arc-swap",
@@ -1339,7 +1339,7 @@ dependencies = [
[[package]]
name = "client"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"arrow-flight",
@@ -1362,7 +1362,7 @@ dependencies = [
"prost",
"rand",
"snafu",
"substrait 0.1.0",
"substrait 0.1.1",
"substrait 0.4.1",
"tokio",
"tonic",
@@ -1392,7 +1392,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anymap",
"build-data",
@@ -1420,7 +1420,7 @@ dependencies = [
"servers",
"session",
"snafu",
"substrait 0.1.0",
"substrait 0.1.1",
"tikv-jemalloc-ctl",
"tikv-jemallocator",
"tokio",
@@ -1456,7 +1456,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anymap",
"bitvec",
@@ -1470,7 +1470,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-trait",
"chrono",
@@ -1485,9 +1485,21 @@ dependencies = [
"tokio",
]
[[package]]
name = "common-datasource"
version = "0.1.1"
dependencies = [
"common-error",
"futures",
"object-store",
"regex",
"snafu",
"url",
]
[[package]]
name = "common-error"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"snafu",
"strum",
@@ -1495,7 +1507,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arc-swap",
"chrono-tz",
@@ -1518,7 +1530,7 @@ dependencies = [
[[package]]
name = "common-function-macro"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arc-swap",
"common-query",
@@ -1532,7 +1544,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"arrow-flight",
@@ -1558,7 +1570,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"async-trait",
@@ -1576,7 +1588,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"common-error",
"snafu",
@@ -1589,9 +1601,10 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-trait",
"backon 0.4.0",
"common-error",
"common-runtime",
"common-telemetry",
@@ -1609,7 +1622,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-trait",
"common-base",
@@ -1627,7 +1640,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"common-error",
"datafusion",
@@ -1643,7 +1656,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"common-error",
"common-telemetry",
@@ -1657,7 +1670,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"backtrace",
"common-error",
@@ -1679,14 +1692,14 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"tempfile",
]
[[package]]
name = "common-time"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"chrono",
"common-error",
@@ -2107,8 +2120,8 @@ dependencies = [
[[package]]
name = "datafusion"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2137,7 +2150,6 @@ dependencies = [
"object_store",
"parking_lot",
"parquet",
"paste",
"percent-encoding",
"pin-project-lite",
"rand",
@@ -2155,8 +2167,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"chrono",
@@ -2168,8 +2180,8 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"dashmap",
"datafusion-common",
@@ -2185,20 +2197,19 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
"datafusion-common",
"log",
"sqlparser",
]
[[package]]
name = "datafusion-optimizer"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"async-trait",
@@ -2214,8 +2225,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2233,7 +2244,6 @@ dependencies = [
"itertools",
"lazy_static",
"md-5",
"num-traits",
"paste",
"petgraph",
"rand",
@@ -2245,8 +2255,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow",
"datafusion-common",
@@ -2256,8 +2266,8 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "19.0.0"
source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff"
version = "20.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a"
dependencies = [
"arrow-schema",
"datafusion-common",
@@ -2268,7 +2278,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"async-compat",
@@ -2282,7 +2292,9 @@ dependencies = [
"client",
"common-base",
"common-catalog",
"common-datasource",
"common-error",
"common-function",
"common-grpc",
"common-grpc-expr",
"common-procedure",
@@ -2319,7 +2331,7 @@ dependencies = [
"sql",
"storage",
"store-api",
"substrait 0.1.0",
"substrait 0.1.1",
"table",
"table-procedure",
"tokio",
@@ -2329,11 +2341,12 @@ dependencies = [
"tower",
"tower-http",
"url",
"uuid",
]
[[package]]
name = "datatypes"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arrow",
"arrow-schema",
@@ -2781,7 +2794,7 @@ dependencies = [
[[package]]
name = "frontend"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"async-stream",
@@ -2824,7 +2837,7 @@ dependencies = [
"sql",
"store-api",
"strfmt",
"substrait 0.1.0",
"substrait 0.1.1",
"table",
"tokio",
"toml",
@@ -2939,9 +2952,9 @@ dependencies = [
[[package]]
name = "futures-core"
version = "0.3.26"
version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd"
[[package]]
name = "futures-executor"
@@ -3085,7 +3098,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0a7b790ed41364b5599dff806d1080bd59c5c9f6#0a7b790ed41364b5599dff806d1080bd59c5c9f6"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eb760d219206c77dd3a105ecb6a3ba97d9d650ec#eb760d219206c77dd3a105ecb6a3ba97d9d650ec"
dependencies = [
"prost",
"tonic",
@@ -3732,7 +3745,7 @@ dependencies = [
[[package]]
name = "log-store"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arc-swap",
"async-stream",
@@ -3975,7 +3988,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"async-trait",
@@ -4002,7 +4015,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anymap",
"api",
@@ -4024,6 +4037,7 @@ dependencies = [
"lazy_static",
"parking_lot",
"prost",
"rand",
"regex",
"serde",
"serde_json",
@@ -4137,7 +4151,7 @@ dependencies = [
[[package]]
name = "mito"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anymap",
"arc-swap",
@@ -4540,7 +4554,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anyhow",
"async-trait",
@@ -4862,7 +4876,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"common-catalog",
"common-error",
@@ -5383,7 +5397,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -5615,7 +5629,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"approx_eq",
"arc-swap",
@@ -6628,7 +6642,7 @@ checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
[[package]]
name = "script"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arrow",
"async-trait",
@@ -6858,7 +6872,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"aide",
"api",
@@ -6928,13 +6942,14 @@ dependencies = [
"tokio-stream",
"tokio-test",
"tonic",
"tonic-reflection",
"tower",
"tower-http",
]
[[package]]
name = "session"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arc-swap",
"common-catalog",
@@ -7171,7 +7186,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"catalog",
@@ -7206,7 +7221,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-trait",
"client",
@@ -7284,7 +7299,7 @@ dependencies = [
[[package]]
name = "storage"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"arc-swap",
"arrow",
@@ -7332,7 +7347,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-stream",
"async-trait",
@@ -7464,7 +7479,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -7558,7 +7573,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"anymap",
"async-trait",
@@ -7594,7 +7609,7 @@ dependencies = [
[[package]]
name = "table-procedure"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"async-trait",
"catalog",
@@ -7676,7 +7691,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"api",
"axum",
@@ -8117,6 +8132,20 @@ dependencies = [
"syn",
]
[[package]]
name = "tonic-reflection"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67494bad4dda4c9bffae901dfe14e2b2c0f760adb4706dc10beeb81799f7f7b2"
dependencies = [
"bytes",
"prost",
"prost-types",
"tokio",
"tokio-stream",
"tonic",
]
[[package]]
name = "toolchain_find"
version = "0.2.0"

View File

@@ -7,6 +7,7 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
@@ -45,7 +46,7 @@ members = [
]
[workspace.package]
version = "0.1.0"
version = "0.1.1"
edition = "2021"
license = "Apache-2.0"
@@ -57,18 +58,18 @@ arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(LFC): Use official DataFusion, when https://github.com/apache/arrow-datafusion/pull/5542 got merged
datafusion = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-common = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-optimizer = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-physical-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-sql = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
futures = "0.3"
futures-util = "0.3"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

View File

@@ -1,8 +1,8 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture>
</p>
@@ -61,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional, only needed if you want to run scripts
in cpython): this install a Python shared library required for running python
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; this also requires enabling the `pyo3_backend` feature when compiling, either via `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs the Python shared library required for running the Python
scripting engine (in CPython mode). This is available as `python3-dev` on
Ubuntu, where you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
#### Build with Docker
@@ -147,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can download pre-built binaries that are ready to use. In most cases the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need the version with PyO3, plus a local Python installation whose version matches the one the PyO3 build was compiled against. We recommend using virtualenv to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default they run CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
@@ -158,6 +158,7 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

View File

@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
@@ -97,6 +97,9 @@ async fn write_data(
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.null_bitmap()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +152,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +166,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -213,6 +233,10 @@ fn build_values(column: &ArrayRef) -> Values {
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -340,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}

View File

@@ -44,5 +44,7 @@ max_purge_tasks = 32
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"

View File

@@ -1,11 +0,0 @@
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data"

View File

@@ -114,3 +114,7 @@ max_purge_tasks = 32
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"
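To illustrate how the two options above are meant to interact, here is a minimal, hypothetical sketch (not the actual procedure runner): the wait time starts at the configured `retry_delay` and grows exponentially on each failed attempt, and the procedure gives up after `max_retry_times` attempts.

```rust
use std::time::Duration;

// Hypothetical sketch: the delay doubles on each failed attempt,
// starting from the configured `retry_delay`.
fn retry_delay(initial: Duration, attempt: u32) -> Duration {
    initial * 2u32.pow(attempt)
}

fn main() {
    let max_retry_times = 3;                  // max_retry_times = 3
    let initial = Duration::from_millis(500); // retry_delay = "500ms"
    for attempt in 0..max_retry_times {
        println!("retry #{} after {:?}", attempt + 1, retry_delay(initial, attempt));
    }
    // Prints 500ms, 1s, 2s; after that the procedure gives up and reports the error.
}
```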

View File

@@ -1,9 +1,36 @@
#!/usr/bin/env bash
set -e
# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
@@ -14,33 +41,47 @@ cd Python-3.10.10
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

View File

@@ -1,6 +1,12 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
RUN python3 -m pip install pyarrow
ARG TARGETARCH

View File

@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---
Fault Tolerance for Region
----------------------
# Summary
This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode; put another way, achieving region high availability ("HA") for the GreptimeDB cluster.
In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.
When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, though some requests may fail during the region failover. Optimizing to reduce the MTTR (Mean Time To Recovery) is not a concern of this RFC and is left for future work.
# Motivation
Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster with confidence. High availability of access to their stored data is a "must have" for any TSDB product, and that includes the GreptimeDB cluster.
# Details
## Background
Some background about regions in distributed mode:
- A table is logically split into multiple regions. Each region stores a non-overlapping part of the table's data.
- Regions are distributed among Datanodes. The mappings are not static; they are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits it into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.
In conclusion, as long as regions remain available, and can regain availability when failures do occur, overall region HA is achieved. With this in mind, let's first see how region failures are detected.
## Failure Detection
We detect region failures in Metasrv, both passively and actively. "Passively" means that Metasrv does not fire "are you healthy" requests at regions. Instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.
A Datanode already carries its region stats in the heartbeat request (the non-relevant fields are omitted):
```protobuf
message HeartbeatRequest {
  ...
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  ...
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  ...
}
```
For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.
> The heartbeat interval is now hardcoded to five seconds.
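To make the omission convention above a bit more concrete, here is a minimal sketch of how a Datanode could assemble the region stats it carries in a heartbeat, simply skipping regions it considers unavailable. The types are simplified, hypothetical stand-ins, not the actual greptime-proto messages or Datanode code:
```rust
// Illustrative stand-ins for the heartbeat payload types (not the real ones).
#[derive(Debug, Clone)]
struct RegionStat {
    region_id: u64,
    table_name: String,
}

struct LocalRegion {
    id: u64,
    table: String,
    available: bool,
}

/// Only available regions contribute a `RegionStat`; unavailable regions are simply
/// omitted, which Metasrv later interprets as missing heartbeats for those regions.
fn collect_region_stats(regions: &[LocalRegion]) -> Vec<RegionStat> {
    regions
        .iter()
        .filter(|r| r.available)
        .map(|r| RegionStat {
            region_id: r.id,
            table_name: r.table.clone(),
        })
        .collect()
}

fn main() {
    let regions = vec![
        LocalRegion { id: 1, table: "metrics".to_string(), available: true },
        LocalRegion { id: 2, table: "metrics".to_string(), available: false },
    ];
    let stats = collect_region_stats(&regions);
    assert_eq!(stats.len(), 1);
    println!("heartbeat carries {} region stat(s)", stats.len());
}
```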
Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains the health information of all regions. If a region's heartbeats have not been received for a period of time, Metasrv suspects that the region might be unavailable. To decide whether a region has failed, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" that represents the likelihood of a region's unavailability, based on the arrival rate of historical heartbeats. Once "phi" rises above some pre-defined threshold, Metasrv considers the region failed.
> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.
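For illustration only, below is a minimal, self-contained sketch of a phi accrual estimator. It uses the exponential-distribution approximation popularized by Cassandra rather than the full normal-distribution CDF, and all struct and method names are hypothetical, not the actual Metasrv code:
```rust
use std::collections::VecDeque;

// A toy phi accrual estimator: phi = -log10(P(no heartbeat within `elapsed`)),
// assuming exponentially distributed heartbeat intervals, so
// phi = (elapsed / mean_interval) * log10(e).
struct PhiAccrualDetector {
    /// Recent heartbeat inter-arrival intervals, in milliseconds.
    intervals: VecDeque<f64>,
    last_heartbeat_ms: f64,
    capacity: usize,
}

impl PhiAccrualDetector {
    fn new(first_heartbeat_ms: f64) -> Self {
        Self {
            intervals: VecDeque::new(),
            last_heartbeat_ms: first_heartbeat_ms,
            capacity: 100,
        }
    }

    /// Record a heartbeat arrival and update the interval history.
    fn on_heartbeat(&mut self, now_ms: f64) {
        self.intervals.push_back(now_ms - self.last_heartbeat_ms);
        if self.intervals.len() > self.capacity {
            self.intervals.pop_front();
        }
        self.last_heartbeat_ms = now_ms;
    }

    /// The longer the silence since the last heartbeat (relative to the mean
    /// interval), the larger phi grows.
    fn phi(&self, now_ms: f64) -> f64 {
        if self.intervals.is_empty() {
            return 0.0;
        }
        let mean = self.intervals.iter().sum::<f64>() / self.intervals.len() as f64;
        let elapsed = now_ms - self.last_heartbeat_ms;
        (elapsed / mean) * std::f64::consts::LOG10_E
    }
}

fn main() {
    let mut detector = PhiAccrualDetector::new(0.0);
    // Heartbeats arrive roughly every 5 seconds.
    for i in 1..=10 {
        detector.on_heartbeat(i as f64 * 5_000.0);
    }
    // Shortly after the last heartbeat phi is small; after a long silence it grows.
    println!("phi after 5s:  {:.2}", detector.phi(55_000.0));
    println!("phi after 40s: {:.2}", detector.phi(90_000.0));
}
```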
When Metasrv decides from heartbeats that some region has failed, that is not the final verdict. Here comes the "active" detection: before Metasrv starts a region failover, it actively invokes the health check interface of the Datanode on which the failed region resides. Only if this health check also fails does Metasrv actually start the failover of the region.
To summarize, the failure detection pseudo-code looks like this:
```rust
// in Metasrv:
fn failure_detection() {
    loop {
        // passive detection
        let failed_regions = all_regions
            .iter()
            .filter(|r| r.estimated_failure_possibility() > config.phi)
            .collect();

        // find the datanodes that contain the failed regions
        let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

        // active detection
        for (datanode, regions) in datanodes_and_regions {
            if !datanode.is_healthy(regions) {
                do_failover(datanode, regions);
            }
        }

        sleep(config.detect_interval);
    }
}
```
Some design considerations:
- Why active detection when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does detection work on regions instead of Datanodes? Because it is possible that only some of the regions on a Datanode are unavailable, not ALL of them, especially when Datanodes are shared by multiple tenants. In that case, it's better to fail over only the designated regions instead of every region residing on the Datanode. All in all, we want finer-grained control over region failover.
So we have detected that some regions are unavailable. How do we regain their availability?
## Region Failover
Region failover largely relies on the remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". Its details are not included in this RFC; let's just assume we already have it.
In general, region failover is fairly simple. Once Metasrv decides to fail over some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode holds the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that helps Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".
> The strategy to choose the most suitable candidates requires careful design, but that's another RFC.
Then, Metasrv sets the state of these failed regions to "passive". We should add a field to `Region`:
```protobuf
message Region {
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  enum State {
    Active = 0;
    Passive = 1;
  }
  State state = 4;

  map<string, string> attrs = 100;
}
```
Here `Region` is used in the message `RegionRoute`, which indicates how write requests are split among regions. When a region is set to "passive", Frontend knows that writes to it should be rejected for the moment (reads from the region are not blocked, however).
> Making a region "passive" here effectively blocks writes to it. That's ok in the failover situation: the region has failed anyway. However, when dealing with active maintenance operations, the region state requires a more refined design. But that's another story.
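As a hedged sketch of what the Frontend side of this rule could look like, the snippet below rejects writes that target a "passive" region while leaving reads untouched. The `RegionRoute`, state, and error types here are simplified stand-ins, not the real GreptimeDB types:
```rust
// Illustrative stand-ins for the route table entry and its state.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RegionState {
    Active,
    Passive,
}

struct RegionRoute {
    region_id: u64,
    state: RegionState,
}

#[derive(Debug)]
enum WriteError {
    RegionPassive { region_id: u64 },
}

/// Reject writes that target passive regions; the read path would skip this check.
fn check_writable(routes: &[RegionRoute], target_region: u64) -> Result<(), WriteError> {
    match routes.iter().find(|r| r.region_id == target_region) {
        Some(route) if route.state == RegionState::Passive => {
            Err(WriteError::RegionPassive { region_id: target_region })
        }
        _ => Ok(()),
    }
}

fn main() {
    let routes = vec![
        RegionRoute { region_id: 1, state: RegionState::Active },
        RegionRoute { region_id: 2, state: RegionState::Passive },
    ];
    assert!(check_writable(&routes, 1).is_ok());
    assert!(check_writable(&routes, 2).is_err());
    println!("passive-region write rejected as expected");
}
```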
Third, Metasrv fires "close region" requests to the failed Datanodes, and "open region" requests to the candidates. The "close region" requests might fail due to the unavailability of the Datanodes, but that's fine: they are just a best-effort attempt to reduce the chance that any in-flight writes get handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.
> Currently "close region" is undefined in Datanode. It could be a local cache cleanup of region data, or tidying up other resources.
Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating that it is ready to handle the region. The "call back" here is carried by its heartbeat to Metasrv. Metasrv then updates the region's state to "active", so that Frontend lifts the restriction on region writes (again, region reads are untouched).
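A minimal sketch of the Metasrv side of this callback, assuming a hypothetical in-memory route table (none of the names below correspond to real Metasrv types): when a candidate's heartbeat shows the region is open, the region is flipped back to "active":
```rust
use std::collections::HashMap;

// Illustrative region state and route table, not the real Metasrv structures.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RegionState {
    Active,
    Passive,
}

#[derive(Default)]
struct RegionRouteTable {
    states: HashMap<u64, RegionState>,
}

impl RegionRouteTable {
    /// Called when a heartbeat from a candidate Datanode carries a `RegionStat`
    /// for a region that is currently "passive": the open succeeded, so lift the
    /// write restriction by marking the region "active" again.
    fn on_candidate_heartbeat(&mut self, opened_region: u64) {
        if let Some(state) = self.states.get_mut(&opened_region) {
            if *state == RegionState::Passive {
                *state = RegionState::Active;
            }
        }
    }
}

fn main() {
    let mut routes = RegionRouteTable::default();
    routes.states.insert(42, RegionState::Passive);
    routes.on_candidate_heartbeat(42);
    assert_eq!(routes.states[&42], RegionState::Active);
    println!("region 42 is active again");
}
```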
All the above steps should be managed by a remote procedure framework. That is another implementation challenge of the region failover feature (the first being the remote WAL, of course).
A picture is worth a thousand words:
```text
+-------------------------+
| Metasrv detects region |
| failure |
+-------------------------+
|
v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions |
+----------------------------+
|
v
+-------------------------+ +-------------------------+
| Metasrv "passive" the |------>| Frontend rejects writes |
| failed regions | | to "passive" regions |
+-------------------------+ +-------------------------+
|
v
+--------------------------+ +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL | | "open" region requests |
+--------------------------+ +---------------------------+
|
|
| +-------------------------+ +-------------------------+
+--------------------->| Metasrv "active" the |------>| Frontend lifts write |
| failed regions | | restriction to regions |
+-------------------------+ +-------------------------+
|
v
+-------------------------+
| Region failover done, |
| HA regain |
+-------------------------+
```
# Alternatives
## The "Neon" Way
The remote WAL raises a problem that could harm the write throughput of the GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, making the remote WAL sit between the Frontend and the Datanode; couldn't that improve our write throughput? It could, though there are some consistency issues like "read-your-writes" to solve.
However, the main reasons we don't adopt this method are two-fold:
1. The remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we plan to make the remote WAL an option that users can choose not to enable (at the cost of some reduction in reliability).
## No WAL, Replication instead
This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Were the main region to fail, a standby region in the replication group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure: it would require a major redesign, yet gain no significant advantage over the remote WAL method.
However, replication does have advantages of its own that we can learn from to optimize the failover procedure.
# Future Work
Some optimizations we could pursue:
- To reduce the MTTR, we could have Metasrv choose a candidate for each region ahead of time, during normal operation. The candidate does some preparation work to reduce the region open time, effectively accelerating the failover procedure.
- We could adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover does not need to load or exchange much data, greatly reducing the failover time.

View File

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0a7b790ed41364b5599dff806d1080bd59c5c9f6" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eb760d219206c77dd3a105ecb6a3ba97d9d650ec" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

View File

@@ -204,6 +204,21 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -238,7 +253,8 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),

View File

@@ -18,8 +18,9 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -225,39 +226,52 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}
/// The number of regions in the datanode node.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
/// The stats of regions on this datanode node.
/// The number of regions can be obtained from the length of the returned vec.
///
/// Ignores any errors that occur while iterating over regions. The intention of this method is to
/// collect region stats that will be carried in the Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
.catalog(&catalog_name)?
.context(error::CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
for catalog_name in catalog_names {
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };
for schema_name in catalog.schema_names()? {
let schema = catalog
.schema(&schema_name)?
.context(error::SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
let Ok(schema_names) = catalog.schema_names() else { continue };
for schema_name in schema_names {
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };
for table_name in schema.table_names()? {
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let Ok(table_names) = schema.table_names() else { continue };
for table_name in table_names {
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_number)
(region_number, region_stats)
}

View File

@@ -23,7 +23,7 @@ enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true

View File

@@ -66,10 +66,6 @@ pub enum Error {
source: common_grpc::error::Error,
},
/// Error deserialized from gRPC metadata
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },
@@ -94,7 +90,6 @@ impl ErrorExt for Error {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![doc = include_str!("../../../../README.md")]
use std::fmt;
use clap::Parser;

View File

@@ -31,7 +31,6 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
// TODO: handle cli shutdown
Ok(())
}
}

View File

@@ -21,7 +21,7 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;
pub struct Instance {
@@ -34,8 +34,10 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
// TODO: handle datanode shutdown
Ok(())
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}
@@ -150,7 +152,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}

View File

@@ -26,10 +26,10 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to stop datanode, source: {}", source))]
StopDatanode {
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: BoxedError,
source: datanode::error::Error,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
@@ -38,6 +38,12 @@ pub enum Error {
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
@@ -50,6 +56,12 @@ pub enum Error {
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -149,7 +161,10 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
@@ -169,7 +184,6 @@ impl ErrorExt for Error {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
Error::StopDatanode { source } => source.status_code(),
}
}

View File

@@ -47,8 +47,10 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
// TODO: handle frontend shutdown
Ok(())
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}

View File

@@ -30,13 +30,14 @@ impl Instance {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)?;
Ok(())
.context(error::StartMetaServerSnafu)
}
pub async fn stop(&self) -> Result<()> {
// TODO: handle metasrv shutdown
Ok(())
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}

View File

@@ -16,7 +16,6 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_error::prelude::BoxedError;
use common_telemetry::info;
use datanode::datanode::{
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
@@ -38,7 +37,8 @@ use servers::Mode;
use snafu::ResultExt;
use crate::error::{
Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu, StopDatanodeSnafu,
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
@@ -155,16 +155,16 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.map_err(BoxedError::new)
.context(StopDatanodeSnafu)?;
self.frontend
.shutdown()
.await
.map_err(BoxedError::new)
.context(StopDatanodeSnafu)?;
.context(ShutdownFrontendSnafu)?;
self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
Ok(())
}

View File

@@ -0,0 +1,13 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-error = { path = "../error" }
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
url = "2.3"

View File

@@ -0,0 +1,75 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
use url::ParseError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String },
#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
backtrace: Backtrace,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
UnsupportedBackendProtocol { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -0,0 +1,18 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
pub mod lister;
pub mod object_store;
pub mod util;

View File

@@ -0,0 +1,81 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::{future, TryStreamExt};
use object_store::{Object, ObjectStore};
use regex::Regex;
use snafu::ResultExt;
use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Source {
Filename(String),
Dir,
}
pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}
impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
regex,
}
}
pub async fn list(&self) -> Result<Vec<Object>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.object(&self.path)
.list()
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
streamer
.try_filter(|f| {
let res = self
.regex
.as_ref()
.map(|x| x.is_match(f.name()))
.unwrap_or(true);
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
let obj = self
.object_store
.object(&format!("{}{}", self.path, filename));
Ok(vec![obj])
}
}
}
}

View File

@@ -0,0 +1,60 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod fs;
pub mod s3;
use std::collections::HashMap;
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};
use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// parse url returns (schema,Option<host>,path)
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
url.scheme().to_string(),
url.host_str().map(|s| s.to_string()),
url.path().to_string(),
)),
Err(ParseError::RelativeUrlWithoutBase) => {
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
}
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
}
}
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
}
}

View File

@@ -0,0 +1,28 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let accessor = Fs::default()
.root(root)
.build()
.context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}

View File

@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use object_store::services::S3;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use crate::error::{self, Result};
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
builder.bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}

View File

@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
if path.is_empty() {
("/".to_string(), None)
} else if path.ends_with('/') {
(path.to_string(), None)
} else if let Some(idx) = path.rfind('/') {
(
path[..idx + 1].to_string(),
Some(path[idx + 1..].to_string()),
)
} else {
("/".to_string(), Some(path.to_string()))
}
}
#[cfg(test)]
mod tests {
use url::Url;
use super::*;
#[test]
fn test_parse_uri() {
struct Test<'a> {
uri: &'a str,
expected_path: &'a str,
expected_schema: &'a str,
}
let tests = [
Test {
uri: "s3://bucket/to/path/",
expected_path: "/to/path/",
expected_schema: "s3",
},
Test {
uri: "fs:///to/path/",
expected_path: "/to/path/",
expected_schema: "fs",
},
Test {
uri: "fs:///to/path/file",
expected_path: "/to/path/file",
expected_schema: "fs",
},
];
for test in tests {
let parsed_uri = Url::parse(test.uri).unwrap();
assert_eq!(parsed_uri.path(), test.expected_path);
assert_eq!(parsed_uri.scheme(), test.expected_schema);
}
}
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
assert_eq!(parsed.path(), "/to/path/file");
let parsed = Url::from_directory_path("/to/path/").unwrap();
assert_eq!(parsed.path(), "/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {
path: &'a str,
expected_dir: &'a str,
expected_filename: Option<String>,
}
let tests = [
Test {
path: "to/path/",
expected_dir: "to/path/",
expected_filename: None,
},
Test {
path: "to/path/filename",
expected_dir: "to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/to/path/filename",
expected_dir: "/to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/",
expected_dir: "/",
expected_filename: None,
},
Test {
path: "filename",
expected_dir: "/",
expected_filename: Some("filename".into()),
},
Test {
path: "",
expected_dir: "/",
expected_filename: None,
},
];
for test in tests {
let (path, filename) = find_dir_and_filename(test.path);
assert_eq!(test.expected_dir, path);
assert_eq!(test.expected_filename, filename)
}
}
}

View File

@@ -11,11 +11,10 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod from_unixtime;
mod to_unixtime;
use from_unixtime::FromUnixtimeFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
@@ -23,6 +22,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(FromUnixtimeFunction::default()));
registry.register(Arc::new(ToUnixtimeFunction::default()));
}
}

View File

@@ -1,133 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! from_unixtime function.
/// TODO(dennis) It can be removed after we upgrade datafusion.
use std::fmt;
use std::sync::Arc;
use common_query::error::{
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
use snafu::ResultExt;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct FromUnixtimeFunction;
const NAME: &str = "from_unixtime";
impl Function for FromUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_millisecond_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::int64_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
match columns[0].data_type() {
ConcreteDataType::Int64(_) => {
let array = columns[0].to_arrow_array();
// Our timestamp vector's time unit is millisecond
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
.context(ArrowComputeSnafu)?;
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
Ok(Arc::new(
TimestampMillisecondVector::try_from_arrow_array(
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
typ: ArrowDatatype::Int64,
})?,
)
.context(IntoVectorSnafu {
data_type: arrow_datatype.clone(),
})?,
))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for FromUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FROM_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::Int64Vector;
use super::*;
#[test]
fn test_from_unixtime() {
let f = FromUnixtimeFunction::default();
assert_eq!("from_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::int64_datatype()]
));
let times = vec![Some(1494410783), None, Some(1494410983)];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(3, vector.len());
for (i, t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), t.unwrap() * 1000);
}
_ => unreachable!(),
}
}
}
}

View File

@@ -0,0 +1,148 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
use snafu::ensure;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct ToUnixtimeFunction;
const NAME: &str = "to_unixtime";
fn convert_to_seconds(arg: &str) -> Option<i64> {
match Timestamp::from_str(arg) {
Ok(ts) => {
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
Some(ts.value().div_euclid(sec_mul))
}
Err(_err) => None,
}
}
impl Function for ToUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_second_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(
vec![ConcreteDataType::String(StringType)],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
error::InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let array = columns[0].to_arrow_array();
let vector = StringVector::try_from_arrow_array(&array).unwrap();
Ok(Arc::new(Int64Vector::from(
(0..vector.len())
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
.collect::<Vec<_>>(),
)))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for ToUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TO_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::value::Value;
use datatypes::vectors::StringVector;
use super::{ToUnixtimeFunction, *};
use crate::scalars::Function;
#[test]
fn test_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_second_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Exact(valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::String(StringType)]
));
let times = vec![
Some("2023-03-01T06:35:02Z"),
None,
Some("2022-06-30T23:59:60Z"),
Some("invalid_time_stamp"),
];
let results = vec![Some(1677652502), None, Some(1656633600), None];
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
}

View File

@@ -32,7 +32,7 @@ pub enum Error {
DecodeInsert { source: DecodeError },
#[snafu(display("Illegal insert data"))]
IllegalInsertData,
IllegalInsertData { backtrace: Backtrace },
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {

View File

@@ -26,7 +26,7 @@ tower = "0.4"
[dev-dependencies]
criterion = "0.4"
rand = "0.8"
rand.workspace = true
[[bench]]
name = "bench_main"

View File

@@ -14,6 +14,7 @@ object-store = { path = "../../object-store" }
serde.workspace = true
serde_json = "1.0"
smallvec = "1"
backon = "0.4.0"
snafu.workspace = true
tokio.workspace = true
uuid.workspace = true

View File

@@ -97,6 +97,16 @@ pub enum Error {
source: Arc<Error>,
backtrace: Backtrace,
},
#[snafu(display(
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
procedure_id,
source
))]
RetryTimesExceeded {
source: Arc<Error>,
procedure_id: ProcedureId,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -111,6 +121,7 @@ impl ErrorExt for Error {
| Error::ListState { .. }
| Error::ReadState { .. }
| Error::FromJson { .. }
| Error::RetryTimesExceeded { .. }
| Error::RetryLater { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {

View File

@@ -17,8 +17,10 @@ mod runner;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_telemetry::logging;
use object_store::ObjectStore;
use snafu::ensure;
@@ -291,12 +293,16 @@ impl ManagerContext {
pub struct ManagerConfig {
/// Object store
pub object_store: ObjectStore,
pub max_retry_times: usize,
pub retry_delay: Duration,
}
/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
manager_ctx: Arc<ManagerContext>,
state_store: StateStoreRef,
max_retry_times: usize,
retry_delay: Duration,
}
impl LocalManager {
@@ -305,6 +311,8 @@ impl LocalManager {
LocalManager {
manager_ctx: Arc::new(ManagerContext::new()),
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
max_retry_times: config.max_retry_times,
retry_delay: config.retry_delay,
}
}
@@ -321,7 +329,11 @@ impl LocalManager {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: ExponentialBuilder::default()
.with_min_delay(self.retry_delay)
.with_max_times(self.max_retry_times),
store: ProcedureStore::new(self.state_store.clone()),
rolling_back: false,
};
let watcher = meta.state_receiver.clone();
@@ -543,6 +555,8 @@ mod tests {
let dir = create_temp_dir("register");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -562,6 +576,8 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let config = ManagerConfig {
object_store: object_store.clone(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -606,6 +622,8 @@ mod tests {
let dir = create_temp_dir("submit");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);
@@ -652,6 +670,8 @@ mod tests {
let dir = create_temp_dir("on_err");
let config = ManagerConfig {
object_store: test_util::new_object_store(&dir),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
};
let manager = LocalManager::new(config);

View File

@@ -15,15 +15,15 @@
use std::sync::Arc;
use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;
use crate::error::{ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
#[derive(Debug)]
enum ExecResult {
@@ -108,7 +108,9 @@ pub(crate) struct Runner {
pub(crate) procedure: BoxedProcedure,
pub(crate) manager_ctx: Arc<ManagerContext>,
pub(crate) step: u32,
pub(crate) exponential_builder: ExponentialBuilder,
pub(crate) store: ProcedureStore,
pub(crate) rolling_back: bool,
}
impl Runner {
@@ -164,18 +166,56 @@ impl Runner {
provider: self.manager_ctx.clone(),
};
self.rolling_back = false;
self.execute_once_with_retry(&ctx).await;
}
async fn execute_once_with_retry(&mut self, ctx: &Context) {
let mut retry = self.exponential_builder.build();
let mut retry_times = 0;
loop {
match self.execute_once(&ctx).await {
ExecResult::Continue => (),
match self.execute_once(ctx).await {
ExecResult::Done | ExecResult::Failed => return,
ExecResult::Continue => (),
ExecResult::RetryLater => {
self.wait_on_err().await;
retry_times += 1;
if let Some(d) = retry.next() {
self.wait_on_err(d, retry_times).await;
} else {
assert!(self.meta.state().is_retrying());
if let Retrying { error } = self.meta.state() {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RetryTimesExceeded {
source: error,
procedure_id: self.meta.id,
},
)))
}
return;
}
}
}
}
}
async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
if let Err(e) = self.rollback_procedure().await {
self.rolling_back = true;
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(error));
ExecResult::Failed
}
async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
// if rolling_back, there is no need to execute again.
if self.rolling_back {
// We can definitely get the previous error here.
let state = self.meta.state();
let err = state.error().unwrap();
return self.rollback(err.clone()).await;
}
match self.procedure.execute(ctx).await {
Ok(status) => {
logging::debug!(
@@ -186,8 +226,11 @@ impl Runner {
status.need_persist(),
);
if status.need_persist() && self.persist_procedure().await.is_err() {
return ExecResult::RetryLater;
if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
return ExecResult::RetryLater;
}
}
match status {
@@ -196,7 +239,8 @@ impl Runner {
self.on_suspended(subprocedures).await;
}
Status::Done => {
if self.commit_procedure().await.is_err() {
if let Err(e) = self.commit_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
@@ -217,17 +261,12 @@ impl Runner {
);
if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(Arc::new(e)));
// Write rollback key so we can skip this procedure while recovering procedures.
if self.rollback_procedure().await.is_err() {
return ExecResult::RetryLater;
}
ExecResult::Failed
self.rollback(Arc::new(e)).await
}
}
}
@@ -261,7 +300,9 @@ impl Runner {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: self.exponential_builder.clone(),
store: self.store.clone(),
rolling_back: false,
};
// Insert the procedure. We already check the procedure existence before inserting
@@ -285,8 +326,16 @@ impl Runner {
});
}
async fn wait_on_err(&self) {
time::sleep(ERR_WAIT_DURATION).await;
/// Extend the retry time to wait for the next retry.
async fn wait_on_err(&self, d: Duration, i: u64) {
logging::info!(
"Procedure {}-{} retry for the {} times after {} millis",
self.procedure.type_name(),
self.meta.id,
i,
d.as_millis(),
);
time::sleep(d).await;
}
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
@@ -416,7 +465,9 @@ mod tests {
procedure,
manager_ctx: Arc::new(ManagerContext::new()),
step: 0,
exponential_builder: ExponentialBuilder::default(),
store,
rolling_back: false,
}
}
@@ -744,7 +795,7 @@ mod tests {
let res = runner.execute_once(&ctx).await;
assert!(res.is_retry_later(), "{res:?}");
assert!(meta.state().is_running());
assert!(meta.state().is_retrying());
let res = runner.execute_once(&ctx).await;
assert!(res.is_done(), "{res:?}");
@@ -752,6 +803,36 @@ mod tests {
check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
}
#[tokio::test]
async fn test_execute_exceed_max_retry_later() {
let exec_fn =
|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};
let dir = create_temp_dir("exceed_max_retry_later");
let meta = exceed_max_retry_later.new_meta(ROOT_ID);
let object_store = test_util::new_object_store(&dir);
let procedure_store = ProcedureStore::from(object_store.clone());
let mut runner = new_runner(
meta.clone(),
Box::new(exceed_max_retry_later),
procedure_store,
);
runner.exponential_builder = ExponentialBuilder::default()
.with_min_delay(Duration::from_millis(1))
.with_max_times(3);
// Run the runner and execute the procedure.
runner.execute_procedure_in_loop().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("Procedure retry exceeded max times"));
}
#[tokio::test]
async fn test_child_error() {
let mut times = 0;
@@ -819,7 +900,7 @@ mod tests {
// Replace the manager ctx.
runner.manager_ctx = manager_ctx;
// Run the runer and execute the procedure.
// Run the runner and execute the procedure.
runner.run().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("subprocedure failed"), "{err}");

View File

@@ -206,6 +206,8 @@ pub enum ProcedureState {
Running,
/// The procedure is finished.
Done,
/// The procedure is failed and can be retried.
Retrying { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
Failed { error: Arc<Error> },
}
@@ -216,6 +218,11 @@ impl ProcedureState {
ProcedureState::Failed { error }
}
/// Returns a [ProcedureState] with retrying state.
pub fn retrying(error: Arc<Error>) -> ProcedureState {
ProcedureState::Retrying { error }
}
/// Returns true if the procedure state is running.
pub fn is_running(&self) -> bool {
matches!(self, ProcedureState::Running)
@@ -231,10 +238,16 @@ impl ProcedureState {
matches!(self, ProcedureState::Failed { .. })
}
/// Returns true if the procedure state is retrying.
pub fn is_retrying(&self) -> bool {
matches!(self, ProcedureState::Retrying { .. })
}
/// Returns the error.
pub fn error(&self) -> Option<&Arc<Error>> {
match self {
ProcedureState::Failed { error } => Some(error),
ProcedureState::Retrying { error } => Some(error),
_ => None,
}
}

View File

@@ -33,6 +33,9 @@ pub async fn wait(watcher: &mut Watcher) -> Result<()> {
ProcedureState::Failed { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
ProcedureState::Retrying { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
}
}
}
}

View File

@@ -17,6 +17,7 @@ use std::any::Any;
use common_error::ext::BoxedError;
use common_error::prelude::*;
use datatypes::prelude::ConcreteDataType;
pub type Result<T> = std::result::Result<T, Error>;
@@ -70,6 +71,26 @@ pub enum Error {
source: datafusion_common::DataFusionError,
backtrace: Backtrace,
},
#[snafu(display("Column {} not exists in table {}", column_name, table_name))]
ColumnNotExists {
column_name: String,
table_name: String,
backtrace: Backtrace,
},
#[snafu(display(
"Failed to cast vector of type '{:?}' to type '{:?}', source: {}",
from_type,
to_type,
source
))]
CastVector {
from_type: ConcreteDataType,
to_type: ConcreteDataType,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
}
impl ErrorExt for Error {
@@ -81,11 +102,14 @@ impl ErrorExt for Error {
| Error::CreateRecordBatches { .. }
| Error::PollStream { .. }
| Error::Format { .. }
| Error::InitRecordbatchStream { .. } => StatusCode::Internal,
| Error::InitRecordbatchStream { .. }
| Error::ColumnNotExists { .. } => StatusCode::Internal,
Error::External { source } => source.status_code(),
Error::SchemaConversion { source, .. } => source.status_code(),
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
source.status_code()
}
}
}

View File

@@ -12,14 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use datatypes::schema::SchemaRef;
use datatypes::value::Value;
use datatypes::vectors::{Helper, VectorRef};
use serde::ser::{Error, SerializeStruct};
use serde::{Serialize, Serializer};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use crate::error::{self, Result};
use crate::error::{self, CastVectorSnafu, ColumnNotExistsSnafu, Result};
use crate::DfRecordBatch;
/// A two-dimensional batch of column-oriented data with a defined schema.
@@ -108,6 +110,41 @@ impl RecordBatch {
pub fn rows(&self) -> RecordBatchRowIterator<'_> {
RecordBatchRowIterator::new(self)
}
pub fn column_vectors(
&self,
table_name: &str,
table_schema: SchemaRef,
) -> Result<HashMap<String, VectorRef>> {
let mut vectors = HashMap::with_capacity(self.num_columns());
// column schemas in recordbatch must match its vectors, otherwise it's corrupted
for (vector_schema, vector) in self.schema.column_schemas().iter().zip(self.columns.iter())
{
let column_name = &vector_schema.name;
let column_schema =
table_schema
.column_schema_by_name(column_name)
.context(ColumnNotExistsSnafu {
table_name,
column_name,
})?;
let vector = if vector_schema.data_type != column_schema.data_type {
vector
.cast(&column_schema.data_type)
.with_context(|_| CastVectorSnafu {
from_type: vector.data_type(),
to_type: column_schema.data_type.clone(),
})?
} else {
vector.clone()
};
vectors.insert(column_name.clone(), vector);
}
Ok(vectors)
}
}
impl Serialize for RecordBatch {

View File

@@ -12,4 +12,4 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
rand = "0.8"
rand.workspace = true

View File

@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::util::div_ceil;
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -143,7 +144,7 @@ impl Timestamp {
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
}
}

View File

@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
chrono::Utc::now().timestamp_millis()
}
/// Port of rust unstable features `int_roundings`.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
let d = this / rhs;
let r = this % rhs;
if r > 0 && rhs > 0 {
d + 1
} else {
d
}
}
#[cfg(test)]
mod tests {
use std::time::{self, SystemTime};
@@ -42,4 +53,10 @@ mod tests {
assert_eq!(datetime_std.hour(), datetime_now.hour());
assert_eq!(datetime_std.minute(), datetime_now.minute());
}
#[test]
fn test_div_ceil() {
let v0 = 9223372036854676001;
assert_eq!(9223372036854677, div_ceil(v0, 1000));
}
}

View File

@@ -20,6 +20,8 @@ catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-datasource = { path = "../common/datasource" }
common-function = { path = "../common/function" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-procedure = { path = "../common/procedure" }
@@ -64,6 +66,7 @@ tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
url = "2.3.1"
uuid.workspace = true
[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }

View File

@@ -151,11 +151,22 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
pub struct ProcedureConfig {
/// Storage config for procedure manager.
pub store: ObjectStoreConfig,
/// Max retry times of a procedure.
pub max_retry_times: usize,
/// Initial retry delay of procedures; it increases exponentially.
#[serde(with = "humantime_serde")]
pub retry_delay: Duration,
}
impl Default for ProcedureConfig {
fn default() -> ProcedureConfig {
ProcedureConfig::from_file_path("/tmp/greptimedb/procedure/".to_string())
ProcedureConfig {
store: ObjectStoreConfig::File(FileConfig {
data_dir: "/tmp/greptimedb/procedure/".to_string(),
}),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
}
}
}
@@ -163,6 +174,7 @@ impl ProcedureConfig {
pub fn from_file_path(path: String) -> ProcedureConfig {
ProcedureConfig {
store: ObjectStoreConfig::File(FileConfig { data_dir: path }),
..Default::default()
}
}
}
@@ -243,7 +255,7 @@ impl Datanode {
self.instance.clone()
}
async fn shutdown_instance(&self) -> Result<()> {
pub async fn shutdown_instance(&self) -> Result<()> {
self.instance.shutdown().await
}
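The `humantime_serde` attribute on the `retry_delay` field above means the delay can be written as a human-readable duration in configuration. A hedged sketch of that round trip — the `ProcedureSettings` struct and the inline TOML snippet are illustrative stand-ins, not the project's actual config loader:

use std::time::Duration;

use serde::Deserialize;

// Illustrative stand-in whose field names mirror `ProcedureConfig`.
#[derive(Debug, Deserialize)]
struct ProcedureSettings {
    max_retry_times: usize,
    #[serde(with = "humantime_serde")]
    retry_delay: Duration,
}

fn main() {
    let toml_src = "max_retry_times = 3\nretry_delay = \"500ms\"";
    let settings: ProcedureSettings = toml::from_str(toml_src).unwrap();
    assert_eq!(settings.max_retry_times, 3);
    assert_eq!(settings.retry_delay, Duration::from_millis(500));
}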

View File

@@ -14,11 +14,10 @@
use std::any::Any;
use common_datasource::error::Error as DataSourceError;
use common_error::prelude::*;
use common_procedure::ProcedureId;
use common_recordbatch::error::Error as RecordBatchError;
use datafusion::parquet;
use datatypes::prelude::ConcreteDataType;
use storage::error::Error as StorageError;
use table::error::Error as TableError;
use url::ParseError;
@@ -124,24 +123,6 @@ pub enum Error {
))]
ColumnValuesNumberMismatch { columns: usize, values: usize },
#[snafu(display(
"Column type mismatch, column: {}, expected type: {:?}, actual: {:?}",
column,
expected,
actual,
))]
ColumnTypeMismatch {
column: String,
expected: ConcreteDataType,
actual: ConcreteDataType,
},
#[snafu(display("Failed to collect record batch, source: {}", source))]
CollectRecords {
#[snafu(backtrace)]
source: RecordBatchError,
},
#[snafu(display("Failed to parse sql value, source: {}", source))]
ParseSqlValue {
#[snafu(backtrace)]
@@ -218,7 +199,13 @@ pub enum Error {
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
#[snafu(backtrace)]
source: DataSourceError,
},
#[snafu(display("Failed to parse url, source: {}", source))]
ParseUrl {
source: DataSourceError,
backtrace: Backtrace,
},
@@ -249,6 +236,12 @@ pub enum Error {
source: regex::Error,
},
#[snafu(display("Failed to list objects, source: {}", source))]
ListObjects {
#[snafu(backtrace)]
source: DataSourceError,
},
#[snafu(display("Failed to parse the data, source: {}", source))]
ParseDataTypes {
#[snafu(backtrace)]
@@ -475,13 +468,6 @@ pub enum Error {
source: object_store::Error,
},
#[snafu(display("Failed to list objects in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},
#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
@@ -550,8 +536,6 @@ impl ErrorExt for Error {
Insert { source, .. } => source.status_code(),
Delete { source, .. } => source.status_code(),
CollectRecords { source, .. } => source.status_code(),
TableNotFound { .. } => StatusCode::TableNotFound,
ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
@@ -564,7 +548,6 @@ impl ErrorExt for Error {
ConvertSchema { source, .. } | VectorComputation { source } => source.status_code(),
ColumnValuesNumberMismatch { .. }
| ColumnTypeMismatch { .. }
| InvalidSql { .. }
| InvalidUrl { .. }
| InvalidPath { .. }
@@ -584,7 +567,8 @@ impl ErrorExt for Error {
| DatabaseNotFound { .. }
| MissingNodeId { .. }
| MissingMetasrvOpts { .. }
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
| ColumnNoneDefaultValue { .. }
| ParseUrl { .. } => StatusCode::InvalidArguments,
// TODO(yingwen): Further categorize http error.
StartServer { .. }

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
use catalog::{region_number, CatalogManagerRef};
use catalog::{datanode_stat, CatalogManagerRef};
use common_telemetry::{error, info, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
@@ -106,13 +106,7 @@ impl HeartbeatTask {
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
common_runtime::spawn_bg(async move {
while running.load(Ordering::Acquire) {
let region_num = match region_number(&catalog_manager_clone).await {
Ok(region_num) => region_num as i64,
Err(e) => {
error!("failed to get region number, err: {e:?}");
-1
}
};
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
let req = HeartbeatRequest {
peer: Some(Peer {
@@ -120,9 +114,10 @@ impl HeartbeatTask {
addr: addr.clone(),
}),
node_stat: Some(NodeStat {
region_num,
region_num: region_num as _,
..Default::default()
}),
region_stats,
..Default::default()
};

View File

@@ -204,7 +204,6 @@ impl Instance {
sql_handler: SqlHandler::new(
table_engine.clone(),
catalog_manager.clone(),
query_engine.clone(),
table_engine,
procedure_manager,
),
@@ -263,6 +262,7 @@ impl Instance {
schema_name,
table_name: None,
region_number: None,
wait: Some(true),
})
})
.collect::<Vec<_>>();
@@ -274,7 +274,7 @@ impl Instance {
.await
.map_err(BoxedError::new)
.context(ShutdownInstanceSnafu);
info!("flush success: {}", flush_result.is_ok());
info!("Flushed all tables result: {}", flush_result.is_ok());
flush_result?;
Ok(())
@@ -500,7 +500,11 @@ pub(crate) async fn create_procedure_manager(
);
let object_store = new_object_store(&procedure_config.store).await?;
let manager_config = ManagerConfig { object_store };
let manager_config = ManagerConfig {
object_store,
max_retry_times: procedure_config.max_retry_times,
retry_delay: procedure_config.retry_delay,
};
Ok(Some(Arc::new(LocalManager::new(manager_config))))
}

View File

@@ -21,7 +21,7 @@ use common_query::Output;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::plan::LogicalPlan;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::QueryContextRef;
use session::context::{QueryContext, QueryContextRef};
use snafu::prelude::*;
use sql::statements::statement::Statement;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
@@ -53,7 +53,7 @@ impl Instance {
.context(DecodeLogicalPlanSnafu)?;
self.query_engine
.execute(&LogicalPlan::DfPlan(logical_plan))
.execute(LogicalPlan::DfPlan(logical_plan), QueryContext::arc())
.await
.context(ExecuteLogicalPlanSnafu)
}
@@ -69,11 +69,11 @@ impl Instance {
let plan = self
.query_engine
.planner()
.plan(stmt, ctx)
.plan(stmt, ctx.clone())
.await
.context(PlanStatementSnafu)?;
self.query_engine
.execute(&plan)
.execute(plan, ctx)
.await
.context(ExecuteLogicalPlanSnafu)
}
@@ -175,7 +175,7 @@ mod test {
.plan(stmt, QueryContext::arc())
.await
.unwrap();
engine.execute(&plan).await.unwrap()
engine.execute(plan, QueryContext::arc()).await.unwrap()
}
#[tokio::test(flavor = "multi_thread")]

View File

@@ -19,7 +19,6 @@ use common_error::prelude::BoxedError;
use common_query::Output;
use common_telemetry::logging::info;
use common_telemetry::timer;
use futures::StreamExt;
use query::error::QueryExecutionSnafu;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::query_engine::StatementHandler;
@@ -28,12 +27,10 @@ use servers::prom::PromHandler;
use session::context::{QueryContext, QueryContextRef};
use snafu::prelude::*;
use sql::ast::ObjectName;
use sql::statements::copy::CopyTable;
use sql::statements::copy::{CopyTable, CopyTableArgument};
use sql::statements::statement::Statement;
use table::engine::TableReference;
use table::requests::{
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
};
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
use crate::error::{
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
@@ -41,8 +38,7 @@ use crate::error::{
};
use crate::instance::Instance;
use crate::metric;
use crate::sql::insert::InsertRequests;
use crate::sql::SqlRequest;
use crate::sql::{SqlHandler, SqlRequest};
impl Instance {
pub async fn execute_stmt(
@@ -52,37 +48,10 @@ impl Instance {
) -> Result<Output> {
match stmt {
QueryStatement::Sql(Statement::Insert(insert)) => {
let requests = self
.sql_handler
.insert_to_requests(self.catalog_manager.clone(), *insert, query_ctx.clone())
.await?;
match requests {
InsertRequests::Request(request) => {
self.sql_handler.execute(request, query_ctx.clone()).await
}
InsertRequests::Stream(mut s) => {
let mut rows = 0;
while let Some(request) = s.next().await {
match self
.sql_handler
.execute(request?, query_ctx.clone())
.await?
{
Output::AffectedRows(n) => {
rows += n;
}
_ => unreachable!(),
}
}
Ok(Output::AffectedRows(rows))
}
}
}
QueryStatement::Sql(Statement::Delete(delete)) => {
let request = SqlRequest::Delete(*delete);
self.sql_handler.execute(request, query_ctx).await
let request =
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
.await?;
self.sql_handler.insert(request).await
}
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
let request = CreateDatabaseRequest {
@@ -160,43 +129,59 @@ impl Instance {
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
}
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
CopyTable::To(copy_table) => {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
let file_name = copy_table.file_name().to_string();
QueryStatement::Sql(Statement::Copy(copy_table)) => {
let req = match copy_table {
CopyTable::To(copy_table) => {
let CopyTableArgument {
location,
connection,
pattern,
table_name,
..
} = copy_table;
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&table_name, query_ctx.clone())?;
CopyTableRequest {
catalog_name,
schema_name,
table_name,
location,
connection,
pattern,
direction: CopyDirection::Export,
}
}
CopyTable::From(copy_table) => {
let CopyTableArgument {
location,
connection,
pattern,
table_name,
..
} = copy_table;
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&table_name, query_ctx.clone())?;
CopyTableRequest {
catalog_name,
schema_name,
table_name,
location,
connection,
pattern,
direction: CopyDirection::Import,
}
}
};
let req = CopyTableRequest {
catalog_name,
schema_name,
table_name,
file_name,
};
self.sql_handler
.execute(SqlRequest::CopyTable(req), query_ctx)
.await
}
CopyTable::From(copy_table) => {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&copy_table.table_name, query_ctx.clone())?;
let req = CopyTableFromRequest {
catalog_name,
schema_name,
table_name,
connection: copy_table.connection,
pattern: copy_table.pattern,
from: copy_table.from,
};
self.sql_handler
.execute(SqlRequest::CopyTableFrom(req), query_ctx)
.await
}
},
self.sql_handler
.execute(SqlRequest::CopyTable(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::Query(_))
| QueryStatement::Sql(Statement::Explain(_))
| QueryStatement::Sql(Statement::Use(_))
| QueryStatement::Sql(Statement::Tql(_))
| QueryStatement::Sql(Statement::Delete(_))
| QueryStatement::Promql(_) => unreachable!(),
}
}
@@ -213,10 +198,13 @@ impl Instance {
let engine = self.query_engine();
let plan = engine
.planner()
.plan(stmt, query_ctx)
.plan(stmt, query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
engine.execute(&plan).await.context(ExecuteStatementSnafu)
engine
.execute(plan, query_ctx)
.await
.context(ExecuteStatementSnafu)
}
// TODO(ruihang): merge this and `execute_promql` after #951 landed
@@ -249,10 +237,13 @@ impl Instance {
let engine = self.query_engine();
let plan = engine
.planner()
.plan(stmt, query_ctx)
.plan(stmt, query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
engine.execute(&plan).await.context(ExecuteStatementSnafu)
engine
.execute(plan, query_ctx)
.await
.context(ExecuteStatementSnafu)
}
}

View File

@@ -95,6 +95,7 @@ impl Instance {
schema_name: expr.schema_name,
table_name,
region_number: expr.region_id,
wait: None,
};
self.sql_handler()
.execute(SqlRequest::FlushTable(req), QueryContext::arc())

View File

@@ -17,11 +17,9 @@ use common_error::prelude::BoxedError;
use common_procedure::ProcedureManagerRef;
use common_query::Output;
use common_telemetry::error;
use query::query_engine::QueryEngineRef;
use query::sql::{describe_table, show_databases, show_tables};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use sql::statements::delete::Delete;
use sql::statements::describe::DescribeTable;
use sql::statements::show::{ShowDatabases, ShowTables};
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
@@ -34,17 +32,15 @@ use crate::error::{
use crate::instance::sql::table_idents_to_full_name;
mod alter;
mod copy_table;
mod copy_table_from;
mod copy_table_to;
mod create;
mod delete;
mod drop_table;
mod flush_table;
pub(crate) mod insert;
#[derive(Debug)]
pub enum SqlRequest {
Insert(InsertRequest),
CreateTable(CreateTableRequest),
CreateDatabase(CreateDatabaseRequest),
Alter(AlterTableRequest),
@@ -53,16 +49,14 @@ pub enum SqlRequest {
ShowDatabases(ShowDatabases),
ShowTables(ShowTables),
DescribeTable(DescribeTable),
Delete(Delete),
CopyTable(CopyTableRequest),
CopyTableFrom(CopyTableFromRequest),
}
// Handler to execute SQL statements except queries
#[derive(Clone)]
pub struct SqlHandler {
table_engine: TableEngineRef,
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
engine_procedure: TableEngineProcedureRef,
procedure_manager: Option<ProcedureManagerRef>,
}
@@ -71,14 +65,12 @@ impl SqlHandler {
pub fn new(
table_engine: TableEngineRef,
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
engine_procedure: TableEngineProcedureRef,
procedure_manager: Option<ProcedureManagerRef>,
) -> Self {
Self {
table_engine,
catalog_manager,
query_engine,
engine_procedure,
procedure_manager,
}
@@ -90,14 +82,14 @@ impl SqlHandler {
// there, instead of executing here in a "static" fashion.
pub async fn execute(&self, request: SqlRequest, query_ctx: QueryContextRef) -> Result<Output> {
let result = match request {
SqlRequest::Insert(req) => self.insert(req).await,
SqlRequest::CreateTable(req) => self.create_table(req).await,
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
SqlRequest::Alter(req) => self.alter(req).await,
SqlRequest::DropTable(req) => self.drop_table(req).await,
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
SqlRequest::CopyTable(req) => self.copy_table(req).await,
SqlRequest::CopyTableFrom(req) => self.copy_table_from(req).await,
SqlRequest::CopyTable(req) => match req.direction {
CopyDirection::Export => self.copy_table_to(req).await,
CopyDirection::Import => self.copy_table_from(req).await,
},
SqlRequest::ShowDatabases(req) => {
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
}
@@ -149,239 +141,3 @@ impl SqlHandler {
.context(CloseTableEngineSnafu)
}
}
#[cfg(test)]
mod tests {
use std::any::Any;
use std::sync::Arc;
use catalog::{CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_test_util::temp_dir::create_temp_dir;
use common_time::timestamp::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::value::Value;
use futures::StreamExt;
use log_store::NoopLogStore;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use query::parser::{QueryLanguageParser, QueryStatement};
use query::QueryEngineFactory;
use session::context::QueryContext;
use sql::statements::statement::Statement;
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::error::Result as TableResult;
use table::metadata::TableInfoRef;
use table::Table;
use super::*;
use crate::error::Error;
use crate::sql::insert::InsertRequests;
struct DemoTable;
#[async_trait::async_trait]
impl Table for DemoTable {
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
let column_schemas = vec![
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
true,
)
.with_time_index(true),
];
Arc::new(
SchemaBuilder::try_from(column_schemas)
.unwrap()
.build()
.unwrap(),
)
}
fn table_info(&self) -> TableInfoRef {
unimplemented!()
}
async fn scan(
&self,
_projection: Option<&Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> TableResult<PhysicalPlanRef> {
unimplemented!();
}
}
#[tokio::test]
async fn test_statement_to_request() {
let dir = create_temp_dir("setup_test_engine_and_table");
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor).finish();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let sql = r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
"#;
let table_engine = Arc::new(MitoEngine::<EngineImpl<NoopLogStore>>::new(
TableEngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
compaction_scheduler,
),
object_store,
));
let catalog_list = Arc::new(
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
.await
.unwrap(),
);
catalog_list.start().await.unwrap();
assert!(catalog_list
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "demo".to_string(),
table_id: 1,
table: Arc::new(DemoTable),
})
.await
.unwrap());
let factory = QueryEngineFactory::new(catalog_list.clone());
let query_engine = factory.query_engine();
let sql_handler = SqlHandler::new(
table_engine.clone(),
catalog_list.clone(),
query_engine.clone(),
table_engine,
None,
);
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(Statement::Insert(i)) => i,
_ => {
unreachable!()
}
};
let request = sql_handler
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
.await
.unwrap();
match request {
InsertRequests::Request(SqlRequest::Insert(req)) => {
assert_eq!(req.table_name, "demo");
let columns_values = req.columns_values;
assert_eq!(4, columns_values.len());
let hosts = &columns_values["host"];
assert_eq!(2, hosts.len());
assert_eq!(Value::from("host1"), hosts.get(0));
assert_eq!(Value::from("host2"), hosts.get(1));
let cpus = &columns_values["cpu"];
assert_eq!(2, cpus.len());
assert_eq!(Value::from(66.6f64), cpus.get(0));
assert_eq!(Value::from(88.8f64), cpus.get(1));
let memories = &columns_values["memory"];
assert_eq!(2, memories.len());
assert_eq!(Value::from(1024f64), memories.get(0));
assert_eq!(Value::from(333.3f64), memories.get(1));
let ts = &columns_values["ts"];
assert_eq!(2, ts.len());
assert_eq!(
Value::from(Timestamp::new_millisecond(1655276557000i64)),
ts.get(0)
);
assert_eq!(
Value::from(Timestamp::new_millisecond(1655276558000i64)),
ts.get(1)
);
}
_ => {
panic!("Not supposed to reach here")
}
}
// test insert into select
// type mismatch
let sql = "insert into demo(ts) select number from numbers limit 3";
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(Statement::Insert(i)) => i,
_ => {
unreachable!()
}
};
let request = sql_handler
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
.await
.unwrap();
match request {
InsertRequests::Stream(mut stream) => {
assert!(matches!(
stream.next().await.unwrap().unwrap_err(),
Error::ColumnTypeMismatch { .. }
));
}
_ => unreachable!(),
}
let sql = "insert into demo(cpu) select cast(number as double) from numbers limit 3";
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(Statement::Insert(i)) => i,
_ => {
unreachable!()
}
};
let request = sql_handler
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
.await
.unwrap();
match request {
InsertRequests::Stream(mut stream) => {
let mut times = 0;
while let Some(Ok(SqlRequest::Insert(req))) = stream.next().await {
times += 1;
assert_eq!(req.table_name, "demo");
let columns_values = req.columns_values;
assert_eq!(1, columns_values.len());
let memories = &columns_values["cpu"];
assert_eq!(3, memories.len());
assert_eq!(Value::from(0.0f64), memories.get(0));
assert_eq!(Value::from(1.0f64), memories.get(1));
assert_eq!(Value::from(2.0f64), memories.get(2));
}
assert_eq!(1, times);
}
_ => unreachable!(),
}
}
}

View File

@@ -15,35 +15,26 @@
use std::collections::HashMap;
use async_compat::CompatExt;
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::{build_backend, parse_url};
use common_datasource::util::find_dir_and_filename;
use common_query::Output;
use common_recordbatch::error::DataTypesSnafu;
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
use datatypes::arrow::record_batch::RecordBatch;
use datatypes::vectors::{Helper, VectorRef};
use futures::future;
use futures_util::TryStreamExt;
use object_store::services::{Fs, S3};
use object_store::{Object, ObjectStore, ObjectStoreBuilder};
use regex::Regex;
use snafu::{ensure, ResultExt};
use table::engine::TableReference;
use table::requests::{CopyTableFromRequest, InsertRequest};
use table::requests::{CopyTableRequest, InsertRequest};
use tokio::io::BufReader;
use url::{ParseError, Url};
use crate::error::{self, Result};
use crate::sql::SqlHandler;
const S3_SCHEMA: &str = "S3";
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
impl SqlHandler {
pub(crate) async fn copy_table_from(&self, req: CopyTableFromRequest) -> Result<Output> {
pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
schema: &req.schema_name,
@@ -51,9 +42,29 @@ impl SqlHandler {
};
let table = self.get_table(&table_ref)?;
let datasource = DataSource::new(&req.from, req.pattern, req.connection)?;
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
let objects = datasource.list().await?;
let object_store =
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
let (dir, filename) = find_dir_and_filename(&path);
let regex = req
.pattern
.as_ref()
.map(|x| Regex::new(x))
.transpose()
.context(error::BuildRegexSnafu)?;
let source = if let Some(filename) = filename {
Source::Filename(filename)
} else {
Source::Dir
};
let lister = Lister::new(object_store, source, dir, regex);
let objects = lister.list().await.context(error::ListObjectsSnafu)?;
let mut buf: Vec<RecordBatch> = Vec::new();
@@ -131,315 +142,3 @@ impl SqlHandler {
Ok(Output::AffectedRows(result.iter().sum()))
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum Source {
Filename(String),
Dir,
}
struct DataSource {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}
impl DataSource {
fn from_path(url: &str, regex: Option<Regex>) -> Result<DataSource> {
let result = if url.ends_with('/') {
Url::from_directory_path(url)
} else {
Url::from_file_path(url)
};
match result {
Ok(url) => {
let path = url.path();
let (path, filename) = DataSource::find_dir_and_filename(path);
let source = if let Some(filename) = filename {
Source::Filename(filename)
} else {
Source::Dir
};
let accessor = Fs::default()
.root(&path)
.build()
.context(error::BuildBackendSnafu)?;
Ok(DataSource {
object_store: ObjectStore::new(accessor).finish(),
source,
path,
regex,
})
}
Err(()) => error::InvalidPathSnafu {
path: url.to_string(),
}
.fail(),
}
}
fn build_s3_backend(
host: Option<&str>,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
if let Some(bucket) = host {
builder.bucket(bucket);
}
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
let accessor = builder.build().context(error::BuildBackendSnafu)?;
Ok(ObjectStore::new(accessor).finish())
}
fn from_url(
url: Url,
regex: Option<Regex>,
connection: HashMap<String, String>,
) -> Result<DataSource> {
let host = url.host_str();
let path = url.path();
let schema = url.scheme();
let (dir, filename) = DataSource::find_dir_and_filename(path);
let source = if let Some(filename) = filename {
Source::Filename(filename)
} else {
Source::Dir
};
let object_store = match schema.to_uppercase().as_str() {
S3_SCHEMA => DataSource::build_s3_backend(host, &dir, connection)?,
_ => {
return error::UnsupportedBackendProtocolSnafu {
protocol: schema.to_string(),
}
.fail()
}
};
Ok(DataSource {
object_store,
source,
path: dir,
regex,
})
}
pub fn new(
url: &str,
pattern: Option<String>,
connection: HashMap<String, String>,
) -> Result<DataSource> {
let regex = if let Some(pattern) = pattern {
let regex = Regex::new(&pattern).context(error::BuildRegexSnafu)?;
Some(regex)
} else {
None
};
let result = Url::parse(url);
match result {
Ok(url) => DataSource::from_url(url, regex, connection),
Err(err) => {
if ParseError::RelativeUrlWithoutBase == err {
DataSource::from_path(url, regex)
} else {
Err(error::Error::InvalidUrl {
url: url.to_string(),
source: err,
})
}
}
}
}
pub async fn list(&self) -> Result<Vec<Object>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.object("/")
.list()
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
streamer
.try_filter(|f| {
let res = if let Some(regex) = &self.regex {
regex.is_match(f.name())
} else {
true
};
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
let obj = self.object_store.object(filename);
Ok(vec![obj])
}
}
}
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
if path.is_empty() {
("/".to_string(), None)
} else if path.ends_with('/') {
(path.to_string(), None)
} else if let Some(idx) = path.rfind('/') {
(
path[..idx + 1].to_string(),
Some(path[idx + 1..].to_string()),
)
} else {
("/".to_string(), Some(path.to_string()))
}
}
}
#[cfg(test)]
mod tests {
use url::Url;
use super::*;
#[test]
fn test_parse_uri() {
struct Test<'a> {
uri: &'a str,
expected_path: &'a str,
expected_schema: &'a str,
}
let tests = [
Test {
uri: "s3://bucket/to/path/",
expected_path: "/to/path/",
expected_schema: "s3",
},
Test {
uri: "fs:///to/path/",
expected_path: "/to/path/",
expected_schema: "fs",
},
Test {
uri: "fs:///to/path/file",
expected_path: "/to/path/file",
expected_schema: "fs",
},
];
for test in tests {
let parsed_uri = Url::parse(test.uri).unwrap();
assert_eq!(parsed_uri.path(), test.expected_path);
assert_eq!(parsed_uri.scheme(), test.expected_schema);
}
}
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
assert_eq!(parsed.path(), "/to/path/file");
let parsed = Url::from_directory_path("/to/path/").unwrap();
assert_eq!(parsed.path(), "/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {
path: &'a str,
expected_dir: &'a str,
expected_filename: Option<String>,
}
let tests = [
Test {
path: "to/path/",
expected_dir: "to/path/",
expected_filename: None,
},
Test {
path: "to/path/filename",
expected_dir: "to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/to/path/filename",
expected_dir: "/to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/",
expected_dir: "/",
expected_filename: None,
},
Test {
path: "filename",
expected_dir: "/",
expected_filename: Some("filename".into()),
},
Test {
path: "",
expected_dir: "/",
expected_filename: None,
},
];
for test in tests {
let (path, filename) = DataSource::find_dir_and_filename(test.path);
assert_eq!(test.expected_dir, path);
assert_eq!(test.expected_filename, filename)
}
}
}
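The `find_dir_and_filename` helper deleted here now lives in `common_datasource::util` (imported at the top of this file); its splitting rule is simple enough to restate as a standalone sketch, using the same cases as the removed test:

// Standalone restatement of the dir/filename split: a trailing '/' (or an empty
// path) means "directory only"; otherwise everything after the last '/' is the file name.
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
    if path.is_empty() {
        ("/".to_string(), None)
    } else if path.ends_with('/') {
        (path.to_string(), None)
    } else if let Some(idx) = path.rfind('/') {
        (
            path[..idx + 1].to_string(),
            Some(path[idx + 1..].to_string()),
        )
    } else {
        ("/".to_string(), Some(path.to_string()))
    }
}

fn main() {
    assert_eq!(find_dir_and_filename("to/path/"), ("to/path/".to_string(), None));
    assert_eq!(
        find_dir_and_filename("/to/path/filename"),
        ("/to/path/".to_string(), Some("filename".to_string()))
    );
    assert_eq!(
        find_dir_and_filename("filename"),
        ("/".to_string(), Some("filename".to_string()))
    );
    assert_eq!(find_dir_and_filename(""), ("/".to_string(), None));
}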

View File

@@ -14,6 +14,8 @@
use std::pin::Pin;
use common_datasource;
use common_datasource::object_store::{build_backend, parse_url};
use common_query::physical_plan::SessionContext;
use common_query::Output;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
@@ -22,8 +24,7 @@ use datafusion::parquet::basic::{Compression, Encoding};
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::physical_plan::RecordBatchStream;
use futures::TryStreamExt;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use object_store::ObjectStore;
use snafu::ResultExt;
use table::engine::TableReference;
use table::requests::CopyTableRequest;
@@ -32,7 +33,7 @@ use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
schema: &req.schema_name,
@@ -52,13 +53,11 @@ impl SqlHandler {
.context(error::TableScanExecSnafu)?;
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
let accessor = Builder::default()
.root("/")
.build()
.context(error::BuildBackendSnafu)?;
let object_store = ObjectStore::new(accessor).finish();
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
let object_store =
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
let mut parquet_writer = ParquetWriter::new(path.to_string(), stream, object_store);
// TODO(jiachun):
// For now, COPY is implemented synchronously.
// When copying a large table, it will block for a long time.

View File

@@ -1,142 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_query::Output;
use datatypes::data_type::DataType;
use datatypes::prelude::VectorRef;
use datatypes::vectors::StringVector;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use sql::ast::{BinaryOperator, Expr, Value};
use sql::statements::delete::Delete;
use sql::statements::sql_value_to_value;
use table::engine::TableReference;
use table::requests::DeleteRequest;
use table::TableRef;
use crate::error::{ColumnNotFoundSnafu, DeleteSnafu, InvalidSqlSnafu, NotSupportSqlSnafu, Result};
use crate::instance::sql::table_idents_to_full_name;
use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn delete(&self, query_ctx: QueryContextRef, stmt: Delete) -> Result<Output> {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
let table_ref = TableReference {
catalog: &catalog_name.to_string(),
schema: &schema_name.to_string(),
table: &table_name.to_string(),
};
let table = self.get_table(&table_ref)?;
let req = DeleteRequest {
key_column_values: parse_selection(stmt.selection(), &table)?,
};
let affected_rows = table.delete(req).await.with_context(|_| DeleteSnafu {
table_name: table_ref.to_string(),
})?;
Ok(Output::AffectedRows(affected_rows))
}
}
/// Parses the selection. The currently supported format is `tagkey1 = 'tagvalue1' and 'ts' = 'value'`
/// (only `=` and `and` are used in the WHERE clause, and all columns needed by the key must be provided).
fn parse_selection(
selection: &Option<Expr>,
table: &TableRef,
) -> Result<HashMap<String, VectorRef>> {
let mut key_column_values = HashMap::new();
if let Some(expr) = selection {
parse_expr(expr, &mut key_column_values, table)?;
}
Ok(key_column_values)
}
fn parse_expr(
expr: &Expr,
key_column_values: &mut HashMap<String, VectorRef>,
table: &TableRef,
) -> Result<()> {
// match BinaryOp
if let Expr::BinaryOp { left, op, right } = expr {
match (&**left, op, &**right) {
// match And operator
(Expr::BinaryOp { .. }, BinaryOperator::And, Expr::BinaryOp { .. }) => {
parse_expr(left, key_column_values, table)?;
parse_expr(right, key_column_values, table)?;
return Ok(());
}
// match Eq operator
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Value(value)) => {
key_column_values.insert(
column_name.to_string(),
value_to_vector(&column_name.to_string(), value, table)?,
);
return Ok(());
}
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Identifier(value)) => {
key_column_values.insert(
column_name.to_string(),
Arc::new(StringVector::from(vec![value.to_string()])),
);
return Ok(());
}
_ => {}
}
}
NotSupportSqlSnafu {
msg: format!(
"Unsupported sql expr: {expr}, correct format is tagkey1 = tagvalue1 and ts = value"
),
}
.fail()
}
/// Parses a SQL value into a single-element vector of the column's data type.
fn value_to_vector(column_name: &String, sql_value: &Value, table: &TableRef) -> Result<VectorRef> {
let schema = table.schema();
let column_schema =
schema
.column_schema_by_name(column_name)
.with_context(|| ColumnNotFoundSnafu {
table_name: table.table_info().name.clone(),
column_name: column_name.to_string(),
})?;
let data_type = &column_schema.data_type;
let value = sql_value_to_value(column_name, data_type, sql_value);
match value {
Ok(value) => {
let mut vec = data_type.create_mutable_vector(1);
if vec.try_push_value_ref(value.as_value_ref()).is_err() {
return InvalidSqlSnafu {
msg: format!(
"invalid sql, column name is {column_name}, value is {sql_value}",
),
}
.fail();
}
Ok(vec.to_vector())
}
_ => InvalidSqlSnafu {
msg: format!("invalid sql, column name is {column_name}, value is {sql_value}",),
}
.fail(),
}
}

View File

@@ -12,10 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use catalog::SchemaProviderRef;
use common_query::Output;
use snafu::{OptionExt, ResultExt};
use table::engine::TableReference;
use table::requests::FlushTableRequest;
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
@@ -23,32 +22,22 @@ use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
if let Some(table) = &req.table_name {
self.flush_table_inner(
&req.catalog_name,
&req.schema_name,
table,
req.region_number,
)
.await?;
} else {
let schema = self
.catalog_manager
.schema(&req.catalog_name, &req.schema_name)
.context(CatalogSnafu)?
.context(DatabaseNotFoundSnafu {
catalog: &req.catalog_name,
schema: &req.schema_name,
})?;
let schema = self
.catalog_manager
.schema(&req.catalog_name, &req.schema_name)
.context(CatalogSnafu)?
.context(DatabaseNotFoundSnafu {
catalog: &req.catalog_name,
schema: &req.schema_name,
})?;
if let Some(table) = &req.table_name {
self.flush_table_inner(schema, table, req.region_number, req.wait)
.await?;
} else {
let all_table_names = schema.table_names().context(CatalogSnafu)?;
futures::future::join_all(all_table_names.iter().map(|table| {
self.flush_table_inner(
&req.catalog_name,
&req.schema_name,
table,
req.region_number,
)
self.flush_table_inner(schema.clone(), table, req.region_number, req.wait)
}))
.await
.into_iter()
@@ -59,25 +48,18 @@ impl SqlHandler {
async fn flush_table_inner(
&self,
catalog: &str,
schema: &str,
table: &str,
schema: SchemaProviderRef,
table_name: &str,
region: Option<u32>,
wait: Option<bool>,
) -> Result<()> {
if schema == DEFAULT_SCHEMA_NAME && table == "numbers" {
return Ok(());
}
let table_ref = TableReference {
catalog,
schema,
table,
};
let full_table_name = table_ref.to_string();
let table = self.get_table(&table_ref)?;
table.flush(region).await.context(error::FlushTableSnafu {
table_name: full_table_name,
})
schema
.table(table_name)
.await
.context(error::FindTableSnafu { table_name })?
.context(error::TableNotFoundSnafu { table_name })?
.flush(region, wait)
.await
.context(error::FlushTableSnafu { table_name })
}
}

View File

@@ -11,49 +11,31 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::pin::Pin;
use catalog::CatalogManagerRef;
use common_catalog::format_full_table_name;
use common_query::Output;
use common_recordbatch::RecordBatch;
use datafusion_expr::type_coercion::binary::coerce_types;
use datafusion_expr::Operator;
use datatypes::data_type::DataType;
use datatypes::schema::ColumnSchema;
use datatypes::vectors::MutableVector;
use futures::stream::{self, StreamExt};
use futures::Stream;
use query::parser::QueryStatement;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
use sql::statements::{self};
use table::engine::TableReference;
use table::requests::*;
use table::TableRef;
use crate::error::{
CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
ExecuteLogicalPlanSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
ParseSqlValueSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
ColumnValuesNumberMismatchSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
ParseSqlValueSnafu, Result, TableNotFoundSnafu,
};
use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
use crate::sql::{table_idents_to_full_name, SqlHandler};
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
type InsertRequestStream = Pin<Box<dyn Stream<Item = Result<SqlRequest>> + Send>>;
pub(crate) enum InsertRequests {
// Single request
Request(SqlRequest),
// Streaming requests
Stream(InsertRequestStream),
}
impl SqlHandler {
pub(crate) async fn insert(&self, req: InsertRequest) -> Result<Output> {
// FIXME(dennis): table_ref is used in InsertSnafu and the req is consumed
@@ -77,7 +59,7 @@ impl SqlHandler {
table_ref: TableReference,
table: &TableRef,
stmt: Insert,
) -> Result<SqlRequest> {
) -> Result<InsertRequest> {
let values = stmt
.values_body()
.context(ParseSqlValueSnafu)?
@@ -129,7 +111,7 @@ impl SqlHandler {
}
}
Ok(SqlRequest::Insert(InsertRequest {
Ok(InsertRequest {
catalog_name: table_ref.catalog.to_string(),
schema_name: table_ref.schema.to_string(),
table_name: table_ref.table.to_string(),
@@ -138,150 +120,14 @@ impl SqlHandler {
.map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
.collect(),
region_number: 0,
}))
})
}
fn build_request_from_batch(
stmt: Insert,
table: TableRef,
batch: RecordBatch,
query_ctx: QueryContextRef,
) -> Result<SqlRequest> {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
let schema = table.schema();
let columns: Vec<_> = if stmt.columns().is_empty() {
schema
.column_schemas()
.iter()
.map(|c| c.name.to_string())
.collect()
} else {
stmt.columns().iter().map(|c| (*c).clone()).collect()
};
let columns_num = columns.len();
ensure!(
batch.num_columns() == columns_num,
ColumnValuesNumberMismatchSnafu {
columns: columns_num,
values: batch.num_columns(),
}
);
let batch_schema = &batch.schema;
let batch_columns = batch_schema.column_schemas();
assert_eq!(batch_columns.len(), columns_num);
let mut columns_values = HashMap::with_capacity(columns_num);
for (i, column_name) in columns.into_iter().enumerate() {
let column_schema = schema
.column_schema_by_name(&column_name)
.with_context(|| ColumnNotFoundSnafu {
table_name: &table_name,
column_name: &column_name,
})?;
let expect_datatype = column_schema.data_type.as_arrow_type();
// It's safe to retrieve the column schema by index; we already
// checked above that the column counts are the same.
let batch_datatype = batch_columns[i].data_type.as_arrow_type();
let coerced_type = coerce_types(&expect_datatype, &Operator::Eq, &batch_datatype)
.map_err(|_| Error::ColumnTypeMismatch {
column: column_name.clone(),
expected: column_schema.data_type.clone(),
actual: batch_columns[i].data_type.clone(),
})?;
ensure!(
expect_datatype == coerced_type,
ColumnTypeMismatchSnafu {
column: column_name,
expected: column_schema.data_type.clone(),
actual: batch_columns[i].data_type.clone(),
}
);
let vector = batch
.column(i)
.cast(&column_schema.data_type)
.map_err(|_| Error::ColumnTypeMismatch {
column: column_name.clone(),
expected: column_schema.data_type.clone(),
actual: batch_columns[i].data_type.clone(),
})?;
columns_values.insert(column_name, vector);
}
Ok(SqlRequest::Insert(InsertRequest {
catalog_name,
schema_name,
table_name,
columns_values,
region_number: 0,
}))
}
// FIXME(dennis): move it to frontend when refactor is done.
async fn build_stream_from_query(
&self,
table: TableRef,
stmt: Insert,
query_ctx: QueryContextRef,
) -> Result<InsertRequestStream> {
let query = stmt
.query_body()
.context(ParseSqlValueSnafu)?
.context(MissingInsertBodySnafu)?;
let logical_plan = self
.query_engine
.planner()
.plan(
QueryStatement::Sql(Statement::Query(Box::new(query))),
query_ctx.clone(),
)
.await
.context(PlanStatementSnafu)?;
let output = self
.query_engine
.execute(&logical_plan)
.await
.context(ExecuteLogicalPlanSnafu)?;
let stream: InsertRequestStream = match output {
Output::RecordBatches(batches) => {
Box::pin(stream::iter(batches.take()).map(move |batch| {
Self::build_request_from_batch(
stmt.clone(),
table.clone(),
batch,
query_ctx.clone(),
)
}))
}
Output::Stream(stream) => Box::pin(stream.map(move |batch| {
Self::build_request_from_batch(
stmt.clone(),
table.clone(),
batch.context(CollectRecordsSnafu)?,
query_ctx.clone(),
)
})),
_ => unreachable!(),
};
Ok(stream)
}
pub(crate) async fn insert_to_requests(
&self,
pub async fn insert_to_request(
catalog_manager: CatalogManagerRef,
stmt: Insert,
query_ctx: QueryContextRef,
) -> Result<InsertRequests> {
) -> Result<InsertRequest> {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(stmt.table_name(), query_ctx.clone())?;
@@ -293,16 +139,8 @@ impl SqlHandler {
table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
})?;
if stmt.is_insert_select() {
Ok(InsertRequests::Stream(
self.build_stream_from_query(table, stmt, query_ctx).await?,
))
} else {
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
Ok(InsertRequests::Request(Self::build_request_from_values(
table_ref, &table, stmt,
)?))
}
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
Self::build_request_from_values(table_ref, &table, stmt)
}
}

View File

@@ -12,15 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env;
use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util;
use common_telemetry::logging;
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
use query::parser::{QueryLanguageParser, QueryStatement};
use session::context::QueryContext;
use session::context::{QueryContext, QueryContextRef};
use snafu::ResultExt;
use sql::statements::statement::Statement;
@@ -215,20 +217,20 @@ async fn test_execute_insert_by_select() {
try_execute_sql(&instance, "insert into demo2(host) select * from demo1")
.await
.unwrap_err(),
Error::ColumnValuesNumberMismatch { .. }
Error::PlanStatement { .. }
));
assert!(matches!(
try_execute_sql(&instance, "insert into demo2 select cpu,memory from demo1")
.await
.unwrap_err(),
Error::ColumnValuesNumberMismatch { .. }
Error::PlanStatement { .. }
));
assert!(matches!(
try_execute_sql(&instance, "insert into demo2(ts) select memory from demo1")
.await
.unwrap_err(),
Error::ColumnTypeMismatch { .. }
Error::PlanStatement { .. }
));
let output = execute_sql(&instance, "insert into demo2 select * from demo1").await;
@@ -749,7 +751,7 @@ async fn test_delete() {
let output = execute_sql(
&instance,
"delete from test_table where host = host1 and ts = 1655276557000 ",
"delete from test_table where host = 'host1' and ts = 1655276557000 ",
)
.await;
assert!(matches!(output, Output::AffectedRows(1)));
@@ -768,117 +770,141 @@ async fn test_delete() {
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_to() {
let instance = setup_test_instance("test_execute_copy_to").await;
async fn test_execute_copy_to_s3() {
logging::init_default_ut_logging();
if let Ok(bucket) = env::var("GT_S3_BUCKET") {
if !bucket.is_empty() {
let instance = setup_test_instance("test_execute_copy_to_s3").await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
"#,
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
let key = env::var("GT_S3_ACCESS_KEY").unwrap();
let url =
env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
// exports
let data_dir = instance.data_tmp_dir().path();
let root = uuid::Uuid::new_v4().to_string();
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
// exports
let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_from() {
let instance = setup_test_instance("test_execute_copy_from").await;
async fn test_execute_copy_from_s3() {
logging::init_default_ut_logging();
if let Ok(bucket) = env::var("GT_S3_BUCKET") {
if !bucket.is_empty() {
let instance = setup_test_instance("test_execute_copy_from_s3").await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
// setups
execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
.await;
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
let output = execute_sql(
&instance,
r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
"#,
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
// export
let data_dir = instance.data_tmp_dir().path();
// export
let root = uuid::Uuid::new_v4().to_string();
let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
let key = env::var("GT_S3_ACCESS_KEY").unwrap();
let url =
env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
logging::info!("Copy table to s3: {}", copy_to_stmt);
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(&instance, &copy_to_stmt).await;
assert!(matches!(output, Output::AffectedRows(2)));
struct Test<'a> {
sql: &'a str,
table_name: &'a str,
}
let tests = [
Test {
sql: &format!(
"Copy with_filename FROM '{}/export/demo.parquet_1_2'",
data_dir.display()
),
table_name: "with_filename",
},
Test {
sql: &format!("Copy with_path FROM '{}/export/'", data_dir.display()),
table_name: "with_path",
},
Test {
sql: &format!(
"Copy with_pattern FROM '{}/export/' WITH (PATTERN = 'demo.*')",
data_dir.display()
),
table_name: "with_pattern",
},
];
struct Test<'a> {
sql: &'a str,
table_name: &'a str,
}
let tests = [
Test {
sql: &format!(
"Copy with_filename FROM 's3://{}/{}/export/demo.parquet_1_2'",
bucket, root
),
table_name: "with_filename",
},
Test {
sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root),
table_name: "with_path",
},
Test {
sql: &format!(
"Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')",
bucket, root
),
table_name: "with_pattern",
},
];
for test in tests {
// import
execute_sql(
&instance,
&format!(
for test in tests {
// import
execute_sql(
&instance,
&format!(
"create table {}(host string, cpu double, memory double, ts timestamp time index);",
test.table_name
),
)
.await;
)
.await;
let sql = format!(
"{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')",
test.sql, key_id, key, url
);
logging::info!("Running sql: {}", sql);
let output = execute_sql(&instance, test.sql).await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(&instance, &sql).await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = execute_sql(
&instance,
&format!("select * from {} order by ts", test.table_name),
)
.await;
let expected = "\
let output = execute_sql(
&instance,
&format!("select * from {} order by ts", test.table_name),
)
.await;
let expected = "\
+-------+------+--------+---------------------+
| host | cpu | memory | ts |
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
+-------+------+--------+---------------------+"
.to_string();
check_output_stream(output, expected).await;
.to_string();
check_output_stream(output, expected).await;
}
}
}
}
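The PATTERN option in the `with_pattern` case above is compiled with the `regex` crate and applied to file names while listing the prefix. A minimal sketch of that filter (the file names here are illustrative):

use regex::Regex;

fn main() {
    // `COPY ... WITH (PATTERN = 'demo.*')` keeps only listed objects whose
    // names match the pattern.
    let pattern = Regex::new("demo.*").unwrap();
    let names = ["demo.parquet_1_2", "unrelated.parquet"];
    let matched: Vec<&str> = names
        .iter()
        .copied()
        .filter(|name| pattern.is_match(name))
        .collect();
    assert_eq!(matched, vec!["demo.parquet_1_2"]);
}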
@@ -936,16 +962,30 @@ async fn try_execute_sql_in_db(
) -> Result<Output, crate::error::Error> {
let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
async fn plan_exec(
instance: &MockInstance,
stmt: QueryStatement,
query_ctx: QueryContextRef,
) -> Result<Output, Error> {
let engine = instance.inner().query_engine();
let plan = engine
.planner()
.plan(stmt, query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
engine
.execute(plan, query_ctx)
.await
.context(ExecuteLogicalPlanSnafu)
}
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
match stmt {
QueryStatement::Sql(Statement::Query(_)) => {
let engine = instance.inner().query_engine();
let plan = engine
.planner()
.plan(stmt, query_ctx)
.await
.context(PlanStatementSnafu)?;
engine.execute(&plan).await.context(ExecuteLogicalPlanSnafu)
QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Sql(Statement::Delete(_)) => {
plan_exec(instance, stmt, query_ctx).await
}
QueryStatement::Sql(Statement::Insert(ref insert)) if insert.is_insert_select() => {
plan_exec(instance, stmt, query_ctx).await
}
_ => instance.inner().execute_stmt(stmt, query_ctx).await,
}

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_query::Output;
@@ -23,7 +24,6 @@ use datatypes::schema::{ColumnSchema, RawSchema};
use mito::config::EngineConfig;
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::QueryEngineFactory;
use servers::Mode;
use session::context::QueryContext;
use snafu::ResultExt;
@@ -64,6 +64,8 @@ impl MockInstance {
store: ObjectStoreConfig::File(FileConfig {
data_dir: procedure_dir.path().to_str().unwrap().to_string(),
}),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
});
let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
@@ -84,7 +86,7 @@ impl MockInstance {
match stmt {
QueryStatement::Sql(Statement::Query(_)) => {
let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
engine.execute(&plan).await.unwrap()
engine.execute(plan, QueryContext::arc()).await.unwrap()
}
QueryStatement::Sql(Statement::Tql(tql)) => {
let plan = match tql {
@@ -100,7 +102,7 @@ impl MockInstance {
}
Tql::Explain(_) => unimplemented!(),
};
engine.execute(&plan).await.unwrap()
engine.execute(plan, QueryContext::arc()).await.unwrap()
}
_ => self
.inner()
@@ -113,10 +115,6 @@ impl MockInstance {
pub(crate) fn inner(&self) -> &Instance {
&self.instance
}
pub(crate) fn data_tmp_dir(&self) -> &TempDir {
&self._guard._data_tmp_dir
}
}
struct TestGuard {
@@ -202,17 +200,7 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
.await
.unwrap(),
);
let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
let factory = QueryEngineFactory::new(catalog_list);
SqlHandler::new(
mock_engine.clone(),
catalog_manager,
factory.query_engine(),
mock_engine,
None,
)
SqlHandler::new(mock_engine.clone(), catalog_manager, mock_engine, None)
}
pub(crate) async fn setup_test_instance(test_name: &str) -> MockInstance {

View File

@@ -428,52 +428,63 @@ fn parse_stmt(sql: &str) -> Result<Vec<Statement>> {
}
impl Instance {
async fn plan_exec(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
let planner = self.query_engine.planner();
let plan = planner
.plan(QueryStatement::Sql(stmt), query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
self.query_engine
.execute(plan, query_ctx)
.await
.context(ExecLogicalPlanSnafu)
}
async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
let plan = match tql {
Tql::Eval(eval) => {
let promql = PromQuery {
start: eval.start,
end: eval.end,
step: eval.step,
query: eval.query,
};
let stmt = QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
self.query_engine
.planner()
.plan(stmt, query_ctx.clone())
.await
.context(PlanStatementSnafu)?
}
Tql::Explain(_) => unimplemented!(),
};
self.query_engine
.execute(plan, query_ctx)
.await
.context(ExecLogicalPlanSnafu)
}
async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
check_permission(self.plugins.clone(), &stmt, &query_ctx)?;
let planner = self.query_engine.planner();
match stmt {
Statement::Query(_) | Statement::Explain(_) => {
let plan = planner
.plan(QueryStatement::Sql(stmt), query_ctx)
.await
.context(PlanStatementSnafu)?;
self.query_engine
.execute(&plan)
.await
.context(ExecLogicalPlanSnafu)
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
self.plan_exec(stmt, query_ctx).await
}
Statement::Tql(tql) => {
let plan = match tql {
Tql::Eval(eval) => {
let promql = PromQuery {
start: eval.start,
end: eval.end,
step: eval.step,
query: eval.query,
};
let stmt =
QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
planner
.plan(stmt, query_ctx)
.await
.context(PlanStatementSnafu)?
}
Tql::Explain(_) => unimplemented!(),
};
self.query_engine
.execute(&plan)
.await
.context(ExecLogicalPlanSnafu)
// For performance considerations, only "insert with select" is executed by the query engine.
// Plain inserts ("insert with values") are still executed directly as statements.
Statement::Insert(ref insert) if insert.is_insert_select() => {
self.plan_exec(stmt, query_ctx).await
}
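// Illustrative sketch (not part of this change set; the table and values are hypothetical),
// contrasting the two insert forms mentioned above:
//
//   INSERT INTO monitor SELECT * FROM monitor_staging;  -- insert with select: planned and run by the query engine
//   INSERT INTO monitor VALUES (1, 'host1', 66.6);      -- plain insert: still handled as a statement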
Statement::Tql(tql) => self.execute_tql(tql, query_ctx).await,
Statement::CreateDatabase(_)
| Statement::ShowDatabases(_)
| Statement::CreateTable(_)
| Statement::ShowTables(_)
| Statement::DescribeTable(_)
| Statement::Insert(_)
| Statement::Delete(_)
| Statement::Alter(_)
| Statement::DropTable(_)
| Statement::Copy(_) => self
@@ -647,8 +658,8 @@ pub fn check_permission(
}
match stmt {
// query,explain and tql will be checked in QueryEngineState
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) => {}
// These are executed by query engine, and will be checked there.
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) | Statement::Delete(_) => {}
// database ops won't be checked
Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::Use(_) => {}
// show create table and alter are not supported yet
@@ -673,11 +684,8 @@ pub fn check_permission(
Statement::DescribeTable(stmt) => {
validate_param(stmt.name(), query_ctx)?;
}
Statement::Delete(delete) => {
validate_param(delete.table_name(), query_ctx)?;
}
Statement::Copy(stmd) => match stmd {
CopyTable::To(copy_table_to) => validate_param(copy_table_to.table_name(), query_ctx)?,
CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
CopyTable::From(copy_table_from) => {
validate_param(&copy_table_from.table_name, query_ctx)?
}
@@ -1086,7 +1094,7 @@ mod tests {
.plan(stmt.clone(), QueryContext::arc())
.await
.unwrap();
let output = engine.execute(&plan).await.unwrap();
let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
let Output::Stream(stream) = output else { unreachable!() };
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
let actual = recordbatches.pretty_print().unwrap();

View File

@@ -33,6 +33,7 @@ use common_error::prelude::BoxedError;
use common_query::Output;
use common_telemetry::{debug, info};
use datanode::instance::sql::table_idents_to_full_name;
use datanode::sql::SqlHandler;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use meta_client::client::MetaClient;
@@ -60,13 +61,12 @@ use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
self, AlterExprToRequestSnafu, CatalogEntrySerdeSnafu, CatalogSnafu, ColumnDataTypeSnafu,
DeserializePartitionSnafu, NotSupportedSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu,
TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu,
UnrecognizedTableOptionSnafu,
DeserializePartitionSnafu, InvokeDatanodeSnafu, NotSupportedSnafu, ParseSqlSnafu,
PrimaryKeyNotFoundSnafu, RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu,
StartMetaClientSnafu, TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu,
ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
};
use crate::expr_factory;
use crate::sql::insert_to_request;
use crate::table::DistTable;
#[derive(Clone)]
@@ -374,7 +374,10 @@ impl DistInstance {
.context(CatalogSnafu)?
.context(TableNotFoundSnafu { table_name: table })?;
let insert_request = insert_to_request(&table, *insert, query_ctx)?;
let insert_request =
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
.await
.context(InvokeDatanodeSnafu)?;
return Ok(Output::AffectedRows(
table.insert(insert_request).await.context(TableSnafu)?,

View File

@@ -590,7 +590,7 @@ CREATE TABLE {table_name} (
.plan(stmt, QueryContext::arc())
.await
.unwrap();
let output = engine.execute(&plan).await.unwrap();
let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
let Output::Stream(stream) = output else { unreachable!() };
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
let actual = recordbatches.pretty_print().unwrap();

View File

@@ -28,7 +28,6 @@ pub mod postgres;
pub mod prom;
pub mod prometheus;
mod server;
mod sql;
mod table;
#[cfg(test)]
mod tests;

View File

@@ -1,130 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::BoxedError;
use common_error::snafu::ensure;
use datanode::instance::sql::table_idents_to_full_name;
use datatypes::data_type::DataType;
use datatypes::prelude::MutableVector;
use datatypes::schema::ColumnSchema;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
use sql::statements;
use sql::statements::insert::Insert;
use table::requests::InsertRequest;
use table::TableRef;
use crate::error::{self, ExternalSnafu, Result};
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
// TODO(fys): Extract the common logic in datanode and frontend in the future.
// This function convert insert statement to an `InsertRequest` to region 0.
pub(crate) fn insert_to_request(
table: &TableRef,
stmt: Insert,
query_ctx: QueryContextRef,
) -> Result<InsertRequest> {
let columns = stmt.columns();
let values = stmt
.values_body()
.context(error::ParseSqlSnafu)?
.context(error::MissingInsertValuesSnafu)?;
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(stmt.table_name(), query_ctx)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let schema = table.schema();
let columns_num = if columns.is_empty() {
schema.column_schemas().len()
} else {
columns.len()
};
let rows_num = values.len();
let mut columns_builders: Vec<(&ColumnSchema, Box<dyn MutableVector>)> =
Vec::with_capacity(columns_num);
if columns.is_empty() {
for column_schema in schema.column_schemas() {
let data_type = &column_schema.data_type;
columns_builders.push((column_schema, data_type.create_mutable_vector(rows_num)));
}
} else {
for column_name in columns {
let column_schema = schema.column_schema_by_name(column_name).with_context(|| {
error::ColumnNotFoundSnafu {
table_name: &table_name,
column_name: column_name.to_string(),
}
})?;
let data_type = &column_schema.data_type;
columns_builders.push((column_schema, data_type.create_mutable_vector(rows_num)));
}
}
for row in values {
ensure!(
row.len() == columns_num,
error::ColumnValuesNumberMismatchSnafu {
columns: columns_num,
values: row.len(),
}
);
for (sql_val, (column_schema, builder)) in row.iter().zip(columns_builders.iter_mut()) {
add_row_to_vector(column_schema, sql_val, builder)?;
}
}
Ok(InsertRequest {
catalog_name,
schema_name,
table_name,
columns_values: columns_builders
.into_iter()
.map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
.collect(),
region_number: 0,
})
}
fn add_row_to_vector(
column_schema: &ColumnSchema,
sql_val: &SqlValue,
builder: &mut Box<dyn MutableVector>,
) -> Result<()> {
let value = if replace_default(sql_val) {
column_schema
.create_default()
.context(error::ColumnDefaultValueSnafu {
column: column_schema.name.to_string(),
})?
.context(error::ColumnNoneDefaultValueSnafu {
column: column_schema.name.to_string(),
})?
} else {
statements::sql_value_to_value(&column_schema.name, &column_schema.data_type, sql_val)
.context(error::ParseSqlSnafu)?
};
builder.push_value_ref(value.as_value_ref());
Ok(())
}
fn replace_default(sql_val: &SqlValue) -> bool {
matches!(sql_val, SqlValue::Placeholder(s) if s.to_lowercase() == DEFAULT_PLACEHOLDER_VALUE)
}

View File

@@ -33,4 +33,4 @@ tokio-util.workspace = true
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rand = "0.8"
rand.workspace = true

View File

@@ -12,7 +12,7 @@ common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-telemetry = { path = "../common/telemetry" }
etcd-client = "0.10"
rand = "0.8"
rand.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true

View File

@@ -28,6 +28,7 @@ http-body = "0.4"
lazy_static = "1.4"
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
regex = "1.6"
serde = "1.0"
serde_json = "1.0"

View File

@@ -22,6 +22,7 @@ use api::v1::meta::store_server::StoreServer;
use etcd_client::Client;
use snafu::ResultExt;
use tokio::net::TcpListener;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio_stream::wrappers::TcpListenerStream;
use tonic::transport::server::Router;
@@ -44,44 +45,65 @@ pub struct MetaSrvInstance {
meta_srv: MetaSrv,
opts: MetaSrvOptions,
signal_sender: Option<Sender<()>>,
}
impl MetaSrvInstance {
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
let meta_srv = build_meta_srv(&opts).await?;
Ok(MetaSrvInstance { meta_srv, opts })
Ok(MetaSrvInstance {
meta_srv,
opts,
signal_sender: None,
})
}
pub async fn start(&self) -> Result<()> {
pub async fn start(&mut self) -> Result<()> {
self.meta_srv.start().await;
bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
let (tx, mut rx) = mpsc::channel::<()>(1);
self.signal_sender = Some(tx);
bootstrap_meta_srv_with_router(
&self.opts.bind_addr,
router(self.meta_srv.clone()),
&mut rx,
)
.await?;
Ok(())
}
pub async fn close(&self) -> Result<()> {
// TODO: shutdown the router
pub async fn shutdown(&self) -> Result<()> {
if let Some(signal) = &self.signal_sender {
signal
.send(())
.await
.context(error::SendShutdownSignalSnafu)?;
}
self.meta_srv.shutdown();
Ok(())
}
}
// Bootstrap the rpc server to serve incoming request
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
let meta_srv = make_meta_srv(&opts).await?;
bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
}
pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
pub async fn bootstrap_meta_srv_with_router(
bind_addr: &str,
router: Router,
signal: &mut Receiver<()>,
) -> Result<()> {
let listener = TcpListener::bind(bind_addr)
.await
.context(error::TcpBindSnafu { addr: bind_addr })?;
let listener = TcpListenerStream::new(listener);
router
.serve_with_incoming(listener)
.serve_with_incoming_shutdown(listener, async {
signal.recv().await;
})
.await
.context(error::StartGrpcSnafu)?;

View File

@@ -261,7 +261,7 @@ mod tests {
let stat_val = StatValue { stats: vec![stat] }.try_into().unwrap();
let kv = KeyValue {
key: stat_key.clone().into(),
key: stat_key.into(),
value: stat_val,
};

View File

@@ -15,12 +15,16 @@
use std::string::FromUtf8Error;
use common_error::prelude::*;
use tokio::sync::mpsc::error::SendError;
use tonic::codegen::http;
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to send shutdown signal"))]
SendShutdownSignal { source: SendError<()> },
#[snafu(display("Error stream request next is None"))]
StreamNone { backtrace: Backtrace },
@@ -312,6 +316,7 @@ impl ErrorExt for Error {
| Error::LeaseGrant { .. }
| Error::LockNotConfig { .. }
| Error::ExceededRetryLimit { .. }
| Error::SendShutdownSignal { .. }
| Error::StartGrpc { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::MissingRequiredParameter { .. }

View File

@@ -0,0 +1,546 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::VecDeque;
/// This is our port of Akka's "[PhiAccrualFailureDetector](https://github.com/akka/akka/blob/main/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala)"
/// You can find its documentation here:
/// https://doc.akka.io/docs/akka/current/typed/failure-detector.html
///
/// Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their
/// paper: [https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf]
///
/// The suspicion level of failure is given by a value called φ (phi).
/// The basic idea of the φ failure detector is to express the value of φ on a scale that
/// is dynamically adjusted to reflect current network conditions. A configurable
/// threshold is used to decide if φ is considered to be a failure.
///
/// The value of φ is calculated as:
///
/// φ = -log10(1 - F(timeSinceLastHeartbeat))
///
/// where F is the cumulative distribution function of a normal distribution with mean
/// and standard deviation estimated from historical heartbeat inter-arrival times.
#[cfg_attr(test, derive(Clone))]
pub(crate) struct PhiAccrualFailureDetector {
/// A low threshold is prone to generate many wrong suspicions but ensures a quick detection
/// in the event of a real crash. Conversely, a high threshold generates fewer mistakes but
/// needs more time to detect actual crashes.
threshold: f32,
/// Minimum standard deviation to use for the normal distribution used when calculating phi.
/// Too low standard deviation might result in too much sensitivity for sudden, but normal,
/// deviations in heartbeat inter arrival times.
min_std_deviation_millis: f32,
/// Duration corresponding to the number of potentially lost/delayed heartbeats that will be
/// accepted before considering it to be an anomaly.
/// This margin is important to be able to survive sudden, occasional pauses in heartbeat
/// arrivals, due to, for example, a network drop.
acceptable_heartbeat_pause_millis: u32,
/// Bootstrap the stats with heartbeats that correspond to this duration, with a rather high
/// standard deviation (since the environment is unknown in the beginning).
first_heartbeat_estimate_millis: u32,
heartbeat_history: HeartbeatHistory,
last_heartbeat_millis: Option<i64>,
}
impl Default for PhiAccrualFailureDetector {
fn default() -> Self {
// the default configuration is the same as Akka's:
// https://github.com/akka/akka/blob/main/akka-cluster/src/main/resources/reference.conf#L181
Self {
threshold: 8_f32,
min_std_deviation_millis: 100_f32,
acceptable_heartbeat_pause_millis: 3000,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
}
}
}
impl PhiAccrualFailureDetector {
pub(crate) fn heartbeat(&mut self, ts_millis: i64) {
if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
if ts_millis < last_heartbeat_millis {
return;
}
if self.is_available(ts_millis) {
let interval = ts_millis - last_heartbeat_millis;
self.heartbeat_history.add(interval)
}
} else {
// guess statistics for the first heartbeat,
// important so that connections with only one heartbeat become unavailable;
// bootstrap with 2 entries with a rather high standard deviation
let std_deviation = self.first_heartbeat_estimate_millis / 4;
self.heartbeat_history
.add((self.first_heartbeat_estimate_millis - std_deviation) as _);
self.heartbeat_history
.add((self.first_heartbeat_estimate_millis + std_deviation) as _);
}
let _ = self.last_heartbeat_millis.insert(ts_millis);
}
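// Worked example (a sketch based on the default configuration above, not part of the original code):
// with first_heartbeat_estimate_millis = 1000, the bootstrap std_deviation is 1000 / 4 = 250,
// so the history is seeded with the two intervals 750 and 1250. That gives
//   mean     = (750 + 1250) / 2 = 1000
//   variance = (750^2 + 1250^2) / 2 - 1000^2 = 1_062_500 - 1_000_000 = 62_500  (std_deviation = 250)
// which is what phi() works against until real inter-arrival samples accumulate.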
pub(crate) fn is_available(&self, ts_millis: i64) -> bool {
self.phi(ts_millis) < self.threshold as _
}
/// The suspicion level of the accrual failure detector.
///
/// If a connection does not have any records in the failure detector, then it is considered healthy.
pub(crate) fn phi(&self, ts_millis: i64) -> f64 {
if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
let time_diff = ts_millis - last_heartbeat_millis;
let mean = self.heartbeat_history.mean();
let std_deviation = self
.heartbeat_history
.std_deviation()
.max(self.min_std_deviation_millis as _);
phi(
time_diff,
mean + self.acceptable_heartbeat_pause_millis as f64,
std_deviation,
)
} else {
// treat unmanaged connections, e.g. with zero heartbeats, as healthy connections
0.0
}
}
#[cfg(test)]
pub(crate) fn threshold(&self) -> f32 {
self.threshold
}
#[cfg(test)]
pub(crate) fn acceptable_heartbeat_pause_millis(&self) -> u32 {
self.acceptable_heartbeat_pause_millis
}
}
/// Calculation of phi, derived from the Cumulative distribution function for
/// N(mean, stdDeviation) normal distribution, given by
/// 1.0 / (1.0 + math.exp(-y * (1.5976 + 0.070566 * y * y)))
/// where y = (x - mean) / standard_deviation
/// This is an approximation defined in β Mathematics Handbook (Logistic approximation).
/// Error is 0.00014 at +- 3.16
/// The calculated value is equivalent to -log10(1 - CDF(y))
///
/// Usually phi = 1 means the likelihood that we will make a mistake is about 10%.
/// The likelihood is about 1% with phi = 2, 0.1% with phi = 3, and so on.
fn phi(time_diff: i64, mean: f64, std_deviation: f64) -> f64 {
assert_ne!(std_deviation, 0.0);
let time_diff = time_diff as f64;
let y = (time_diff - mean) / std_deviation;
let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
if time_diff > mean {
-(e / (1.0 + e)).log10()
} else {
-(1.0 - 1.0 / (1.0 + e)).log10()
}
}
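// A minimal sanity sketch (values mirror `test_return_realistic_phi_values` below; the helper is
// hypothetical and commented out, so it is not part of the change set). With mean = 1000 ms and
// std_deviation = 100 ms:
//
// fn phi_sketch() {
//     assert!(phi(1000, 1000.0, 100.0) < 1.0); // heartbeat right on time: barely suspicious
//     assert!(phi(1400, 1000.0, 100.0) > 4.0); // 400 ms late: suspicion grows quickly
//     assert!(phi(1600, 1000.0, 100.0) > 8.0); // well past the default threshold of 8
// }
//
// A connection is declared unavailable once the delay pushes phi over `threshold`.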
/// Holds the heartbeat statistics.
/// It is capped by the number of samples specified in `max_sample_size`.
///
/// The stats (mean, variance, std_deviation) are not defined for empty HeartbeatHistory.
#[derive(Clone)]
struct HeartbeatHistory {
/// Number of samples to use for calculation of mean and standard deviation of inter-arrival
/// times.
max_sample_size: u32,
intervals: VecDeque<i64>,
interval_sum: i64,
squared_interval_sum: i64,
}
impl HeartbeatHistory {
fn new(max_sample_size: u32) -> Self {
Self {
max_sample_size,
intervals: VecDeque::with_capacity(max_sample_size as usize),
interval_sum: 0,
squared_interval_sum: 0,
}
}
fn mean(&self) -> f64 {
self.interval_sum as f64 / self.intervals.len() as f64
}
fn variance(&self) -> f64 {
let mean = self.mean();
self.squared_interval_sum as f64 / self.intervals.len() as f64 - mean * mean
}
fn std_deviation(&self) -> f64 {
self.variance().sqrt()
}
fn add(&mut self, interval: i64) {
if self.intervals.len() as u32 >= self.max_sample_size {
self.drop_oldest();
}
self.intervals.push_back(interval);
self.interval_sum += interval;
self.squared_interval_sum += interval * interval;
}
fn drop_oldest(&mut self) {
let oldest = self
.intervals
.pop_front()
.expect("intervals must not be empty here");
self.interval_sum -= oldest;
self.squared_interval_sum -= oldest * oldest;
}
}
#[cfg(test)]
mod tests {
use common_time::util::current_time_millis;
use super::*;
#[test]
fn test_is_available() {
let ts_millis = current_time_millis();
let mut fd = PhiAccrualFailureDetector::default();
// is available before first heartbeat
assert!(fd.is_available(ts_millis));
fd.heartbeat(ts_millis);
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis as i64;
// is available when heartbeat
assert!(fd.is_available(ts_millis));
// is available before heartbeat timeout
assert!(fd.is_available(ts_millis + acceptable_heartbeat_pause_millis / 2));
// is not available after heartbeat timeout
assert!(!fd.is_available(ts_millis + acceptable_heartbeat_pause_millis * 2));
}
#[test]
fn test_last_heartbeat() {
let ts_millis = current_time_millis();
let mut fd = PhiAccrualFailureDetector::default();
// no heartbeat yet
assert!(fd.last_heartbeat_millis.is_none());
fd.heartbeat(ts_millis);
assert_eq!(fd.last_heartbeat_millis, Some(ts_millis));
}
#[test]
fn test_phi() {
let ts_millis = current_time_millis();
let mut fd = PhiAccrualFailureDetector::default();
// phi == 0 before first heartbeat
assert_eq!(fd.phi(ts_millis), 0.0);
fd.heartbeat(ts_millis);
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis as i64;
// phi == 0 when heartbeat
assert_eq!(fd.phi(ts_millis), 0.0);
// phi < threshold before heartbeat timeout
let now = ts_millis + acceptable_heartbeat_pause_millis / 2;
assert!(fd.phi(now) < fd.threshold as _);
// phi >= threshold after heartbeat timeout
let now = ts_millis + acceptable_heartbeat_pause_millis * 2;
assert!(fd.phi(now) >= fd.threshold as _);
}
// The following test cases are ported from Akka's test:
// [AccrualFailureDetectorSpec.scala](https://github.com/akka/akka/blob/main/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala).
#[test]
fn test_use_good_enough_cumulative_distribution_function() {
fn cdf(phi: f64) -> f64 {
1.0 - 10.0_f64.powf(-phi)
}
assert!((cdf(phi(0, 0.0, 10.0)) - 0.5).abs() < 0.001);
assert!((cdf(phi(6, 0.0, 10.0)) - 0.7257).abs() < 0.001);
assert!((cdf(phi(15, 0.0, 10.0)) - 0.9332).abs() < 0.001);
assert!((cdf(phi(20, 0.0, 10.0)) - 0.97725).abs() < 0.001);
assert!((cdf(phi(25, 0.0, 10.0)) - 0.99379).abs() < 0.001);
assert!((cdf(phi(35, 0.0, 10.0)) - 0.99977).abs() < 0.001);
assert!((cdf(phi(40, 0.0, 10.0)) - 0.99997).abs() < 0.0001);
for w in (0..40).collect::<Vec<i64>>().windows(2) {
assert!(phi(w[0], 0.0, 10.0) < phi(w[1], 0.0, 10.0));
}
assert!((cdf(phi(22, 20.0, 3.0)) - 0.7475).abs() < 0.001);
}
#[test]
fn test_handle_outliers_without_losing_precision_or_hitting_exceptions() {
assert!((phi(10, 0.0, 1.0) - 38.0).abs() < 1.0);
assert_eq!(phi(-25, 0.0, 1.0), 0.0);
}
#[test]
fn test_return_realistic_phi_values() {
let test = vec![
(0, 0.0),
(500, 0.1),
(1000, 0.3),
(1200, 1.6),
(1400, 4.7),
(1600, 10.8),
(1700, 15.3),
];
for (time_diff, expected_phi) in test {
assert!((phi(time_diff, 1000.0, 100.0) - expected_phi).abs() < 0.1);
}
// larger std_deviation results => lower phi
assert!(phi(1100, 1000.0, 500.0) < phi(1100, 1000.0, 100.0));
}
#[test]
fn test_return_phi_of_0_on_startup_when_no_heartbeats() {
let fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
assert_eq!(fd.phi(current_time_millis()), 0.0);
assert_eq!(fd.phi(current_time_millis()), 0.0);
}
#[test]
fn test_return_phi_based_on_guess_when_only_one_heartbeat() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
fd.heartbeat(0);
assert!((fd.phi(1000)).abs() - 0.3 < 0.2);
assert!((fd.phi(2000)).abs() - 4.5 < 0.3);
assert!((fd.phi(3000)).abs() > 15.0);
}
#[test]
fn test_return_phi_using_first_interval_after_second_heartbeat() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
fd.heartbeat(0);
assert!(fd.phi(100) > 0.0);
fd.heartbeat(200);
assert!(fd.phi(300) > 0.0);
}
#[test]
fn test_is_available_after_a_series_of_successful_heartbeats() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
assert!(fd.last_heartbeat_millis.is_none());
fd.heartbeat(0);
fd.heartbeat(1000);
fd.heartbeat(1100);
assert!(fd.last_heartbeat_millis.is_some());
assert!(fd.is_available(1200));
}
#[test]
fn test_is_not_available_if_heartbeat_are_missed() {
let mut fd = PhiAccrualFailureDetector {
threshold: 3.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
fd.heartbeat(0);
fd.heartbeat(1000);
fd.heartbeat(1100);
assert!(fd.is_available(1200));
assert!(!fd.is_available(8200));
}
#[test]
fn test_is_available_if_it_starts_heartbeat_again_after_being_marked_dead_due_to_detection_of_failure(
) {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 3000,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
// 1000 regular intervals, a 5 minute pause, and then a short pause again that should trigger
// unreachability again
let mut now = 0;
for _ in 0..1000 {
fd.heartbeat(now);
now += 1000;
}
now += 5 * 60 * 1000;
assert!(!fd.is_available(now)); // after the long pause
now += 100;
fd.heartbeat(now);
now += 900;
assert!(fd.is_available(now));
now += 100;
fd.heartbeat(now);
now += 7000;
assert!(!fd.is_available(now)); // after the 7 seconds pause
now += 100;
fd.heartbeat(now);
now += 900;
assert!(fd.is_available(now));
now += 100;
fd.heartbeat(now);
now += 900;
assert!(fd.is_available(now));
}
#[test]
fn test_accept_some_configured_missing_heartbeats() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 3000,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
fd.heartbeat(0);
fd.heartbeat(1000);
fd.heartbeat(2000);
fd.heartbeat(3000);
assert!(fd.is_available(7000));
fd.heartbeat(8000);
assert!(fd.is_available(9000));
}
#[test]
fn test_fail_after_configured_acceptable_missing_heartbeats() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 3000,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(1000),
last_heartbeat_millis: None,
};
fd.heartbeat(0);
fd.heartbeat(1000);
fd.heartbeat(2000);
fd.heartbeat(3000);
fd.heartbeat(4000);
fd.heartbeat(5000);
assert!(fd.is_available(5500));
fd.heartbeat(6000);
assert!(!fd.is_available(11000));
}
#[test]
fn test_use_max_sample_size_heartbeats() {
let mut fd = PhiAccrualFailureDetector {
threshold: 8.0,
min_std_deviation_millis: 100.0,
acceptable_heartbeat_pause_millis: 0,
first_heartbeat_estimate_millis: 1000,
heartbeat_history: HeartbeatHistory::new(3),
last_heartbeat_millis: None,
};
// 100 ms interval
fd.heartbeat(0);
fd.heartbeat(100);
fd.heartbeat(200);
fd.heartbeat(300);
let phi1 = fd.phi(400);
// 500 ms interval, should yield the same phi once the 100 ms intervals have been dropped
fd.heartbeat(1000);
fd.heartbeat(1500);
fd.heartbeat(2000);
fd.heartbeat(2500);
let phi2 = fd.phi(3000);
assert_eq!(phi1, phi2);
}
#[test]
fn test_heartbeat_history_calculate_correct_mean_and_variance() {
let mut history = HeartbeatHistory::new(20);
for i in [100, 200, 125, 340, 130] {
history.add(i);
}
assert!((history.mean() - 179.0).abs() < 0.00001);
assert!((history.variance() - 7584.0).abs() < 0.00001);
}
#[test]
fn test_heartbeat_history_have_0_variance_for_one_sample() {
let mut history = HeartbeatHistory::new(600);
history.add(1000);
assert!((history.variance() - 0.0).abs() < 0.00001);
}
#[test]
fn test_heartbeat_history_be_capped_by_the_specified_max_sample_size() {
let mut history = HeartbeatHistory::new(3);
history.add(100);
history.add(110);
history.add(90);
assert!((history.mean() - 100.0).abs() < 0.00001);
assert!((history.variance() - 66.6666667).abs() < 0.00001);
history.add(140);
assert!((history.mean() - 113.333333).abs() < 0.00001);
assert!((history.variance() - 422.222222).abs() < 0.00001);
history.add(80);
assert!((history.mean() - 103.333333).abs() < 0.00001);
assert!((history.variance() - 688.88888889).abs() < 0.00001);
}
}

View File

@@ -14,6 +14,7 @@
pub use check_leader_handler::CheckLeaderHandler;
pub use collect_stats_handler::CollectStatsHandler;
pub use failure_handler::RegionFailureHandler;
pub use keep_lease_handler::KeepLeaseHandler;
pub use on_leader_start::OnLeaderStartHandler;
pub use persist_stats_handler::PersistStatsHandler;
@@ -21,6 +22,7 @@ pub use response_header_handler::ResponseHeaderHandler;
mod check_leader_handler;
mod collect_stats_handler;
mod failure_handler;
mod instruction;
mod keep_lease_handler;
pub mod node_stat;
@@ -54,8 +56,8 @@ pub trait HeartbeatHandler: Send + Sync {
#[derive(Debug, Default)]
pub struct HeartbeatAccumulator {
pub header: Option<ResponseHeader>,
pub stats: Vec<Stat>,
pub instructions: Vec<Instruction>,
pub stat: Option<Stat>,
}
impl HeartbeatAccumulator {

View File

@@ -12,39 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::VecDeque;
use api::v1::meta::HeartbeatRequest;
use common_telemetry::debug;
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use super::node_stat::Stat;
use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
type StatKey = (u64, u64);
pub struct CollectStatsHandler {
max_cached_stats_per_key: usize,
cache: DashMap<StatKey, VecDeque<Stat>>,
}
impl Default for CollectStatsHandler {
fn default() -> Self {
Self::new(10)
}
}
impl CollectStatsHandler {
pub fn new(max_cached_stats_per_key: usize) -> Self {
Self {
max_cached_stats_per_key,
cache: DashMap::new(),
}
}
}
pub struct CollectStatsHandler;
#[async_trait::async_trait]
impl HeartbeatHandler for CollectStatsHandler {
@@ -60,21 +36,7 @@ impl HeartbeatHandler for CollectStatsHandler {
match Stat::try_from(req.clone()) {
Ok(stat) => {
let key = (stat.cluster_id, stat.id);
match self.cache.entry(key) {
Entry::Occupied(mut e) => {
let deque = e.get_mut();
deque.push_front(stat);
if deque.len() >= self.max_cached_stats_per_key {
acc.stats = deque.drain(..).collect();
}
}
Entry::Vacant(e) => {
let mut stat_vec = VecDeque::with_capacity(self.max_cached_stats_per_key);
stat_vec.push_front(stat);
e.insert(stat_vec);
}
}
let _ = acc.stat.insert(stat);
}
Err(_) => {
debug!("Incomplete heartbeat data: {:?}", req);

View File

@@ -0,0 +1,151 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod runner;
use api::v1::meta::HeartbeatRequest;
use async_trait::async_trait;
use crate::error::Result;
use crate::handler::failure_handler::runner::{FailureDetectControl, FailureDetectRunner};
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::{Context, ElectionRef};
#[derive(Eq, Hash, PartialEq, Clone)]
pub(crate) struct RegionIdent {
catalog: String,
schema: String,
table: String,
region_id: u64,
}
// TODO(LFC): TBC
pub(crate) struct DatanodeHeartbeat {
#[allow(dead_code)]
cluster_id: u64,
#[allow(dead_code)]
node_id: u64,
region_idents: Vec<RegionIdent>,
heartbeat_time: i64,
}
pub struct RegionFailureHandler {
failure_detect_runner: FailureDetectRunner,
}
impl RegionFailureHandler {
pub fn new(election: Option<ElectionRef>) -> Self {
Self {
failure_detect_runner: FailureDetectRunner::new(election),
}
}
pub async fn start(&mut self) {
self.failure_detect_runner.start().await;
}
}
#[async_trait]
impl HeartbeatHandler for RegionFailureHandler {
async fn handle(
&self,
_: &HeartbeatRequest,
ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<()> {
if ctx.is_infancy {
self.failure_detect_runner
.send_control(FailureDetectControl::Purge)
.await;
}
if ctx.is_skip_all() {
return Ok(());
}
let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
let heartbeat = DatanodeHeartbeat {
cluster_id: stat.cluster_id,
node_id: stat.id,
region_idents: stat
.region_stats
.iter()
.map(|x| RegionIdent {
catalog: x.catalog.clone(),
schema: x.schema.clone(),
table: x.table.clone(),
region_id: x.id,
})
.collect(),
heartbeat_time: stat.timestamp_millis,
};
self.failure_detect_runner.send_heartbeat(heartbeat).await;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::handler::node_stat::{RegionStat, Stat};
use crate::metasrv::builder::MetaSrvBuilder;
#[tokio::test(flavor = "multi_thread")]
async fn test_handle_heartbeat() {
let mut handler = RegionFailureHandler::new(None);
handler.start().await;
let req = &HeartbeatRequest::default();
let builder = MetaSrvBuilder::new();
let metasrv = builder.build().await;
let mut ctx = metasrv.new_ctx();
ctx.is_infancy = false;
let acc = &mut HeartbeatAccumulator::default();
fn new_region_stat(region_id: u64) -> RegionStat {
RegionStat {
id: region_id,
catalog: "a".to_string(),
schema: "b".to_string(),
table: "c".to_string(),
rcus: 0,
wcus: 0,
approximate_bytes: 0,
approximate_rows: 0,
}
}
acc.stat = Some(Stat {
cluster_id: 1,
id: 42,
region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
timestamp_millis: 1000,
..Default::default()
});
handler.handle(req, &mut ctx, acc).await.unwrap();
let dump = handler.failure_detect_runner.dump().await;
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 3);
// infancy purges the failure detectors, so heartbeats have to be accumulated again
ctx.is_infancy = true;
acc.stat = None;
handler.handle(req, &mut ctx, acc).await.unwrap();
let dump = handler.failure_detect_runner.dump().await;
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 0);
}
}

View File

@@ -0,0 +1,309 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ops::DerefMut;
use std::sync::Arc;
use std::time::{Duration, Instant};
use common_telemetry::error;
use common_time::util::current_time_millis;
use dashmap::mapref::multiple::RefMulti;
use dashmap::DashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::task::JoinHandle;
use crate::failure_detector::PhiAccrualFailureDetector;
use crate::handler::failure_handler::{DatanodeHeartbeat, RegionIdent};
use crate::metasrv::ElectionRef;
pub(crate) enum FailureDetectControl {
Purge,
#[cfg(test)]
Dump(tokio::sync::oneshot::Sender<FailureDetectorContainer>),
}
pub(crate) struct FailureDetectRunner {
election: Option<ElectionRef>,
heartbeat_tx: Sender<DatanodeHeartbeat>,
heartbeat_rx: Option<Receiver<DatanodeHeartbeat>>,
control_tx: Sender<FailureDetectControl>,
control_rx: Option<Receiver<FailureDetectControl>>,
receiver_handle: Option<JoinHandle<()>>,
runner_handle: Option<JoinHandle<()>>,
}
impl FailureDetectRunner {
pub(crate) fn new(election: Option<ElectionRef>) -> Self {
let (heartbeat_tx, heartbeat_rx) = mpsc::channel::<DatanodeHeartbeat>(1024);
let (control_tx, control_rx) = mpsc::channel::<FailureDetectControl>(1024);
Self {
election,
heartbeat_tx,
heartbeat_rx: Some(heartbeat_rx),
control_tx,
control_rx: Some(control_rx),
receiver_handle: None,
runner_handle: None,
}
}
pub(crate) async fn send_heartbeat(&self, heartbeat: DatanodeHeartbeat) {
if let Err(e) = self.heartbeat_tx.send(heartbeat).await {
error!("FailureDetectRunner is stop receiving heartbeats: {}", e)
}
}
pub(crate) async fn send_control(&self, control: FailureDetectControl) {
if let Err(e) = self.control_tx.send(control).await {
error!("FailureDetectRunner is stop receiving controls: {}", e)
}
}
pub(crate) async fn start(&mut self) {
let failure_detectors = Arc::new(FailureDetectorContainer(DashMap::new()));
self.start_with(failure_detectors).await
}
async fn start_with(&mut self, failure_detectors: Arc<FailureDetectorContainer>) {
let Some(mut heartbeat_rx) = self.heartbeat_rx.take() else { return };
let Some(mut control_rx) = self.control_rx.take() else { return };
let container = failure_detectors.clone();
let receiver_handle = common_runtime::spawn_bg(async move {
loop {
tokio::select! {
Some(control) = control_rx.recv() => {
match control {
FailureDetectControl::Purge => container.clear(),
#[cfg(test)]
FailureDetectControl::Dump(tx) => {
// Drain any heartbeats that are not handled before dump.
while let Ok(heartbeat) = heartbeat_rx.try_recv() {
for ident in heartbeat.region_idents {
let mut detector = container.get_failure_detector(ident);
detector.heartbeat(heartbeat.heartbeat_time);
}
}
let _ = tx.send(container.dump());
}
}
}
Some(heartbeat) = heartbeat_rx.recv() => {
for ident in heartbeat.region_idents {
let mut detector = container.get_failure_detector(ident);
detector.heartbeat(heartbeat.heartbeat_time);
}
}
}
}
});
self.receiver_handle = Some(receiver_handle);
let election = self.election.clone();
let runner_handle = common_runtime::spawn_bg(async move {
loop {
let start = Instant::now();
let is_leader = election.as_ref().map(|x| x.is_leader()).unwrap_or(true);
if is_leader {
for e in failure_detectors.iter() {
if e.failure_detector().is_available(current_time_millis()) {
// TODO(LFC): TBC
}
}
}
let elapsed = Instant::now().duration_since(start);
if let Some(sleep) = Duration::from_secs(1).checked_sub(elapsed) {
tokio::time::sleep(sleep).await;
} // else the elapsed time exceeds one second; continue working immediately
}
});
self.runner_handle = Some(runner_handle);
}
#[cfg(test)]
fn abort(&mut self) {
let Some(handle) = self.receiver_handle.take() else { return };
handle.abort();
let Some(handle) = self.runner_handle.take() else { return };
handle.abort();
}
#[cfg(test)]
pub(crate) async fn dump(&self) -> FailureDetectorContainer {
let (tx, rx) = tokio::sync::oneshot::channel();
self.send_control(FailureDetectControl::Dump(tx)).await;
rx.await.unwrap()
}
}
pub(crate) struct FailureDetectorEntry<'a> {
e: RefMulti<'a, RegionIdent, PhiAccrualFailureDetector>,
}
impl FailureDetectorEntry<'_> {
fn failure_detector(&self) -> &PhiAccrualFailureDetector {
self.e.value()
}
}
pub(crate) struct FailureDetectorContainer(DashMap<RegionIdent, PhiAccrualFailureDetector>);
impl FailureDetectorContainer {
fn get_failure_detector(
&self,
ident: RegionIdent,
) -> impl DerefMut<Target = PhiAccrualFailureDetector> + '_ {
self.0
.entry(ident)
.or_insert_with(PhiAccrualFailureDetector::default)
}
pub(crate) fn iter(&self) -> Box<dyn Iterator<Item = FailureDetectorEntry> + '_> {
Box::new(self.0.iter().map(move |e| FailureDetectorEntry { e })) as _
}
fn clear(&self) {
self.0.clear()
}
#[cfg(test)]
fn dump(&self) -> FailureDetectorContainer {
let mut m = DashMap::with_capacity(self.0.len());
m.extend(self.0.iter().map(|x| (x.key().clone(), x.value().clone())));
Self(m)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
use super::*;
#[test]
fn test_default_failure_detector_container() {
let container = FailureDetectorContainer(DashMap::new());
let ident = RegionIdent {
catalog: "a".to_string(),
schema: "b".to_string(),
table: "c".to_string(),
region_id: 1,
};
let _ = container.get_failure_detector(ident.clone());
assert!(container.0.contains_key(&ident));
{
let mut iter = container.iter();
assert!(iter.next().is_some());
assert!(iter.next().is_none());
}
container.clear();
assert!(container.0.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_control() {
let container = FailureDetectorContainer(DashMap::new());
let ident = RegionIdent {
catalog: "a".to_string(),
schema: "b".to_string(),
table: "c".to_string(),
region_id: 1,
};
container.get_failure_detector(ident.clone());
let mut runner = FailureDetectRunner::new(None);
runner.start_with(Arc::new(container)).await;
let dump = runner.dump().await;
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 1);
runner.send_control(FailureDetectControl::Purge).await;
let dump = runner.dump().await;
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 0);
runner.abort();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_heartbeat() {
let mut runner = FailureDetectRunner::new(None);
runner.start().await;
// Generate 2000 heartbeats starting from now. The heartbeat interval is one second, plus some random millis.
fn generate_heartbeats(node_id: u64, region_ids: Vec<u64>) -> Vec<DatanodeHeartbeat> {
let mut rng = rand::thread_rng();
let start = current_time_millis();
(0..2000)
.map(|i| DatanodeHeartbeat {
cluster_id: 1,
node_id,
region_idents: region_ids
.iter()
.map(|&region_id| RegionIdent {
catalog: "a".to_string(),
schema: "b".to_string(),
table: "c".to_string(),
region_id,
})
.collect(),
heartbeat_time: start + i * 1000 + rng.gen_range(0..100),
})
.collect::<Vec<_>>()
}
let heartbeats = generate_heartbeats(100, vec![1, 2, 3]);
let last_heartbeat_time = heartbeats.last().unwrap().heartbeat_time;
for heartbeat in heartbeats {
runner.send_heartbeat(heartbeat).await;
}
let dump = runner.dump().await;
let failure_detectors = dump.iter().collect::<Vec<_>>();
assert_eq!(failure_detectors.len(), 3);
failure_detectors.iter().for_each(|e| {
let fd = e.failure_detector();
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis() as i64;
let start = last_heartbeat_time;
// Within the "acceptable_heartbeat_pause_millis" period, phi is zero ...
for i in 1..=acceptable_heartbeat_pause_millis / 1000 {
let now = start + i * 1000;
assert_eq!(fd.phi(now), 0.0);
}
// ... then in less than two seconds, phi is above the threshold.
// The same effect can be seen in the diagrams in Akka's documentation.
let now = start + acceptable_heartbeat_pause_millis + 1000;
assert!(fd.phi(now) < fd.threshold() as _);
let now = start + acceptable_heartbeat_pause_millis + 2000;
assert!(fd.phi(now) > fd.threshold() as _);
});
runner.abort();
}
}

View File

@@ -31,6 +31,7 @@ impl HeartbeatHandler for OnLeaderStartHandler {
) -> Result<()> {
if let Some(election) = &ctx.election {
if election.in_infancy() {
ctx.is_infancy = true;
ctx.reset_in_memory();
}
}

View File

@@ -13,14 +13,20 @@
// limitations under the License.
use api::v1::meta::{HeartbeatRequest, PutRequest};
use dashmap::DashMap;
use crate::error::Result;
use crate::handler::node_stat::Stat;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::keys::StatValue;
use crate::keys::{StatKey, StatValue};
use crate::metasrv::Context;
const MAX_CACHED_STATS_PER_KEY: usize = 10;
#[derive(Default)]
pub struct PersistStatsHandler;
pub struct PersistStatsHandler {
stats_cache: DashMap<StatKey, Vec<Stat>>,
}
#[async_trait::async_trait]
impl HeartbeatHandler for PersistStatsHandler {
@@ -30,18 +36,25 @@ impl HeartbeatHandler for PersistStatsHandler {
ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<()> {
if ctx.is_skip_all() || acc.stats.is_empty() {
if ctx.is_skip_all() {
return Ok(());
}
let stats = &mut acc.stats;
let key = match stats.get(0) {
Some(stat) => stat.stat_key(),
None => return Ok(()),
};
let Some(stat) = acc.stat.take() else { return Ok(()) };
// take stats from &mut acc.stats, avoid clone of vec
let stats = std::mem::take(stats);
let key = stat.stat_key();
let mut entry = self
.stats_cache
.entry(key)
.or_insert_with(|| Vec::with_capacity(MAX_CACHED_STATS_PER_KEY));
let stats = entry.value_mut();
stats.push(stat);
if stats.len() < MAX_CACHED_STATS_PER_KEY {
return Ok(());
}
let stats = stats.drain(..).collect();
let val = StatValue { stats };
@@ -65,7 +78,6 @@ mod tests {
use api::v1::meta::RangeRequest;
use super::*;
use crate::handler::node_stat::Stat;
use crate::keys::StatKey;
use crate::service::store::memory::MemStore;
@@ -83,24 +95,23 @@ mod tests {
catalog: None,
schema: None,
table: None,
is_infancy: false,
};
let req = HeartbeatRequest::default();
let mut acc = HeartbeatAccumulator {
stats: vec![Stat {
cluster_id: 3,
id: 101,
region_num: Some(100),
let handler = PersistStatsHandler::default();
for i in 1..=MAX_CACHED_STATS_PER_KEY {
let mut acc = HeartbeatAccumulator {
stat: Some(Stat {
cluster_id: 3,
id: 101,
region_num: Some(i as _),
..Default::default()
}),
..Default::default()
}],
..Default::default()
};
let stats_handler = PersistStatsHandler;
stats_handler
.handle(&req, &mut ctx, &mut acc)
.await
.unwrap();
};
handler.handle(&req, &mut ctx, &mut acc).await.unwrap();
}
let key = StatKey {
cluster_id: 3,
@@ -124,7 +135,7 @@ mod tests {
let val: StatValue = kv.value.clone().try_into().unwrap();
assert_eq!(1, val.stats.len());
assert_eq!(Some(100), val.stats[0].region_num);
assert_eq!(10, val.stats.len());
assert_eq!(Some(1), val.stats[0].region_num);
}
}

View File

@@ -65,6 +65,7 @@ mod tests {
catalog: None,
schema: None,
table: None,
is_infancy: false,
};
let req = HeartbeatRequest {

View File

@@ -178,7 +178,7 @@ pub(crate) fn to_removed_key(key: &str) -> String {
format!("{REMOVED_PREFIX}-{key}")
}
#[derive(Eq, PartialEq, Debug, Clone, Hash)]
#[derive(Eq, PartialEq, Debug, Clone, Hash, Copy)]
pub struct StatKey {
pub cluster_id: u64,
pub node_id: u64,

View File

@@ -17,6 +17,7 @@ pub mod bootstrap;
pub mod cluster;
pub mod election;
pub mod error;
mod failure_detector;
pub mod handler;
pub mod keys;
pub mod lease;

View File

@@ -66,6 +66,7 @@ pub struct Context {
pub catalog: Option<String>,
pub schema: Option<String>,
pub table: Option<String>,
pub is_infancy: bool,
}
impl Context {
@@ -199,6 +200,7 @@ impl MetaSrv {
catalog: None,
schema: None,
table: None,
is_infancy: false,
}
}
}

View File

@@ -18,7 +18,7 @@ use std::sync::Arc;
use crate::cluster::MetaPeerClient;
use crate::handler::{
CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, KeepLeaseHandler,
OnLeaderStartHandler, PersistStatsHandler, ResponseHeaderHandler,
OnLeaderStartHandler, PersistStatsHandler, RegionFailureHandler, ResponseHeaderHandler,
};
use crate::lock::DistLockRef;
use crate::metasrv::{ElectionRef, MetaSrv, MetaSrvOptions, SelectorRef, TABLE_ID_SEQ};
@@ -118,6 +118,9 @@ impl MetaSrvBuilder {
let handler_group = match handler_group {
Some(handler_group) => handler_group,
None => {
let mut region_failure_handler = RegionFailureHandler::new(election.clone());
region_failure_handler.start().await;
let group = HeartbeatHandlerGroup::default();
let keep_lease_handler = KeepLeaseHandler::new(kv_store.clone());
group.add_handler(ResponseHeaderHandler::default()).await;
@@ -127,7 +130,8 @@ impl MetaSrvBuilder {
group.add_handler(keep_lease_handler).await;
group.add_handler(CheckLeaderHandler::default()).await;
group.add_handler(OnLeaderStartHandler::default()).await;
group.add_handler(CollectStatsHandler::default()).await;
group.add_handler(CollectStatsHandler).await;
group.add_handler(region_failure_handler).await;
group.add_handler(PersistStatsHandler::default()).await;
group
}

View File

@@ -523,6 +523,7 @@ async fn test_alter_table_add_column() {
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
assert_eq!(new_meta.next_column_id, old_meta.next_column_id + 2);
assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}
#[tokio::test]
@@ -572,6 +573,7 @@ async fn test_alter_table_remove_column() {
assert_eq!(&[1, 2], &new_meta.value_indices[..]);
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}
#[tokio::test]
@@ -793,10 +795,10 @@ async fn test_flush_table_all_regions() {
assert!(!has_parquet_file(&region_dir));
// Trigger flush all region
table.flush(None).await.unwrap();
table.flush(None, None).await.unwrap();
// Trigger again, wait for the previous task finished
table.flush(None).await.unwrap();
table.flush(None, None).await.unwrap();
assert!(has_parquet_file(&region_dir));
}
@@ -832,10 +834,10 @@ async fn test_flush_table_with_region_id() {
};
// Trigger flush all region
table.flush(req.region_number).await.unwrap();
table.flush(req.region_number, Some(false)).await.unwrap();
// Trigger again, wait for the previous task finished
table.flush(req.region_number).await.unwrap();
table.flush(req.region_number, Some(true)).await.unwrap();
assert!(has_parquet_file(&region_dir));
}

View File

@@ -35,8 +35,8 @@ use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, ChunkReader, ReadContext, Region, RegionMeta,
RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
AddColumn, AlterOperation, AlterRequest, ChunkReader, FlushContext, ReadContext, Region,
RegionMeta, RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
use table::error as table_error;
use table::error::{RegionSchemaMismatchSnafu, Result as TableResult, TableOperationSnafu};
@@ -47,7 +47,7 @@ use table::requests::{
AddColumnRequest, AlterKind, AlterTableRequest, DeleteRequest, InsertRequest,
};
use table::table::scan::SimpleTableScan;
use table::table::{AlterContext, Table};
use table::table::{AlterContext, RegionStat, Table};
use tokio::sync::Mutex;
use crate::error;
@@ -323,20 +323,27 @@ impl<R: Region> Table for MitoTable<R> {
Ok(rows_deleted)
}
async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
async fn flush(
&self,
region_number: Option<RegionNumber>,
wait: Option<bool>,
) -> TableResult<()> {
let flush_ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
if let Some(region_number) = region_number {
if let Some(region) = self.regions.get(&region_number) {
region
.flush()
.flush(&flush_ctx)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
} else {
futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
futures::future::try_join_all(
self.regions.values().map(|region| region.flush(&flush_ctx)),
)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
Ok(())
@@ -350,6 +357,17 @@ impl<R: Region> Table for MitoTable<R> {
Ok(())
}
fn region_stats(&self) -> TableResult<Vec<RegionStat>> {
Ok(self
.regions
.values()
.map(|region| RegionStat {
region_id: region.id(),
disk_usage_bytes: region.disk_usage_bytes(),
})
.collect())
}
}
struct ChunkStream {

View File

@@ -26,9 +26,9 @@ use datatypes::schema::{ColumnSchema, Schema};
use storage::metadata::{RegionMetaImpl, RegionMetadata};
use storage::write_batch::WriteBatch;
use store_api::storage::{
AlterRequest, Chunk, ChunkReader, CreateOptions, EngineContext, GetRequest, GetResponse,
OpenOptions, ReadContext, Region, RegionDescriptor, RegionId, ScanRequest, ScanResponse,
SchemaRef, Snapshot, StorageEngine, WriteContext, WriteResponse,
AlterRequest, Chunk, ChunkReader, CreateOptions, EngineContext, FlushContext, GetRequest,
GetResponse, OpenOptions, ReadContext, Region, RegionDescriptor, RegionId, ScanRequest,
ScanResponse, SchemaRef, Snapshot, StorageEngine, WriteContext, WriteResponse,
};
pub type Result<T> = std::result::Result<T, MockError>;
@@ -201,7 +201,7 @@ impl Region for MockRegion {
0
}
async fn flush(&self) -> Result<()> {
async fn flush(&self, _ctx: &FlushContext) -> Result<()> {
unimplemented!()
}
}

View File

@@ -13,8 +13,11 @@
// limitations under the License.
mod aggr_over_time;
mod changes;
mod deriv;
mod idelta;
mod increase;
mod resets;
#[cfg(test)]
mod test_util;

View File

@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of [`changes`](https://prometheus.io/docs/prometheus/latest/querying/functions/#changes) in PromQL. Refer to the [original
//! implementation](https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L1023-L1040).

View File

@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of [`deriv`](https://prometheus.io/docs/prometheus/latest/querying/functions/#deriv) in PromQL. Refer to the [original
//! implementation](https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L839-L856).

View File

@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of [`resets`](https://prometheus.io/docs/prometheus/latest/querying/functions/#resets) in PromQL. Refer to the [original
//! implementation](https://github.com/prometheus/prometheus/blob/90b2f7a540b8a70d8d81372e6692dcbb67ccbaaa/promql/functions.go#L1004-L1021).

View File

@@ -957,7 +957,12 @@ impl PromPlanner {
.tag_columns
.iter()
.chain(self.ctx.time_index_column.iter())
.map(|col| Ok(DfExpr::Column(Column::from(col))));
.map(|col| {
Ok(DfExpr::Column(Column::new(
self.ctx.table_name.clone(),
col,
)))
});
// build computation exprs
let result_value_columns = self
@@ -1485,7 +1490,7 @@ mod test {
.unwrap();
let expected = String::from(
"Projection: lhs.tag_0, lhs.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
"Projection: some_metric.tag_0, some_metric.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\
\n Inner Join: lhs.tag_0 = some_metric.tag_0, lhs.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
@@ -1592,7 +1597,6 @@ mod test {
}
#[tokio::test]
#[ignore = "wait for https://github.com/apache/arrow-datafusion/issues/5513"]
async fn increase_aggr() {
let query = "increase(some_metric[5m])";
let expected = String::from(
@@ -1626,7 +1630,6 @@ mod test {
}
#[tokio::test]
#[ignore = "wait for https://github.com/apache/arrow-datafusion/issues/5513"]
async fn count_over_time() {
let query = "count_over_time(some_metric[5m])";
let expected = String::from(

View File

@@ -46,7 +46,7 @@ format_num = "0.1"
num = "0.4"
num-traits = "0.2"
paste = "1.0"
rand = "0.8"
rand.workspace = true
statrs = "0.16"
stats-cli = "3.0"
streaming-stats = "0.2"

View File

@@ -18,6 +18,7 @@ mod catalog_adapter;
mod error;
mod planner;
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
@@ -33,12 +34,23 @@ use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
use common_telemetry::timer;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::ExecutionPlan;
+use datafusion_common::ResolvedTableReference;
+use datafusion_expr::{DmlStatement, LogicalPlan as DfLogicalPlan, WriteOp};
+use datatypes::prelude::VectorRef;
use datatypes::schema::Schema;
-use snafu::{OptionExt, ResultExt};
+use futures_util::StreamExt;
+use session::context::QueryContextRef;
+use snafu::{ensure, OptionExt, ResultExt};
+use table::requests::{DeleteRequest, InsertRequest};
+use table::TableRef;
pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
pub use crate::datafusion::planner::DfContextProviderAdapter;
-use crate::error::{DataFusionSnafu, QueryExecutionSnafu, Result};
+use crate::error::{
+CatalogNotFoundSnafu, CatalogSnafu, CreateRecordBatchSnafu, DataFusionSnafu,
+MissingTimestampColumnSnafu, QueryExecutionSnafu, Result, SchemaNotFoundSnafu,
+TableNotFoundSnafu, UnsupportedExprSnafu,
+};
use crate::executor::QueryExecutor;
use crate::logical_optimizer::LogicalOptimizer;
use crate::physical_optimizer::PhysicalOptimizer;
@@ -56,6 +68,145 @@ impl DatafusionQueryEngine {
pub fn new(state: Arc<QueryEngineState>) -> Self {
Self { state }
}
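// Runs a read-only plan end to end: build the physical plan (logical
// optimization happens inside `create_physical_plan`), optimize it, and
// return the record batch stream as the query output.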
async fn exec_query_plan(&self, plan: LogicalPlan) -> Result<Output> {
let mut ctx = QueryEngineContext::new(self.state.session_state());
// `create_physical_plan` will optimize logical plan internally
let physical_plan = self.create_physical_plan(&mut ctx, &plan).await?;
let physical_plan = self.optimize_physical_plan(&mut ctx, physical_plan)?;
Ok(Output::Stream(self.execute_stream(&ctx, &physical_plan)?))
}
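// Handles DML plans (INSERT ... SELECT and DELETE): run the statement's input
// as an ordinary query, stream its record batches, and apply each batch to the
// target table, accumulating the number of affected rows.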
async fn exec_dml_statement(
&self,
dml: DmlStatement,
query_ctx: QueryContextRef,
) -> Result<Output> {
ensure!(
matches!(dml.op, WriteOp::Insert | WriteOp::Delete),
UnsupportedExprSnafu {
name: format!("DML op {}", dml.op),
}
);
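// Resolve the statement's table name against the session's current catalog
// and schema, then look the table up before executing the input plan.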
let default_catalog = query_ctx.current_catalog();
let default_schema = query_ctx.current_schema();
let table_name = dml
.table_name
.as_table_reference()
.resolve(&default_catalog, &default_schema);
let table = self.find_table(&table_name).await?;
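// The DML input is a regular logical plan (the rows to insert, or the keys of
// the rows to delete); execute it and consume the result as a stream.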
let output = self
.exec_query_plan(LogicalPlan::DfPlan((*dml.input).clone()))
.await?;
let mut stream = match output {
Output::RecordBatches(batches) => batches.as_stream(),
Output::Stream(stream) => stream,
_ => unreachable!(),
};
let mut affected_rows = 0;
while let Some(batch) = stream.next().await {
let batch = batch.context(CreateRecordBatchSnafu)?;
let column_vectors = batch
.column_vectors(&table_name.to_string(), table.schema())
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)?;
let rows = match dml.op {
WriteOp::Insert => Self::insert(&table_name, &table, column_vectors).await?,
WriteOp::Delete => Self::delete(&table_name, &table, column_vectors).await?,
_ => unreachable!("guarded by the 'ensure!' at the beginning"),
};
affected_rows += rows;
}
Ok(Output::AffectedRows(affected_rows))
}
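// Builds a `DeleteRequest` from one record batch: only the row key (primary
// key) columns and the timestamp column are kept, since together they identify
// the rows to delete.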
async fn delete<'a>(
table_name: &ResolvedTableReference<'a>,
table: &TableRef,
column_vectors: HashMap<String, VectorRef>,
) -> Result<usize> {
let table_schema = table.schema();
let ts_column = table_schema
.timestamp_column()
.map(|x| &x.name)
.with_context(|| MissingTimestampColumnSnafu {
table_name: table_name.to_string(),
})?;
let table_info = table.table_info();
let rowkey_columns = table_info
.meta
.row_key_column_names()
.collect::<Vec<&String>>();
let column_vectors = column_vectors
.into_iter()
.filter(|x| &x.0 == ts_column || rowkey_columns.contains(&&x.0))
.collect::<HashMap<_, _>>();
let request = DeleteRequest {
key_column_values: column_vectors,
};
table
.delete(request)
.await
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)
}
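// Builds an `InsertRequest` from one record batch and writes all of its column
// vectors into the resolved table; the request currently targets region 0.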
async fn insert<'a>(
table_name: &ResolvedTableReference<'a>,
table: &TableRef,
column_vectors: HashMap<String, VectorRef>,
) -> Result<usize> {
let request = InsertRequest {
catalog_name: table_name.catalog.to_string(),
schema_name: table_name.schema.to_string(),
table_name: table_name.table.to_string(),
columns_values: column_vectors,
region_number: 0,
};
table
.insert(request)
.await
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)
}
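// Resolves a fully qualified table reference by walking the catalog list:
// catalog -> schema -> table, returning a "not found" error at the first
// missing level.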
async fn find_table(&self, table_name: &ResolvedTableReference<'_>) -> Result<TableRef> {
let catalog_name = table_name.catalog.as_ref();
let schema_name = table_name.schema.as_ref();
let table_name = table_name.table.as_ref();
let catalog = self
.state
.catalog_list()
.catalog(catalog_name)
.context(CatalogSnafu)?
.context(CatalogNotFoundSnafu {
catalog: catalog_name,
})?;
let schema =
catalog
.schema(schema_name)
.context(CatalogSnafu)?
.context(SchemaNotFoundSnafu {
schema: schema_name,
})?;
let table = schema
.table(table_name)
.await
.context(CatalogSnafu)?
.context(TableNotFoundSnafu { table: table_name })?;
Ok(table)
}
}
#[async_trait]
@@ -75,14 +226,13 @@ impl QueryEngine for DatafusionQueryEngine {
optimised_plan.schema()
}
-async fn execute(&self, plan: &LogicalPlan) -> Result<Output> {
-let logical_plan = self.optimize(plan)?;
-let mut ctx = QueryEngineContext::new(self.state.session_state());
-let physical_plan = self.create_physical_plan(&mut ctx, &logical_plan).await?;
-let physical_plan = self.optimize_physical_plan(&mut ctx, physical_plan)?;
-Ok(Output::Stream(self.execute_stream(&ctx, &physical_plan)?))
+async fn execute(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output> {
+match plan {
+LogicalPlan::DfPlan(DfLogicalPlan::Dml(dml)) => {
+self.exec_dml_statement(dml, query_ctx).await
+}
+_ => self.exec_query_plan(plan).await,
+}
}
fn register_udf(&self, udf: ScalarUdf) {
@@ -292,11 +442,11 @@ mod tests {
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
let plan = engine
.planner()
-.plan(stmt, Arc::new(QueryContext::new()))
+.plan(stmt, QueryContext::arc())
.await
.unwrap();
-let output = engine.execute(&plan).await.unwrap();
+let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
match output {
Output::Stream(recordbatch) => {

Some files were not shown because too many files have changed in this diff.