Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)
Compare commits
95 Commits
v0.1.1-alp ... v0.2.0-nig
| SHA1 |
|---|
| 48c2841e4d |
| d2542552d3 |
| c0132e6cc0 |
| aea932b891 |
| 0253136333 |
| 6a05f617a4 |
| a2b262ebc0 |
| 972f64c3d7 |
| eb77f9aafd |
| dee20144d7 |
| 563adbabe9 |
| b71bb4e5fa |
| fae293310c |
| 3e51640442 |
| b40193d7da |
| b5e5f8e555 |
| 192fa0caa5 |
| 30eb676d6a |
| d7cadf6e6d |
| d7a1435517 |
| 0943079de2 |
| 509d07b798 |
| e72ce5eaa9 |
| f491a040f5 |
| 47179a7812 |
| 995a28a27d |
| ed1cb73ffc |
| 0ffa628c22 |
| 5edd2a3dbe |
| e63b28bff1 |
| 8140d4e3e5 |
| 6825459c75 |
| 7eb4d81929 |
| 8ba0741c81 |
| 0eeb5b460c |
| 65ea6fd85f |
| 4f15b26b28 |
| 15ee4ac729 |
| b4fc8c5b78 |
| 6f81717866 |
| 77f9383daf |
| c788b7fc26 |
| 0f160a73be |
| 92963b9614 |
| f1139fba59 |
| 4e552245b1 |
| 3126bbc1c7 |
| b77b561bc8 |
| 501faad8ab |
| 5397a9bbe6 |
| f351ee7042 |
| e0493e0b8f |
| b2a09c888a |
| af101480b3 |
| b8f7f603cf |
| 8fb97ea1d8 |
| 21ce9c1163 |
| 0a22375ac1 |
| 0596d20a3b |
| e19c8fa2b6 |
| ad886f5b3e |
| f6669a8201 |
| ad5c47185d |
| 64441616db |
| 09491d6aee |
| 7cfa30b2ab |
| a7676d8860 |
| 62e2a60b7b |
| 128c5cabe1 |
| 9a001d3392 |
| facdda4d9f |
| 17eb99bc52 |
| cd8be77968 |
| b530ac9e60 |
| 76f1a79f1b |
| 4705245d60 |
| f712f978cf |
| cbf64e65b9 |
| 242ce5c2aa |
| e8d2e82335 |
| 0086cc2d3d |
| cdc111b607 |
| 81ca1d8399 |
| 8d3999df5f |
| a60788e92e |
| 296c6dfcbf |
| 604c20a83d |
| c7f114c8fa |
| 8a83de4ea5 |
| 3377930a50 |
| 85dd7e4f24 |
| f790fa05c1 |
| dfd91a1bf8 |
| ded31fb069 |
| 6a574fc52b |
@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key

GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
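These `GT_S3_*` and `GT_OSS_*` variables drive the optional object-storage tests. As a hedged sketch of how a test setup might read them and skip when they are unset — the helper below is illustrative only, not GreptimeDB's actual test harness:

```rust
use std::env;

/// Illustrative only: gather the S3 test settings listed above, if present.
fn s3_test_config() -> Option<(String, String, String, Option<String>)> {
    let bucket = env::var("GT_S3_BUCKET").ok()?;
    let access_key_id = env::var("GT_S3_ACCESS_KEY_ID").ok()?;
    let secret_access_key = env::var("GT_S3_ACCESS_KEY").ok()?;
    // The endpoint URL is optional; a custom endpoint is mainly for S3-compatible services.
    let endpoint_url = env::var("GT_S3_ENDPOINT_URL").ok();
    Some((bucket, access_key_id, secret_access_key, endpoint_url))
}

fn main() {
    match s3_test_config() {
        Some((bucket, ..)) => println!("S3 variables set; tests would target bucket {bucket}"),
        None => println!("S3 variables unset; object-storage tests would be skipped"),
    }
}
```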
267  .github/workflows/release.yml  (vendored)
@@ -5,6 +5,7 @@ on:
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# Manually trigger only builds binaries.
workflow_dispatch:

name: Release
@@ -18,6 +19,9 @@ env:

CARGO_PROFILE: nightly

## FIXME(zyy17): Enable it after the tests are stable.
DISABLE_RUN_TESTS: true

jobs:
build:
name: Build binary
@@ -29,18 +33,42 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: true
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: true
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: true
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
@@ -96,7 +124,14 @@ jobs:
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget

# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -108,10 +143,55 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test

- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config

cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
shell: bash
@@ -132,11 +212,98 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
release:
name: Release artifacts

docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3

- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV

- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up buildx
uses: docker/setup-buildx-action@v2

- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64

- name: Unzip the amd64 artifacts
run: |
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci

- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64

- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci

- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}

- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}

release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -175,15 +342,23 @@ jobs:
files: |
**/greptime-*

docker:
name: Build docker image
needs: [build]
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
@@ -191,12 +366,6 @@ jobs:
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}

- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
@@ -212,63 +381,9 @@ jobs:
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up buildx
uses: docker/setup-buildx-action@v2

- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
path: amd64

- name: Unzip the amd64 artifacts
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz

- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
path: arm64

- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz

- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}

- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
4  .gitignore  (vendored)
@@ -35,3 +35,7 @@ benchmarks/data

# dotenv
.env

# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*
1180  Cargo.lock  (generated)
File diff suppressed because it is too large.
28  Cargo.toml
@@ -7,6 +7,7 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
@@ -45,33 +46,34 @@ members = [
]

[workspace.package]
version = "0.1.0"
version = "0.1.1"
edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
arrow = { version = "33.0" }
arrow-array = "33.0"
arrow-flight = "33.0"
arrow-schema = { version = "33.0", features = ["serde"] }
arrow = { version = "36.0" }
arrow-array = "36.0"
arrow-flight = "36.0"
arrow-schema = { version = "36.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
futures = "0.3"
futures-util = "0.3"
parquet = "33.0"
parquet = "36.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.30"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = "0.7"
19  README.md
@@ -1,8 +1,8 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture>
</p>
@@ -61,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython): this installs a Python shared library required for running the Python
- python3-dev or python3-devel (optional feature, only needed if you want to run scripts
in CPython; you also need to enable the `pyo3_backend` feature when compiling, either via `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs a Python shared library required for running the Python
scripting engine (in CPython mode). This is available as `python3-dev` on
Ubuntu, you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More detail on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.

#### Build with Docker
@@ -147,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation

- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and macOS.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 build. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
@@ -158,6 +158,7 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
19  SECURITY.md  (Normal file)
@@ -0,0 +1,19 @@
# Security Policy

## Supported Versions

| Version | Supported |
| ------- | ------------------ |
| >= v0.1.0 | :white_check_mark: |
| < v0.1.0 | :x: |

## Reporting a Vulnerability

We place great importance on the security of GreptimeDB code, software,
and cloud platform. If you come across a security vulnerability in GreptimeDB,
we kindly request that you inform us immediately. We will thoroughly investigate
all valid reports and make every effort to resolve the issue promptly.

To report any issues or vulnerabilities, please email us at info@greptime.com, rather than
posting publicly on GitHub. Be sure to provide us with the version identifier as well as details
on how the vulnerability can be exploited.
@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,

#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}

@@ -97,6 +97,9 @@ async fn write_data(

for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,17 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];

for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);

let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +145,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +153,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +167,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
ts_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -213,6 +234,10 @@ fn build_values(column: &ArrayRef) -> Values {
}
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -228,13 +253,13 @@ fn create_table_expr() -> CreateTableExpr {
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
@@ -340,7 +365,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}
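The `null_mask` populated in the benchmark change above is a per-row validity bitmap shipped as raw bytes. As a generic illustration of that layout — LSB-first bit order, the convention Arrow also uses for validity buffers — and not the arrow-rs or GreptimeDB API itself, such a mask can be packed like this:

```rust
/// Pack per-row "is null" flags into an LSB-first bitmap, one bit per row.
/// Purely illustrative; arrow-rs has its own buffer types for this.
fn pack_null_mask(is_null: &[bool]) -> Vec<u8> {
    let mut mask = vec![0u8; (is_null.len() + 7) / 8];
    for (i, &null) in is_null.iter().enumerate() {
        if null {
            mask[i / 8] |= 1 << (i % 8);
        }
    }
    mask
}

fn main() {
    // Rows 1 and 4 are null -> bits 1 and 4 set -> 0b0001_0010 = 18.
    assert_eq!(pack_null_mask(&[false, true, false, false, true]), vec![18]);
    println!("null mask packed as expected");
}
```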
@@ -10,10 +10,6 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# MySQL server address, "127.0.0.1:4406" by default.
mysql_addr = "127.0.0.1:4406"
# The number of MySQL server worker threads, 2 by default.
mysql_runtime_size = 2

# Metasrv client options.
[meta_client_options]
@@ -41,12 +37,22 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"

# Compaction options, see `standalone.example.toml`.
[compaction]
[storage.compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32

# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'

# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"
@@ -99,7 +99,7 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"

# Compaction options.
[compaction]
[storage.compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
@@ -107,6 +107,15 @@ max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32

# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'

# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
@@ -114,3 +123,7 @@ max_purge_tasks = 32
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"
57  docker/aarch64/Dockerfile  (Normal file)
@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install binutils-aarch64-linux-gnu

COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh

COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Update dependencies, using a separate `RUN` to separate the cache
RUN cargo fetch

# These three env vars are set in the script, so set them manually in the Dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build

# Set the environment variables for cross compiling and compile it.
# The cross-compiled python is `python3` in PATH, but pyo3 needs `python` in PATH, so alias it.
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
87  docker/aarch64/compile-python.sh  (Normal file)
@@ -0,0 +1,87 @@
#!/usr/bin/env bash

set -e

# This script will download the Python source code, compile it, and install it to /usr/local/lib,
# then use that python to build a cross-compiled python for aarch64.
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

echo "Compiling for amd64 platform..."

./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make install
}

# A brief explanation of the Python compile options:
# --enable-shared: enable building a shared Python library (default is no); we need it for calling from Rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on; host: the machine you will run the compiled program on
# --with-system-ffi: build the _ctypes module using an installed ffi library, see Doc/library/ctypes.rst; not used here, TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: the target platform supports the long long type
# disable-ipv6: disable ipv6 support, we don't need it here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"

./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi
@@ -1,6 +1,14 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip

COPY requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

ARG TARGETARCH
5  docker/ci/requirements.txt  (Normal file)
@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
196  docs/rfcs/2023-03-08-region-fault-tolerance.md  (Normal file)
@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---

Fault Tolerance for Region
----------------------

# Summary

This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, put another way, achieving region high availability ("HA") for the GreptimeDB cluster.

In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.

When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, though some requests may fail during the region failover. Optimization to reduce the MTTR (Mean Time To Recovery) is not a concern of this RFC, and is left for future work.

# Motivation

Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.

# Details

## Background

Some background about regions in distributed mode:

- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed across Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives in Frontend, Frontend splits the request into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.

In conclusion, as long as regions remain available, and regions can regain availability when failures do occur, overall region HA can be achieved. With this in mind, let's see how region failures are detected first.

## Failure Detection

We detect region failures in Metasrv, and do it both passively and actively. Passively means that Metasrv does not fire "are you healthy" requests at regions. Instead, we carry region health information in the heartbeat requests that are submitted to Metasrv by Datanodes.

Datanode already carries its region stats in the heartbeat request (the non-relevant fields are omitted):

```protobuf
message HeartbeatRequest {
...
// Region stats on this node
repeated RegionStat region_stats = 6;
...
}

message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
...
}
```

For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if the region were unavailable in the view of the Datanode that contains it, the Datanode just does not include that region's `RegionStat` in the heartbeat request. Or, if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.

> The heartbeat interval is now hardcoded to five seconds.

Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If some region's heartbeats are not received for a period of time, Metasrv suspects that the region might be unavailable. To decide whether a region has failed or not, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" to represent the possibility of a region's unavailability, based on the historical heartbeat arrival rate. Once the "phi" is above some pre-defined threshold, Metasrv knows the region has failed.

> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.

When Metasrv decides from heartbeats that some region has failed, that is not the final decision. Here comes the "active" detection. Before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start doing failover upon the region.

To conclude, the failure detection pseudo-code is like this:

```rust
// in Metasrv:
fn failure_detection() {
loop {
// passive detection
let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();

// find the datanodes that contain the failed regions
let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

// active detection
for (datanode, regions) in datanodes_and_regions {
if !datanode.is_healthy(regions) {
do_failover(datanode, regions);
}
}

sleep(config.detect_interval);
}
}
```

Some design considerations:

- Why detect actively when we already detect passively? Because it can happen that the network is only singly connectable (especially in a complex Cloud environment): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does the detection work on regions instead of Datanodes? Because we might face the possibility that only part of the regions in a Datanode are unavailable, not ALL regions — especially in the situation where Datanodes are used by multiple tenants. If this is the case, it's better to do failover upon the designated regions instead of all regions that reside on the Datanode. All in all, we want more subtle control over region failover.

So we detect that some regions are not available. How do we regain the availability?

## Region Failover

Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.

In general, region failover is fairly simple. Once Metasrv decides to do failover upon some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that can all help Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".

> The strategy to choose the most suitable candidates requires careful design, but that's another RFC.

Then, Metasrv sets the states of these failed regions as "passive". We should add a field to `Region`:

```protobuf
message Region {
uint64 id = 1;
string name = 2;
Partition partition = 3;

message State {
Active,
Passive,
}
State state = 4;

map<string, string> attrs = 100;
}
```

Here `Region` is used in message `RegionRoute`, which indicates how the write request is split among regions. When a region is set as "passive", Frontend knows the write to it should be rejected at the moment (region reads are not blocked, however).

> Making a region "passive" here effectively blocks writes to it. That's OK in the failover situation — the region has failed anyway. However, when dealing with active maintenance operations, region state requires a more refined design. But that's another story.

Third, Metasrv fires "close region" requests to the failed Datanodes, and fires "open region" requests to those candidates. "Close region" requests might fail due to the unavailability of Datanodes, but that's fine: it's just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set as "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.

> Currently "close region" is undefined in Datanode. It could be a local cache cleanup of region data or other resource tidy-up.

Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating it is ready to handle the region. The "call back" here is backed by its heartbeat to Metasrv. Metasrv updates the region's state to "active", so as to let Frontend lift the restriction on region writes (again, the read part of the region is untouched).

All the above steps should be managed by a remote procedure framework. That's another implementation challenge in the region failover feature. (One is the remote WAL, of course.)

A picture is worth a thousand words:

```text
+-------------------------+
| Metasrv detects region |
| failure |
+-------------------------+
|
v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions |
+----------------------------+
|
v
+-------------------------+ +-------------------------+
| Metasrv "passive" the |------>| Frontend rejects writes |
| failed regions | | to "passive" regions |
+-------------------------+ +-------------------------+
|
v
+--------------------------+ +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL | | "open" region requests |
+--------------------------+ +---------------------------+
|
|
| +-------------------------+ +-------------------------+
+--------------------->| Metasrv "active" the |------>| Frontend lifts write |
| failed regions | | restriction to regions |
+-------------------------+ +-------------------------+
|
v
+-------------------------+
| Region failover done, |
| HA regain |
+-------------------------+
```

# Alternatives

## The "Neon" Way

Remote WAL raises a problem that could harm the write throughput of the GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, making the remote WAL sit in between Frontend and Datanode — couldn't that improve our write throughput? It could, though there are some consistency issues like "read-your-writes" to solve.

However, the main reasons we don't adopt this method are two-fold:

1. Remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reliability reduction).

## No WAL, Replication instead

This method replicates regions across Datanodes directly, the common way in shared-nothing databases. Were the main region to fail, a standby region in the replication group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It requires a major redesign, but gains no significant advantage over the remote WAL method.

However, replication does have its own advantages that we can learn from to optimize this failover procedure.

# Future Work

Some optimizations we could take:

- To reduce the MTTR, we could make Metasrv choose the candidate for each region ahead of time. The candidate does some preparation work to reduce the open-region time, effectively accelerating the failover procedure.
- We can adopt the replication method to the degree that region replicas are used as fast catch-up candidates. The data difference among replicas is minor, so region failover does not need to load or exchange too much data, greatly reducing the region failover time.
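The Failure Detection section of the RFC above describes the Phi Accrual detector only in prose and by link. For orientation, here is a compact sketch of how such a phi value can be computed from observed heartbeat intervals; it assumes roughly normally distributed intervals, and all names are invented for illustration rather than taken from Metasrv's implementation:

```rust
// Minimal phi-accrual style estimator (illustrative sketch only).
struct PhiEstimator {
    intervals_secs: Vec<f64>, // observed heartbeat inter-arrival times
    last_heartbeat_secs: f64, // timestamp of the last heartbeat
}

impl PhiEstimator {
    fn record_heartbeat(&mut self, now_secs: f64) {
        self.intervals_secs.push(now_secs - self.last_heartbeat_secs);
        self.last_heartbeat_secs = now_secs;
    }

    /// phi = -log10(P(the next heartbeat is still on its way after `now`)).
    /// The larger phi grows, the more suspicious the region looks.
    fn phi(&self, now_secs: f64) -> f64 {
        if self.intervals_secs.is_empty() {
            return 0.0;
        }
        let n = self.intervals_secs.len() as f64;
        let mean = self.intervals_secs.iter().sum::<f64>() / n;
        let var = self
            .intervals_secs
            .iter()
            .map(|x| (x - mean).powi(2))
            .sum::<f64>()
            / n;
        let std_dev = var.sqrt().max(1e-3);
        let elapsed = now_secs - self.last_heartbeat_secs;
        // Tail probability of a normal distribution at `elapsed`.
        let z = (elapsed - mean) / std_dev;
        let p_later = 0.5 * erfc(z / std::f64::consts::SQRT_2);
        -p_later.max(f64::MIN_POSITIVE).log10()
    }
}

/// Abramowitz-Stegun style approximation of the complementary error function.
fn erfc(x: f64) -> f64 {
    let t = 1.0 / (1.0 + 0.3275911 * x.abs());
    let poly = t
        * (0.254829592
            + t * (-0.284496736 + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))));
    let tail = poly * (-x * x).exp();
    if x >= 0.0 { tail } else { 2.0 - tail }
}

fn main() {
    let mut d = PhiEstimator { intervals_secs: vec![], last_heartbeat_secs: 0.0 };
    // Heartbeats arriving roughly every 5 seconds (the hardcoded interval mentioned above).
    for t in [5.0, 10.1, 14.9, 20.0, 25.2] {
        d.record_heartbeat(t);
    }
    println!("phi right after a heartbeat: {:.2}", d.phi(25.3));
    println!("phi after 30s of silence:    {:.2}", d.phi(55.0));
}
```

A region would be considered failed once phi crosses the configured threshold, which would then trigger the active health check described in the RFC.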
39  scripts/fetch-dashboard-assets.sh  (Executable file)
@@ -0,0 +1,39 @@
#!/usr/bin/env bash

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"

# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the downloaded tar file
# containing the built dashboard assets.
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt

# Download the tar file containing the built dashboard assets.
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz

# Verify the checksums match; exit if they don't.
case "$(uname -s)" in
FreeBSD | Darwin)
echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
Linux)
echo "$(cat sha256.txt)" | sha256sum --check -- \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
*)
echo "The '$(uname -s)' operating system is not supported as a build host for the dashboard" >&2
exit 1
esac

# Extract the assets and clean up.
tar -xzf build.tar.gz -C "$STATIC_DIR"
rm sha256.txt
rm build.tar.gz

echo "Successfully downloaded dashboard assets to $STATIC_DIR"
@@ -59,5 +59,5 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
fi

tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
fi
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d3861c34f7920238869d0d4e50dc1e6b189d2a6b" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
@@ -204,6 +204,27 @@ pub enum Error {

#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },

#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},

#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef {
err_msg: String,
backtrace: Backtrace,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -216,7 +237,8 @@ impl ErrorExt for Error {
| Error::TableNotFound { .. }
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
| Error::InvalidEntryType { .. }
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,

Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
@@ -238,7 +260,8 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. } => source.status_code(),
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. } => source.status_code(),

Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
@@ -18,8 +18,9 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;

@@ -225,39 +226,52 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}

/// The number of regions in the datanode node.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
/// The stat of regions in the datanode node.
/// The number of regions can be obtained from the length of the vec.
///
/// Ignores any errors that occur while iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();

for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
.catalog(&catalog_name)?
.context(error::CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
for catalog_name in catalog_names {
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };

for schema_name in catalog.schema_names()? {
let schema = catalog
.schema(&schema_name)?
.context(error::SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
let Ok(schema_names) = catalog.schema_names() else { continue };
for schema_name in schema_names {
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };

for table_name in schema.table_names()? {
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let Ok(table_names) = schema.table_names() else { continue };
for table_name in table_names {
let Ok(Some(table)) = schema.table(&table_name).await else { continue };

let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;

match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
});

region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_number)
(region_number, region_stats)
}
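
The new `datanode_stat` above deliberately swallows lookup failures with `let ... else` so that one broken catalog, schema or table cannot abort the whole heartbeat report. A minimal, self-contained sketch of that "try our best" pattern follows; the `fetch_*` helpers are hypothetical stand-ins for the catalog/schema/table lookups, not GreptimeDB APIs.

// Hypothetical stand-ins for the catalog/schema/table lookups.
fn fetch_groups() -> Result<Vec<String>, String> {
    Ok(vec!["a".to_string(), "b".to_string()])
}

fn fetch_size(group: &str) -> Result<Option<u64>, String> {
    if group == "b" {
        Err("unreachable".to_string())
    } else {
        Ok(Some(42))
    }
}

/// Collects whatever it can; a failing lookup is skipped instead of turning
/// the whole collection into an error.
fn collect_stats() -> Vec<(String, u64)> {
    let mut stats = Vec::new();
    let Ok(groups) = fetch_groups() else { return stats };
    for group in groups {
        // Skip this entry on error or absence and keep going with the rest.
        let Ok(Some(size)) = fetch_size(&group) else { continue };
        stats.push((group, size));
    }
    stats
}

fn main() {
    // Only the reachable group "a" ends up in the stats.
    assert_eq!(collect_stats(), vec![("a".to_string(), 42)]);
}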

@@ -400,7 +400,7 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::{ObjectStore, ObjectStoreBuilder};
use object_store::ObjectStore;
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;

@@ -482,11 +482,9 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor).finish();
let mut builder = object_store::services::Fs::default();
builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),

@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;

@@ -87,9 +87,8 @@ impl DfTableSourceProvider {

pub async fn resolve_table(
&mut self,
table_ref: OwnedTableReference,
table_ref: TableReference<'_>,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;

let resolved_name = table_ref.to_string();

@@ -221,4 +221,8 @@ impl TableEngine for MockTableEngine {
) -> table::Result<bool> {
unimplemented!()
}

async fn close(&self) -> table::Result<()> {
Ok(())
}
}

@@ -16,13 +16,14 @@ common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
common-telemetry = { path = "../common/telemetry" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true

@@ -14,6 +14,9 @@

use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;

@@ -23,6 +26,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}

pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,

@@ -118,7 +125,7 @@ impl Client {
self.inner.set_peers(urls);
}

pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()

@@ -131,11 +138,30 @@ impl Client {
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}

pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}

pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}

pub async fn health_check(&self) -> Result<()> {
let (_, channel) = self.find_channel()?;
let mut client = HealthCheckClient::new(channel);
client.health_check(HealthCheckRequest {}).await?;
Ok(())
}
}

#[cfg(test)]

@@ -12,46 +12,67 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;
use common_telemetry::logging;
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
pub struct Database {
// The "catalog" and "schema" to be used in processing the requests at the server side.
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
// They will be carried in the request header.
catalog: String,
schema: String,
// The dbname follows the same naming rules as our MySQL, PostgreSQL and HTTP
// protocols. The server treats dbname with higher priority than catalog/schema.
dbname: String,

client: Client,
ctx: FlightContext,
}

impl Database {
/// Create database service client using catalog and schema
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
Self {
catalog: catalog.into(),
schema: schema.into(),
client,
ctx: FlightContext::default(),
..Default::default()
}
}

/// Create database service client using dbname.
///
/// This API is designed for external usage. `dbname` is:
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
dbname: dbname.into(),
client,
..Default::default()
}
}

@@ -71,14 +92,41 @@ impl Database {
self.schema = schema.into();
}

pub fn dbname(&self) -> &String {
&self.dbname
}

pub fn set_dbname(&mut self, dbname: impl Into<String>) {
self.dbname = dbname.into();
}

pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
});
}

pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}

pub async fn sql(&self, sql: &str) -> Result<Output> {

@@ -95,6 +143,24 @@ impl Database {
.await
}

pub async fn prom_range_query(
&self,
promql: &str,
start: &str,
end: &str,
step: &str,
) -> Result<Output> {
self.do_get(Request::Query(QueryRequest {
query: Some(Query::PromRangeQuery(PromRangeQuery {
query: promql.to_string(),
start: start.to_string(),
end: end.to_string(),
step: step.to_string(),
})),
}))
.await
}

pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),

@@ -116,12 +182,20 @@ impl Database {
.await
}

pub async fn flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::FlushTable(expr)),
}))
.await
}

async fn do_get(&self, request: Request) -> Result<Output> {
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};

@@ -129,7 +203,7 @@ impl Database {
ticket: request.encode_to_vec().into(),
};

let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;

// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client

@@ -138,17 +212,26 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
tonic_code,
error
);
error
})
.unwrap_err()
})?;

@@ -175,12 +258,6 @@ impl Database {
}
}

fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,
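
Taken together, the database.rs hunks above switch `insert` from the Flight path to the new GreptimeDatabase gRPC service (returning the number of affected rows) and add `new_with_dbname` plus a PromQL range query entry point. A hedged usage sketch follows; the `with_urls` constructor, the endpoint and the "public" database name are assumptions for illustration, and the insert call is left commented out because building an `InsertRequest` is elided here.

use client::{Client, Database};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed constructor and endpoint; adjust to the actual deployment.
    let client = Client::with_urls(["127.0.0.1:4001"]);

    // Address the database by name, as a multi-tenant setup would.
    let db = Database::new_with_dbname("public", client);

    // `insert` now goes through the GreptimeDatabase service and yields the
    // affected row count instead of an `Output`:
    // let rows: u32 = db.insert(insert_request).await?;

    // PromQL range queries are issued through the same handle.
    let _output = db
        .prom_range_query("up", "1677652288", "1677652348", "15s")
        .await?;

    Ok(())
}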

@@ -13,9 +13,10 @@
// limitations under the License.

use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]

@@ -26,12 +27,7 @@ pub enum Error {
backtrace: Backtrace,
},

#[snafu(display(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
addr,
tonic_code,
source
))]
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
addr: String,
tonic_code: Code,

@@ -70,9 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},

/// Error deserialized from gRPC metadata
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;

@@ -82,13 +81,15 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}

@@ -100,3 +101,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![doc = include_str!("../../../../README.md")]

use std::fmt;

use clap::Parser;

@@ -30,9 +32,39 @@ struct Command {
subcmd: SubCommand,
}

pub enum Application {
Datanode(datanode::Instance),
Frontend(frontend::Instance),
Metasrv(metasrv::Instance),
Standalone(standalone::Instance),
Cli(cli::Instance),
}

impl Application {
async fn run(&mut self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.run().await,
Application::Frontend(instance) => instance.run().await,
Application::Metasrv(instance) => instance.run().await,
Application::Standalone(instance) => instance.run().await,
Application::Cli(instance) => instance.run().await,
}
}

async fn stop(&self) -> Result<()> {
match self {
Application::Datanode(instance) => instance.stop().await,
Application::Frontend(instance) => instance.stop().await,
Application::Metasrv(instance) => instance.stop().await,
Application::Standalone(instance) => instance.stop().await,
Application::Cli(instance) => instance.stop().await,
}
}
}

impl Command {
async fn run(self) -> Result<()> {
self.subcmd.run().await
async fn build(self) -> Result<Application> {
self.subcmd.build().await
}
}

@@ -51,13 +83,28 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Application> {
match self {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
SubCommand::Datanode(cmd) => {
let app = cmd.build().await?;
Ok(Application::Datanode(app))
}
SubCommand::Frontend(cmd) => {
let app = cmd.build().await?;
Ok(Application::Frontend(app))
}
SubCommand::Metasrv(cmd) => {
let app = cmd.build().await?;
Ok(Application::Metasrv(app))
}
SubCommand::Standalone(cmd) => {
let app = cmd.build().await?;
Ok(Application::Standalone(app))
}
SubCommand::Cli(cmd) => {
let app = cmd.build().await?;
Ok(Application::Cli(app))
}
}
}
}

@@ -104,13 +151,18 @@ async fn main() -> Result<()> {
common_telemetry::init_default_metrics_recorder();
let _guard = common_telemetry::init_global_logging(app_name, log_dir, log_level, false);

let mut app = cmd.build().await?;

tokio::select! {
result = cmd.run() => {
result = app.run() => {
if let Err(err) = result {
error!(err; "Fatal error occurs!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Fatal error occurs!");
}
info!("Goodbye!");
}
}
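
The `main` hunk above now builds the selected `Application` first and then races `run` against Ctrl-C, so every subcommand shares one graceful-shutdown path. A stripped-down sketch of that select pattern, independent of the greptimedb types:

struct App;

impl App {
    async fn run(&mut self) -> Result<(), String> {
        // Long-running service work would live here.
        std::future::pending::<()>().await;
        Ok(())
    }

    async fn stop(&self) -> Result<(), String> {
        // Flush state, close listeners, etc.
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    let mut app = App;
    tokio::select! {
        result = app.run() => {
            if let Err(err) = result {
                eprintln!("fatal error: {err}");
            }
        }
        _ = tokio::signal::ctrl_c() => {
            if let Err(err) = app.stop().await {
                eprintln!("error during shutdown: {err}");
            }
            println!("Goodbye!");
        }
    }
}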

@@ -17,10 +17,24 @@ mod helper;
mod repl;

use clap::Parser;
use repl::Repl;
pub use repl::Repl;

use crate::error::Result;

pub struct Instance {
repl: Repl,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.repl.run().await
}

pub async fn stop(&self) -> Result<()> {
Ok(())
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]

@@ -28,8 +42,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.cmd.run().await
pub async fn build(self) -> Result<Instance> {
self.cmd.build().await
}
}

@@ -39,9 +53,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.run().await,
SubCommand::Attach(cmd) => cmd.build().await,
}
}
}

@@ -57,8 +71,8 @@ pub(crate) struct AttachCommand {
}

impl AttachCommand {
async fn run(self) -> Result<()> {
let mut repl = Repl::try_new(&self).await?;
repl.run().await
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance { repl })
}
}

@@ -32,6 +32,7 @@ use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;

@@ -49,7 +50,7 @@ use crate::error::{
};

/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
pub struct Repl {
/// Rustyline editor for interacting with user on command line
rl: Editor<RustylineHelper>,

@@ -166,12 +167,16 @@ impl Repl {
self.database.catalog(),
self.database.schema(),
));
let LogicalPlan::DfPlan(plan) = query_engine
.statement_to_plan(stmt, query_ctx)

let plan = query_engine
.planner()
.plan(stmt, query_ctx)
.await
.and_then(|x| query_engine.optimize(&x))
.context(PlanStatementSnafu)?;

let LogicalPlan::DfPlan(plan) =
query_engine.optimize(&plan).context(PlanStatementSnafu)?;

let plan = DFLogicalSubstraitConvertor {}
.encode(plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;

@@ -262,6 +267,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
partition_manager,
datanode_clients,
));
let state = Arc::new(QueryEngineState::new(catalog_list, Default::default()));

Ok(DatafusionQueryEngine::new(catalog_list, Default::default()))
Ok(DatafusionQueryEngine::new(state))
}

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{

@@ -21,9 +23,26 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;

pub struct Instance {
datanode: Datanode,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.datanode.start().await.context(StartDatanodeSnafu)
}

pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]

@@ -31,8 +50,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -42,9 +61,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}

@@ -69,22 +88,23 @@ struct StartCommand {
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
}

impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("Datanode start command: {:#?}", self);

let opts: DatanodeOptions = self.try_into()?;

logging::info!("Datanode options: {:#?}", opts);

Datanode::new(opts)
.await
.context(StartDatanodeSnafu)?
.start()
.await
.context(StartDatanodeSnafu)
let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;

Ok(Instance { datanode })
}
}

@@ -132,16 +152,21 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}

if let Some(data_dir) = cmd.data_dir {
opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
}

if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}

if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}
if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}

Ok(opts)
}

@@ -153,8 +178,9 @@ mod tests {
use std::io::Write;
use std::time::Duration;

use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use datanode::datanode::{CompactionConfig, ObjectStoreConfig, RegionManifestConfig};
use servers::Mode;

use super::*;

@@ -190,10 +216,14 @@ mod tests {
type = "File"
data_dir = "/tmp/greptimedb/data/"

[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
max_purge_tasks = 32

[storage.manifest]
checkpoint_margin = 9
gc_duration = '7s'
"#;
write!(file, "{}", toml_str).unwrap();

@@ -224,9 +254,9 @@ mod tests {
assert_eq!(3000, timeout_millis);
assert!(tcp_nodelay);

match options.storage {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_dir, .. }) => {
assert_eq!("/tmp/greptimedb/data/", data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),

@@ -234,11 +264,19 @@ mod tests {

assert_eq!(
CompactionConfig {
max_inflight_tasks: 4,
max_files_in_level0: 8,
max_inflight_tasks: 3,
max_files_in_level0: 7,
max_purge_tasks: 32,
sst_write_buffer_size: ReadableSize::mb(8),
},
options.compaction
options.storage.compaction,
);
assert_eq!(
RegionManifestConfig {
checkpoint_margin: Some(9),
gc_duration: Some(Duration::from_secs(7)),
},
options.storage.manifest,
);
}

@@ -26,18 +26,42 @@ pub enum Error {
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to start meta server, source: {}", source))]
StartMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,

@@ -137,7 +161,11 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments

@@ -16,10 +16,10 @@ use std::sync::Arc;

use clap::Parser;
use common_base::Plugins;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;

@@ -34,6 +34,26 @@ use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::toml_loader;

pub struct Instance {
frontend: FeInstance,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
}

pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]

@@ -41,8 +61,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -52,9 +72,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}

@@ -90,16 +110,20 @@ pub struct StartCommand {
}

impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;

let instance = Instance::try_new_distributed(&opts, plugins.clone())
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;

let mut frontend = Frontend::new(opts, instance, plugins);
frontend.start().await.context(error::StartFrontendSnafu)
instance
.build_servers(&opts, plugins)
.await
.context(error::StartFrontendSnafu)?;

Ok(Instance { frontend: instance })
}
}

@@ -12,15 +12,37 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use clap::Parser;
use common_telemetry::{info, logging, warn};
use meta_srv::bootstrap;
use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;

use crate::error::{Error, Result};
use crate::{error, toml_loader};

pub struct Instance {
instance: MetaSrvInstance,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)
}

pub async fn stop(&self) -> Result<()> {
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}

#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]

@@ -28,8 +50,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -39,9 +61,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}

@@ -60,19 +82,24 @@ struct StartCommand {
selector: Option<String>,
#[clap(long)]
use_memory_store: bool,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
}

impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);

let opts: MetaSrvOptions = self.try_into()?;

logging::info!("MetaSrv options: {:#?}", opts);

bootstrap::bootstrap_meta_srv(opts)
let instance = MetaSrvInstance::new(opts)
.await
.context(error::StartMetaServerSnafu)
.context(error::BuildMetaServerSnafu)?;

Ok(Instance { instance })
}
}

@@ -107,6 +134,13 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
opts.use_memory_store = true;
}

if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr;
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
}

Ok(opts)
}
}

@@ -129,6 +163,8 @@ mod tests {
config_file: None,
selector: Some("LoadBased".to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);

@@ -157,6 +193,8 @@ mod tests {
selector: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);

@@ -17,14 +17,12 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::info;
use datanode::datanode::{
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
};
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance as FeInstance;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;

@@ -36,7 +34,10 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;

@@ -47,8 +48,8 @@ pub struct Command {
}

impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
pub async fn build(self) -> Result<Instance> {
self.subcmd.build().await
}
}

@@ -58,9 +59,9 @@ enum SubCommand {
}

impl SubCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
SubCommand::Start(cmd) => cmd.build().await,
}
}
}

@@ -79,8 +80,7 @@ pub struct StandaloneOptions {
pub prometheus_options: Option<PrometheusOptions>,
pub prom_options: Option<PromOptions>,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub compaction: CompactionConfig,
pub storage: StorageConfig,
pub procedure: Option<ProcedureConfig>,
}

@@ -98,8 +98,7 @@ impl Default for StandaloneOptions {
prometheus_options: Some(PrometheusOptions::default()),
prom_options: Some(PromOptions::default()),
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
compaction: CompactionConfig::default(),
storage: StorageConfig::default(),
procedure: None,
}
}

@@ -126,13 +125,46 @@ impl StandaloneOptions {
enable_memory_catalog: self.enable_memory_catalog,
wal: self.wal,
storage: self.storage,
compaction: self.compaction,
procedure: self.procedure,
..Default::default()
}
}
}

pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
}

impl Instance {
pub async fn run(&mut self) -> Result<()> {
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
self.datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");

self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}

pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;

self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");

Ok(())
}
}

#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]

@@ -164,7 +196,7 @@ struct StartCommand {
}

impl StartCommand {
async fn run(self) -> Result<()> {
async fn build(self) -> Result<Instance> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);

@@ -184,33 +216,31 @@ impl StartCommand {
fe_opts, dn_opts
);

let mut datanode = Datanode::new(dn_opts.clone())
let datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;

// Start datanode instance before starting services, to avoid requests come in before internal components are started.
datanode
.start_instance()
let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;

frontend
.build_servers(&fe_opts, plugins)
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
.context(StartFrontendSnafu)?;

frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
Ok(Instance { datanode, frontend })
}
}

/// Build frontend instance in standalone mode
async fn build_frontend(
fe_opts: FrontendOptions,
plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
) -> Result<FeInstance> {
let mut frontend_instance = FeInstance::try_new_standalone(datanode_instance.clone())
.await
.context(StartFrontendSnafu)?;
frontend_instance.set_plugins(plugins.clone());
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
Ok(frontend_instance)
}

impl TryFrom<StartCommand> for FrontendOptions {

@@ -46,6 +46,9 @@ mod tests {
}
}

// TODO(LFC): Un-ignore this REPL test.
// Ignore this REPL test because some logical plans like create database are not supported yet in Datanode.
#[ignore]
#[test]
fn test_repl() {
let data_dir = create_temp_dir("data");

@@ -53,6 +53,10 @@ impl ReadableSize {
pub const fn as_mb(self) -> u64 {
self.0 / MIB
}

pub const fn as_bytes(self) -> u64 {
self.0
}
}

impl Div<u64> for ReadableSize {

13  src/common/datasource/Cargo.toml  Normal file
@@ -0,0 +1,13 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
common-error = { path = "../error" }
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
url = "2.3"

75  src/common/datasource/src/error.rs  Normal file
@@ -0,0 +1,75 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_error::prelude::*;
use url::ParseError;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },

#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String },

#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String },

#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },

#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
backtrace: Backtrace,
},

#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},

#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,

UnsupportedBackendProtocol { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. } => StatusCode::InvalidArguments,
}
}

fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}

fn as_any(&self) -> &dyn Any {
self
}
}

18  src/common/datasource/src/lib.rs  Normal file
@@ -0,0 +1,18 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod error;
pub mod lister;
pub mod object_store;
pub mod util;

83  src/common/datasource/src/lister.rs  Normal file
@@ -0,0 +1,83 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use futures::{future, TryStreamExt};
use object_store::{Entry, ObjectStore};
use regex::Regex;
use snafu::ResultExt;

use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Source {
Filename(String),
Dir,
}

pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}

impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
regex,
}
}

pub async fn list(&self) -> Result<Vec<Entry>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.list(&self.path)
.await
.context(error::ListObjectsSnafu { path: &self.path })?;

streamer
.try_filter(|f| {
let res = self
.regex
.as_ref()
.map(|x| x.is_match(f.name()))
.unwrap_or(true);
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
// make sure this file exists
let file_full_path = format!("{}{}", self.path, filename);
let _ = self.object_store.stat(&file_full_path).await.context(
error::ListObjectsSnafu {
path: &file_full_path,
},
)?;
Ok(vec![Entry::new(&file_full_path)])
}
}
}
}
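
A hedged sketch of driving the new `Lister`: list entries under a directory of a filesystem backend whose names end in `.csv`. The root path and the file pattern are illustrative only, and the crate is assumed to be imported as `common_datasource`.

use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::fs::build_fs_backend;
use regex::Regex;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Local filesystem backend rooted at an illustrative directory.
    let store = build_fs_backend("/tmp/export")?;

    // List every entry under "data/" whose name ends with ".csv".
    let lister = Lister::new(
        store,
        Source::Dir,
        "data/".to_string(),
        Some(Regex::new(r"\.csv$")?),
    );
    for entry in lister.list().await? {
        println!("found {}", entry.name());
    }
    Ok(())
}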

60  src/common/datasource/src/object_store.rs  Normal file
@@ -0,0 +1,60 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod fs;
pub mod s3;
use std::collections::HashMap;

use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};

use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};

pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";

/// parse url returns (schema,Option<host>,path)
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
url.scheme().to_string(),
url.host_str().map(|s| s.to_string()),
url.path().to_string(),
)),
Err(ParseError::RelativeUrlWithoutBase) => {
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
}
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
}
}

pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;

match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),

_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
}
}
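
To make the two branches of `parse_url` concrete, here is a small sketch of what it returns for an S3 URL and for a bare filesystem path (assuming the crate is used as `common_datasource`):

use common_datasource::object_store::parse_url;

fn main() -> Result<(), common_datasource::error::Error> {
    // Full URL: scheme "s3", host "bucket", path "/to/path/".
    assert_eq!(
        parse_url("s3://bucket/to/path/")?,
        ("s3".to_string(), Some("bucket".to_string()), "/to/path/".to_string())
    );

    // A bare path has no base URL, so it falls back to the FS scheme.
    assert_eq!(
        parse_url("/to/path/file")?,
        ("FS".to_string(), None, "/to/path/file".to_string())
    );
    Ok(())
}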

28  src/common/datasource/src/object_store/fs.rs  Normal file
@@ -0,0 +1,28 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use object_store::services::Fs;
use object_store::ObjectStore;
use snafu::ResultExt;

use crate::error::{BuildBackendSnafu, Result};

pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let mut builder = Fs::default();
builder.root(root);
let object_store = ObjectStore::new(builder)
.context(BuildBackendSnafu)?
.finish();
Ok(object_store)
}

79  src/common/datasource/src/object_store/s3.rs  Normal file
@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use object_store::services::S3;
use object_store::ObjectStore;
use snafu::ResultExt;

use crate::error::{self, Result};

const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";

pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();

builder.root(path);

builder.bucket(host);

if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}

if let Some(region) = connection.get(REGION) {
builder.region(region);
}

if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}

if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}

if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}

if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}

Ok(ObjectStore::new(builder)
.context(error::BuildBackendSnafu)?
.finish())
}
125
src/common/datasource/src/util.rs
Normal file
125
src/common/datasource/src/util.rs
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,8 @@ macro_rules! ok {
|
||||
}
|
||||
|
||||
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
let mut result = TokenStream::new();
|
||||
|
||||
// extract arg map
|
||||
let arg_pairs = parse_macro_input!(args as AttributeArgs);
|
||||
let arg_span = arg_pairs[0].span();
|
||||
@@ -59,12 +61,17 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
|
||||
let arg_types = ok!(extract_input_types(inputs));
|
||||
|
||||
// build the struct and its impl block
|
||||
let struct_code = build_struct(
|
||||
attrs,
|
||||
vis,
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
ok!(get_ident(&arg_map, "display_name", arg_span)),
|
||||
);
|
||||
// only do this when `display_name` is specified
|
||||
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
|
||||
let struct_code = build_struct(
|
||||
attrs,
|
||||
vis,
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
display_name,
|
||||
);
|
||||
result.extend(struct_code);
|
||||
}
|
||||
|
||||
let calc_fn_code = build_calc_fn(
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
arg_types,
|
||||
@@ -77,8 +84,6 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
|
||||
}
|
||||
.into();
|
||||
|
||||
let mut result = TokenStream::new();
|
||||
result.extend(struct_code);
|
||||
result.extend(calc_fn_code);
|
||||
result.extend(input_fn_code);
|
||||
result
|
||||
|
||||
@@ -11,11 +11,10 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod from_unixtime;
|
||||
mod to_unixtime;
|
||||
|
||||
use from_unixtime::FromUnixtimeFunction;
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
@@ -23,6 +22,6 @@ pub(crate) struct TimestampFunction;
|
||||
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(FromUnixtimeFunction::default()));
|
||||
registry.register(Arc::new(ToUnixtimeFunction::default()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,133 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! from_unixtime function.
|
||||
/// TODO(dennis) It can be removed after we upgrade datafusion.
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{
|
||||
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::arrow::compute;
|
||||
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct FromUnixtimeFunction;
|
||||
|
||||
const NAME: &str = "from_unixtime";
|
||||
|
||||
impl Function for FromUnixtimeFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::timestamp_millisecond_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::int64_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::Int64(_) => {
|
||||
let array = columns[0].to_arrow_array();
|
||||
// Our timestamp vector's time unit is millisecond
|
||||
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
|
||||
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
|
||||
Ok(Arc::new(
|
||||
TimestampMillisecondVector::try_from_arrow_array(
|
||||
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
|
||||
typ: ArrowDatatype::Int64,
|
||||
})?,
|
||||
)
|
||||
.context(IntoVectorSnafu {
|
||||
data_type: arrow_datatype.clone(),
|
||||
})?,
|
||||
))
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FromUnixtimeFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "FROM_UNIXTIME")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::Int64Vector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_from_unixtime() {
|
||||
let f = FromUnixtimeFunction::default();
|
||||
assert_eq!("from_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Uniform(1, valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == vec![ConcreteDataType::int64_datatype()]
|
||||
));
|
||||
|
||||
let times = vec![Some(1494410783), None, Some(1494410983)];
|
||||
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
|
||||
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(3, vector.len());
|
||||
for (i, t) in times.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
if i == 1 {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Timestamp(ts) => {
|
||||
assert_eq!(ts.value(), t.unwrap() * 1000);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
148
src/common/function/src/scalars/timestamp/to_unixtime.rs
Normal file
148
src/common/function/src/scalars/timestamp/to_unixtime.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::Timestamp;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::StringType;
|
||||
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToUnixtimeFunction;
|
||||
|
||||
const NAME: &str = "to_unixtime";
|
||||
|
||||
fn convert_to_seconds(arg: &str) -> Option<i64> {
|
||||
match Timestamp::from_str(arg) {
|
||||
Ok(ts) => {
|
||||
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
|
||||
Some(ts.value().div_euclid(sec_mul))
|
||||
}
|
||||
Err(_err) => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ToUnixtimeFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::timestamp_second_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(
|
||||
vec![ConcreteDataType::String(StringType)],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
error::InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let array = columns[0].to_arrow_array();
|
||||
let vector = StringVector::try_from_arrow_array(&array).unwrap();
|
||||
Ok(Arc::new(Int64Vector::from(
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
|
||||
.collect::<Vec<_>>(),
|
||||
)))
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ToUnixtimeFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "TO_UNIXTIME")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::StringType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::{ToUnixtimeFunction, *};
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_to_unixtime() {
|
||||
let f = ToUnixtimeFunction::default();
|
||||
assert_eq!("to_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Exact(valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == vec![ConcreteDataType::String(StringType)]
|
||||
));
|
||||
|
||||
let times = vec![
|
||||
Some("2023-03-01T06:35:02Z"),
|
||||
None,
|
||||
Some("2022-06-30T23:59:60Z"),
|
||||
Some("invalid_time_stamp"),
|
||||
];
|
||||
let results = vec![Some(1677652502), None, Some(1656633600), None];
|
||||
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in times.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
if i == 1 || i == 3 {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Int64(ts) => {
|
||||
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -32,7 +32,7 @@ pub enum Error {
|
||||
DecodeInsert { source: DecodeError },
|
||||
|
||||
#[snafu(display("Illegal insert data"))]
|
||||
IllegalInsertData,
|
||||
IllegalInsertData { backtrace: Backtrace },
|
||||
|
||||
#[snafu(display("Column datatype error, source: {}", source))]
|
||||
ColumnDataType {
|
||||
|
||||
@@ -26,7 +26,7 @@ tower = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4"
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
[[bench]]
|
||||
name = "bench_main"
|
||||
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
async-stream.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
@@ -14,6 +15,7 @@ object-store = { path = "../../object-store" }
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
smallvec = "1"
|
||||
backon = "0.4.0"
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::string::FromUtf8Error;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::prelude::*;
|
||||
@@ -47,10 +48,11 @@ pub enum Error {
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to put {}, source: {}", key, source))]
|
||||
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
|
||||
PutState {
|
||||
key: String,
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to delete {}, source: {}", key, source))]
|
||||
@@ -59,10 +61,18 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
|
||||
DeleteStates {
|
||||
keys: String,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
|
||||
ListState {
|
||||
path: String,
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read {}, source: {}", key, source))]
|
||||
@@ -97,6 +107,19 @@ pub enum Error {
|
||||
source: Arc<Error>,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
|
||||
procedure_id,
|
||||
source
|
||||
))]
|
||||
RetryTimesExceeded {
|
||||
source: Arc<Error>,
|
||||
procedure_id: ProcedureId,
|
||||
},
|
||||
|
||||
#[snafu(display("Corrupted data, error: {source}"))]
|
||||
CorruptedData { source: FromUtf8Error },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -104,19 +127,22 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::External { source } => source.status_code(),
|
||||
Error::External { source }
|
||||
| Error::PutState { source, .. }
|
||||
| Error::DeleteStates { source, .. }
|
||||
| Error::ListState { source, .. } => source.status_code(),
|
||||
|
||||
Error::ToJson { .. }
|
||||
| Error::PutState { .. }
|
||||
| Error::DeleteState { .. }
|
||||
| Error::ListState { .. }
|
||||
| Error::ReadState { .. }
|
||||
| Error::FromJson { .. }
|
||||
| Error::RetryTimesExceeded { .. }
|
||||
| Error::RetryLater { .. }
|
||||
| Error::WaitWatcher { .. } => StatusCode::Internal,
|
||||
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::ProcedurePanic { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedurePanic { .. } | Error::CorruptedData { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedureExec { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
pub mod error;
|
||||
pub mod local;
|
||||
mod procedure;
|
||||
mod store;
|
||||
pub mod store;
|
||||
pub mod watcher;
|
||||
|
||||
pub use crate::error::{Error, Result};
|
||||
|
||||
@@ -17,10 +17,11 @@ mod runner;
|
||||
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use backon::ExponentialBuilder;
|
||||
use common_telemetry::logging;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ensure;
|
||||
use tokio::sync::watch::{self, Receiver, Sender};
|
||||
use tokio::sync::Notify;
|
||||
@@ -29,7 +30,7 @@ use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
|
||||
use crate::local::lock::LockMap;
|
||||
use crate::local::runner::Runner;
|
||||
use crate::procedure::BoxedProcedureLoader;
|
||||
use crate::store::{ObjectStateStore, ProcedureMessage, ProcedureStore, StateStoreRef};
|
||||
use crate::store::{ProcedureMessage, ProcedureStore, StateStoreRef};
|
||||
use crate::{
|
||||
BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
|
||||
ProcedureWithId, Watcher,
|
||||
@@ -289,22 +290,35 @@ impl ManagerContext {
|
||||
/// Config for [LocalManager].
|
||||
#[derive(Debug)]
|
||||
pub struct ManagerConfig {
|
||||
/// Object store
|
||||
pub object_store: ObjectStore,
|
||||
pub max_retry_times: usize,
|
||||
pub retry_delay: Duration,
|
||||
}
|
||||
|
||||
impl Default for ManagerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A [ProcedureManager] that maintains procedure states locally.
|
||||
pub struct LocalManager {
|
||||
manager_ctx: Arc<ManagerContext>,
|
||||
state_store: StateStoreRef,
|
||||
max_retry_times: usize,
|
||||
retry_delay: Duration,
|
||||
}
|
||||
|
||||
impl LocalManager {
|
||||
/// Create a new [LocalManager] with specific `config`.
|
||||
pub fn new(config: ManagerConfig) -> LocalManager {
|
||||
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
|
||||
LocalManager {
|
||||
manager_ctx: Arc::new(ManagerContext::new()),
|
||||
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
|
||||
state_store,
|
||||
max_retry_times: config.max_retry_times,
|
||||
retry_delay: config.retry_delay,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -321,7 +335,11 @@ impl LocalManager {
|
||||
procedure,
|
||||
manager_ctx: self.manager_ctx.clone(),
|
||||
step,
|
||||
exponential_builder: ExponentialBuilder::default()
|
||||
.with_min_delay(self.retry_delay)
|
||||
.with_max_times(self.max_retry_times),
|
||||
store: ProcedureStore::new(self.state_store.clone()),
|
||||
rolling_back: false,
|
||||
};
|
||||
|
||||
let watcher = meta.state_receiver.clone();
|
||||
@@ -411,7 +429,7 @@ impl ProcedureManager for LocalManager {
|
||||
mod test_util {
|
||||
use common_test_util::temp_dir::TempDir;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
use object_store::ObjectStore;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -421,8 +439,9 @@ mod test_util {
|
||||
|
||||
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
ObjectStore::new(accessor).finish()
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
ObjectStore::new(builder).unwrap().finish()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -434,6 +453,7 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::store::ObjectStateStore;
|
||||
use crate::{Context, Procedure, Status};
|
||||
|
||||
#[test]
|
||||
@@ -542,9 +562,11 @@ mod tests {
|
||||
fn test_register_loader() {
|
||||
let dir = create_temp_dir("register");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
@@ -561,9 +583,11 @@ mod tests {
|
||||
let dir = create_temp_dir("recover");
|
||||
let object_store = test_util::new_object_store(&dir);
|
||||
let config = ManagerConfig {
|
||||
object_store: object_store.clone(),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
@@ -605,9 +629,11 @@ mod tests {
|
||||
async fn test_submit_procedure() {
|
||||
let dir = create_temp_dir("submit");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
assert!(manager
|
||||
@@ -651,9 +677,11 @@ mod tests {
|
||||
async fn test_state_changed_on_err() {
|
||||
let dir = create_temp_dir("on_err");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct MockProcedure {
|
||||
|
||||
@@ -15,15 +15,15 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use backon::{BackoffBuilder, ExponentialBuilder};
|
||||
use common_telemetry::logging;
|
||||
use tokio::time;
|
||||
|
||||
use crate::error::{ProcedurePanicSnafu, Result};
|
||||
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
|
||||
use crate::store::ProcedureStore;
|
||||
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
|
||||
|
||||
const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);
|
||||
use crate::ProcedureState::Retrying;
|
||||
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ExecResult {
|
||||
@@ -108,7 +108,9 @@ pub(crate) struct Runner {
|
||||
pub(crate) procedure: BoxedProcedure,
|
||||
pub(crate) manager_ctx: Arc<ManagerContext>,
|
||||
pub(crate) step: u32,
|
||||
pub(crate) exponential_builder: ExponentialBuilder,
|
||||
pub(crate) store: ProcedureStore,
|
||||
pub(crate) rolling_back: bool,
|
||||
}
|
||||
|
||||
impl Runner {
|
||||
@@ -164,18 +166,56 @@ impl Runner {
|
||||
provider: self.manager_ctx.clone(),
|
||||
};
|
||||
|
||||
self.rolling_back = false;
|
||||
self.execute_once_with_retry(&ctx).await;
|
||||
}
|
||||
|
||||
async fn execute_once_with_retry(&mut self, ctx: &Context) {
|
||||
let mut retry = self.exponential_builder.build();
|
||||
let mut retry_times = 0;
|
||||
loop {
|
||||
match self.execute_once(&ctx).await {
|
||||
ExecResult::Continue => (),
|
||||
match self.execute_once(ctx).await {
|
||||
ExecResult::Done | ExecResult::Failed => return,
|
||||
ExecResult::Continue => (),
|
||||
ExecResult::RetryLater => {
|
||||
self.wait_on_err().await;
|
||||
retry_times += 1;
|
||||
if let Some(d) = retry.next() {
|
||||
self.wait_on_err(d, retry_times).await;
|
||||
} else {
|
||||
assert!(self.meta.state().is_retrying());
|
||||
if let Retrying { error } = self.meta.state() {
|
||||
self.meta.set_state(ProcedureState::failed(Arc::new(
|
||||
Error::RetryTimesExceeded {
|
||||
source: error,
|
||||
procedure_id: self.meta.id,
|
||||
},
|
||||
)))
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
|
||||
if let Err(e) = self.rollback_procedure().await {
|
||||
self.rolling_back = true;
|
||||
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
|
||||
return ExecResult::RetryLater;
|
||||
}
|
||||
self.meta.set_state(ProcedureState::failed(error));
|
||||
ExecResult::Failed
|
||||
}
|
||||
|
||||
async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
|
||||
// if rolling_back, there is no need to execute again.
|
||||
if self.rolling_back {
|
||||
// We can definitely get the previous error here.
|
||||
let state = self.meta.state();
|
||||
let err = state.error().unwrap();
|
||||
return self.rollback(err.clone()).await;
|
||||
}
|
||||
match self.procedure.execute(ctx).await {
|
||||
Ok(status) => {
|
||||
logging::debug!(
|
||||
@@ -186,8 +226,11 @@ impl Runner {
|
||||
status.need_persist(),
|
||||
);
|
||||
|
||||
if status.need_persist() && self.persist_procedure().await.is_err() {
|
||||
return ExecResult::RetryLater;
|
||||
if status.need_persist() {
|
||||
if let Err(err) = self.persist_procedure().await {
|
||||
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
|
||||
return ExecResult::RetryLater;
|
||||
}
|
||||
}
|
||||
|
||||
match status {
|
||||
@@ -196,7 +239,8 @@ impl Runner {
|
||||
self.on_suspended(subprocedures).await;
|
||||
}
|
||||
Status::Done => {
|
||||
if self.commit_procedure().await.is_err() {
|
||||
if let Err(e) = self.commit_procedure().await {
|
||||
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
|
||||
return ExecResult::RetryLater;
|
||||
}
|
||||
|
||||
@@ -217,17 +261,12 @@ impl Runner {
|
||||
);
|
||||
|
||||
if e.is_retry_later() {
|
||||
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
|
||||
return ExecResult::RetryLater;
|
||||
}
|
||||
|
||||
self.meta.set_state(ProcedureState::failed(Arc::new(e)));
|
||||
|
||||
// Write rollback key so we can skip this procedure while recovering procedures.
|
||||
if self.rollback_procedure().await.is_err() {
|
||||
return ExecResult::RetryLater;
|
||||
}
|
||||
|
||||
ExecResult::Failed
|
||||
self.rollback(Arc::new(e)).await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -261,7 +300,9 @@ impl Runner {
|
||||
procedure,
|
||||
manager_ctx: self.manager_ctx.clone(),
|
||||
step,
|
||||
exponential_builder: self.exponential_builder.clone(),
|
||||
store: self.store.clone(),
|
||||
rolling_back: false,
|
||||
};
|
||||
|
||||
// Insert the procedure. We already check the procedure existence before inserting
|
||||
@@ -285,8 +326,16 @@ impl Runner {
|
||||
});
|
||||
}
|
||||
|
||||
async fn wait_on_err(&self) {
|
||||
time::sleep(ERR_WAIT_DURATION).await;
|
||||
/// Extend the retry time to wait for the next retry.
|
||||
async fn wait_on_err(&self, d: Duration, i: u64) {
|
||||
logging::info!(
|
||||
"Procedure {}-{} retry for the {} times after {} millis",
|
||||
self.procedure.type_name(),
|
||||
self.meta.id,
|
||||
i,
|
||||
d.as_millis(),
|
||||
);
|
||||
time::sleep(d).await;
|
||||
}
|
||||
|
||||
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
|
||||
@@ -416,14 +465,15 @@ mod tests {
|
||||
procedure,
|
||||
manager_ctx: Arc::new(ManagerContext::new()),
|
||||
step: 0,
|
||||
exponential_builder: ExponentialBuilder::default(),
|
||||
store,
|
||||
rolling_back: false,
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
|
||||
let dir = format!("{procedure_id}/");
|
||||
let object = object_store.object(&dir);
|
||||
let lister = object.list().await.unwrap();
|
||||
let lister = object_store.list(&dir).await.unwrap();
|
||||
let mut files_in_dir: Vec<_> = lister
|
||||
.map_ok(|de| de.name().to_string())
|
||||
.try_collect()
|
||||
@@ -744,7 +794,7 @@ mod tests {
|
||||
|
||||
let res = runner.execute_once(&ctx).await;
|
||||
assert!(res.is_retry_later(), "{res:?}");
|
||||
assert!(meta.state().is_running());
|
||||
assert!(meta.state().is_retrying());
|
||||
|
||||
let res = runner.execute_once(&ctx).await;
|
||||
assert!(res.is_done(), "{res:?}");
|
||||
@@ -752,6 +802,36 @@ mod tests {
|
||||
check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_execute_exceed_max_retry_later() {
|
||||
let exec_fn =
|
||||
|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
|
||||
|
||||
let exceed_max_retry_later = ProcedureAdapter {
|
||||
data: "exceed_max_retry_later".to_string(),
|
||||
lock_key: LockKey::single("catalog.schema.table"),
|
||||
exec_fn,
|
||||
};
|
||||
|
||||
let dir = create_temp_dir("exceed_max_retry_later");
|
||||
let meta = exceed_max_retry_later.new_meta(ROOT_ID);
|
||||
let object_store = test_util::new_object_store(&dir);
|
||||
let procedure_store = ProcedureStore::from(object_store.clone());
|
||||
let mut runner = new_runner(
|
||||
meta.clone(),
|
||||
Box::new(exceed_max_retry_later),
|
||||
procedure_store,
|
||||
);
|
||||
runner.exponential_builder = ExponentialBuilder::default()
|
||||
.with_min_delay(Duration::from_millis(1))
|
||||
.with_max_times(3);
|
||||
|
||||
// Run the runner and execute the procedure.
|
||||
runner.execute_procedure_in_loop().await;
|
||||
let err = meta.state().error().unwrap().to_string();
|
||||
assert!(err.contains("Procedure retry exceeded max times"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_child_error() {
|
||||
let mut times = 0;
|
||||
@@ -819,7 +899,7 @@ mod tests {
|
||||
// Replace the manager ctx.
|
||||
runner.manager_ctx = manager_ctx;
|
||||
|
||||
// Run the runer and execute the procedure.
|
||||
// Run the runner and execute the procedure.
|
||||
runner.run().await;
|
||||
let err = meta.state().error().unwrap().to_string();
|
||||
assert!(err.contains("subprocedure failed"), "{err}");
|
||||
|
||||
@@ -206,6 +206,8 @@ pub enum ProcedureState {
|
||||
Running,
|
||||
/// The procedure is finished.
|
||||
Done,
|
||||
/// The procedure is failed and can be retried.
|
||||
Retrying { error: Arc<Error> },
|
||||
/// The procedure is failed and cannot proceed anymore.
|
||||
Failed { error: Arc<Error> },
|
||||
}
|
||||
@@ -216,6 +218,11 @@ impl ProcedureState {
|
||||
ProcedureState::Failed { error }
|
||||
}
|
||||
|
||||
/// Returns a [ProcedureState] with retrying state.
|
||||
pub fn retrying(error: Arc<Error>) -> ProcedureState {
|
||||
ProcedureState::Retrying { error }
|
||||
}
|
||||
|
||||
/// Returns true if the procedure state is running.
|
||||
pub fn is_running(&self) -> bool {
|
||||
matches!(self, ProcedureState::Running)
|
||||
@@ -231,10 +238,16 @@ impl ProcedureState {
|
||||
matches!(self, ProcedureState::Failed { .. })
|
||||
}
|
||||
|
||||
/// Returns true if the procedure state is retrying.
|
||||
pub fn is_retrying(&self) -> bool {
|
||||
matches!(self, ProcedureState::Retrying { .. })
|
||||
}
|
||||
|
||||
/// Returns the error.
|
||||
pub fn error(&self) -> Option<&Arc<Error>> {
|
||||
match self {
|
||||
ProcedureState::Failed { error } => Some(error),
|
||||
ProcedureState::Retrying { error } => Some(error),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ use crate::error::{Result, ToJsonSnafu};
|
||||
pub(crate) use crate::store::state_store::{ObjectStateStore, StateStoreRef};
|
||||
use crate::{BoxedProcedure, ProcedureId};
|
||||
|
||||
mod state_store;
|
||||
pub mod state_store;
|
||||
|
||||
/// Serialized data of a procedure.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -248,15 +248,15 @@ mod tests {
|
||||
use async_trait::async_trait;
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
use crate::{Context, LockKey, Procedure, Status};
|
||||
|
||||
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
|
||||
ProcedureStore::from(object_store)
|
||||
}
|
||||
|
||||
@@ -15,22 +15,25 @@
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use async_trait::async_trait;
|
||||
use futures::{Stream, TryStreamExt};
|
||||
use object_store::{ObjectMode, ObjectStore};
|
||||
use common_error::ext::PlainError;
|
||||
use common_error::prelude::{BoxedError, StatusCode};
|
||||
use futures::{Stream, StreamExt};
|
||||
use object_store::{EntryMode, Metakey, ObjectStore};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};
|
||||
use crate::error::{DeleteStateSnafu, ListStateSnafu, PutStateSnafu, Result};
|
||||
|
||||
/// Key value from state store.
|
||||
type KeyValue = (String, Vec<u8>);
|
||||
pub type KeyValue = (String, Vec<u8>);
|
||||
|
||||
/// Stream that yields [KeyValue].
|
||||
type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
|
||||
pub type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
|
||||
|
||||
/// Storage layer for persisting procedure's state.
|
||||
#[async_trait]
|
||||
pub(crate) trait StateStore: Send + Sync {
|
||||
pub trait StateStore: Send + Sync {
|
||||
/// Puts `key` and `value` into the store.
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()>;
|
||||
|
||||
@@ -50,13 +53,13 @@ pub(crate) type StateStoreRef = Arc<dyn StateStore>;
|
||||
|
||||
/// [StateStore] based on [ObjectStore].
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ObjectStateStore {
|
||||
pub struct ObjectStateStore {
|
||||
store: ObjectStore,
|
||||
}
|
||||
|
||||
impl ObjectStateStore {
|
||||
/// Returns a new [ObjectStateStore] with specific `store`.
|
||||
pub(crate) fn new(store: ObjectStore) -> ObjectStateStore {
|
||||
pub fn new(store: ObjectStore) -> ObjectStateStore {
|
||||
ObjectStateStore { store }
|
||||
}
|
||||
}
|
||||
@@ -64,49 +67,83 @@ impl ObjectStateStore {
|
||||
#[async_trait]
|
||||
impl StateStore for ObjectStateStore {
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()> {
|
||||
let object = self.store.object(key);
|
||||
object.write(value).await.context(PutStateSnafu { key })
|
||||
self.store
|
||||
.write(key, value)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(PutStateSnafu { key })
|
||||
}
|
||||
|
||||
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
|
||||
let path_string = path.to_string();
|
||||
|
||||
let lister = self
|
||||
let mut lister = self
|
||||
.store
|
||||
.object(path)
|
||||
.scan()
|
||||
.scan(path)
|
||||
.await
|
||||
.map_err(|e| Error::ListState {
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.with_context(|_| ListStateSnafu {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
})?;
|
||||
|
||||
let stream = lister
|
||||
.try_filter_map(|entry| async move {
|
||||
let store = self.store.clone();
|
||||
|
||||
let stream = try_stream!({
|
||||
while let Some(res) = lister.next().await {
|
||||
let entry = res
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: &path_string })?;
|
||||
let key = entry.path();
|
||||
let key_value = match entry.mode().await? {
|
||||
ObjectMode::FILE => {
|
||||
let value = entry.read().await?;
|
||||
|
||||
Some((key.to_string(), value))
|
||||
}
|
||||
ObjectMode::DIR | ObjectMode::Unknown => None,
|
||||
};
|
||||
|
||||
Ok(key_value)
|
||||
})
|
||||
.map_err(move |e| Error::ListState {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
});
|
||||
let metadata = store
|
||||
.metadata(&entry, Metakey::Mode)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
if let EntryMode::FILE = metadata.mode() {
|
||||
let value = store
|
||||
.read(key)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
yield (key.to_string(), value);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
|
||||
async fn delete(&self, keys: &[String]) -> Result<()> {
|
||||
for key in keys {
|
||||
let object = self.store.object(key);
|
||||
object.delete().await.context(DeleteStateSnafu { key })?;
|
||||
self.store
|
||||
.delete(key)
|
||||
.await
|
||||
.context(DeleteStateSnafu { key })?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -116,8 +153,8 @@ impl StateStore for ObjectStateStore {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -125,8 +162,10 @@ mod tests {
|
||||
async fn test_object_state_store() {
|
||||
let dir = create_temp_dir("state_store");
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let state_store = ObjectStateStore::new(object_store);
|
||||
|
||||
let data: Vec<_> = state_store
|
||||
|
||||
@@ -33,6 +33,9 @@ pub async fn wait(watcher: &mut Watcher) -> Result<()> {
|
||||
ProcedureState::Failed { error } => {
|
||||
return Err(error.clone()).context(ProcedureExecSnafu);
|
||||
}
|
||||
ProcedureState::Retrying { error } => {
|
||||
return Err(error.clone()).context(ProcedureExecSnafu);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::prelude::*;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -70,6 +71,26 @@ pub enum Error {
|
||||
source: datafusion_common::DataFusionError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Column {} not exists in table {}", column_name, table_name))]
|
||||
ColumnNotExists {
|
||||
column_name: String,
|
||||
table_name: String,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to cast vector of type '{:?}' to type '{:?}', source: {}",
|
||||
from_type,
|
||||
to_type,
|
||||
source
|
||||
))]
|
||||
CastVector {
|
||||
from_type: ConcreteDataType,
|
||||
to_type: ConcreteDataType,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -81,11 +102,14 @@ impl ErrorExt for Error {
|
||||
| Error::CreateRecordBatches { .. }
|
||||
| Error::PollStream { .. }
|
||||
| Error::Format { .. }
|
||||
| Error::InitRecordbatchStream { .. } => StatusCode::Internal,
|
||||
| Error::InitRecordbatchStream { .. }
|
||||
| Error::ColumnNotExists { .. } => StatusCode::Internal,
|
||||
|
||||
Error::External { source } => source.status_code(),
|
||||
|
||||
Error::SchemaConversion { source, .. } => source.status_code(),
|
||||
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,14 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use serde::ser::{Error, SerializeStruct};
|
||||
use serde::{Serialize, Serializer};
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, CastVectorSnafu, ColumnNotExistsSnafu, Result};
|
||||
use crate::DfRecordBatch;
|
||||
|
||||
/// A two-dimensional batch of column-oriented data with a defined schema.
|
||||
@@ -108,6 +110,41 @@ impl RecordBatch {
|
||||
pub fn rows(&self) -> RecordBatchRowIterator<'_> {
|
||||
RecordBatchRowIterator::new(self)
|
||||
}
|
||||
|
||||
pub fn column_vectors(
|
||||
&self,
|
||||
table_name: &str,
|
||||
table_schema: SchemaRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut vectors = HashMap::with_capacity(self.num_columns());
|
||||
|
||||
// column schemas in recordbatch must match its vectors, otherwise it's corrupted
|
||||
for (vector_schema, vector) in self.schema.column_schemas().iter().zip(self.columns.iter())
|
||||
{
|
||||
let column_name = &vector_schema.name;
|
||||
let column_schema =
|
||||
table_schema
|
||||
.column_schema_by_name(column_name)
|
||||
.context(ColumnNotExistsSnafu {
|
||||
table_name,
|
||||
column_name,
|
||||
})?;
|
||||
let vector = if vector_schema.data_type != column_schema.data_type {
|
||||
vector
|
||||
.cast(&column_schema.data_type)
|
||||
.with_context(|_| CastVectorSnafu {
|
||||
from_type: vector.data_type(),
|
||||
to_type: column_schema.data_type.clone(),
|
||||
})?
|
||||
} else {
|
||||
vector.clone()
|
||||
};
|
||||
|
||||
vectors.insert(column_name.clone(), vector);
|
||||
}
|
||||
|
||||
Ok(vectors)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for RecordBatch {
|
||||
|
||||
@@ -5,6 +5,7 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
metrics = "0.20"
|
||||
@@ -12,6 +13,7 @@ once_cell = "1.12"
|
||||
paste.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -26,6 +27,19 @@ pub enum Error {
|
||||
source: std::io::Error,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
#[snafu(display("Repeated task {} not started yet", name))]
|
||||
IllegalState { name: String, backtrace: Backtrace },
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to wait for repeated task {} to stop, source: {}",
|
||||
name,
|
||||
source
|
||||
))]
|
||||
WaitGcTaskStop {
|
||||
name: String,
|
||||
source: JoinError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
|
||||
@@ -14,7 +14,8 @@
|
||||
|
||||
pub mod error;
|
||||
mod global;
|
||||
pub mod metric;
|
||||
mod metrics;
|
||||
mod repeated_task;
|
||||
pub mod runtime;
|
||||
|
||||
pub use global::{
|
||||
@@ -23,4 +24,5 @@ pub use global::{
|
||||
spawn_read, spawn_write, write_runtime,
|
||||
};
|
||||
|
||||
pub use crate::repeated_task::{RepeatedTask, TaskFunction, TaskFunctionRef};
|
||||
pub use crate::runtime::{Builder, JoinError, JoinHandle, Runtime};
|
||||
|
||||
174
src/common/runtime/src/repeated_task.rs
Normal file
174
src/common/runtime/src/repeated_task.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_telemetry::logging;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::error::{IllegalStateSnafu, Result, WaitGcTaskStopSnafu};
|
||||
use crate::Runtime;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait TaskFunction<E: ErrorExt> {
|
||||
async fn call(&self) -> std::result::Result<(), E>;
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub type TaskFunctionRef<E> = Arc<dyn TaskFunction<E> + Send + Sync>;
|
||||
|
||||
pub struct RepeatedTask<E> {
|
||||
cancel_token: Mutex<Option<CancellationToken>>,
|
||||
task_handle: Mutex<Option<JoinHandle<()>>>,
|
||||
started: AtomicBool,
|
||||
interval: Duration,
|
||||
task_fn: TaskFunctionRef<E>,
|
||||
}
|
||||
|
||||
impl<E: ErrorExt> std::fmt::Display for RepeatedTask<E> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "RepeatedTask({})", self.task_fn.name())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: ErrorExt> std::fmt::Debug for RepeatedTask<E> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_tuple("RepeatedTask")
|
||||
.field(&self.task_fn.name())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: ErrorExt + 'static> RepeatedTask<E> {
|
||||
pub fn new(interval: Duration, task_fn: TaskFunctionRef<E>) -> Self {
|
||||
Self {
|
||||
cancel_token: Mutex::new(None),
|
||||
task_handle: Mutex::new(None),
|
||||
started: AtomicBool::new(false),
|
||||
interval,
|
||||
task_fn,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn started(&self) -> bool {
|
||||
self.started.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub async fn start(&self, runtime: Runtime) -> Result<()> {
|
||||
let token = CancellationToken::new();
|
||||
let interval = self.interval;
|
||||
let child = token.child_token();
|
||||
let task_fn = self.task_fn.clone();
|
||||
// TODO(hl): Maybe spawn to a blocking runtime.
|
||||
let handle = runtime.spawn(async move {
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = tokio::time::sleep(interval) => {}
|
||||
_ = child.cancelled() => {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if let Err(e) = task_fn.call().await {
|
||||
logging::error!(e; "Failed to run repeated task: {}", task_fn.name());
|
||||
}
|
||||
}
|
||||
});
|
||||
*self.cancel_token.lock().await = Some(token);
|
||||
*self.task_handle.lock().await = Some(handle);
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
|
||||
logging::debug!(
|
||||
"Repeated task {} started with interval: {:?}",
|
||||
self.task_fn.name(),
|
||||
self.interval
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
let name = self.task_fn.name();
|
||||
ensure!(
|
||||
self.started
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok(),
|
||||
IllegalStateSnafu { name }
|
||||
);
|
||||
let token = self
|
||||
.cancel_token
|
||||
.lock()
|
||||
.await
|
||||
.take()
|
||||
.context(IllegalStateSnafu { name })?;
|
||||
let handle = self
|
||||
.task_handle
|
||||
.lock()
|
||||
.await
|
||||
.take()
|
||||
.context(IllegalStateSnafu { name })?;
|
||||
|
||||
token.cancel();
|
||||
handle.await.context(WaitGcTaskStopSnafu { name })?;
|
||||
|
||||
logging::debug!("Repeated task {} stopped", name);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::atomic::AtomicI32;
|
||||
|
||||
use super::*;
|
||||
|
||||
struct TickTask {
|
||||
n: AtomicI32,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TaskFunction<crate::error::Error> for TickTask {
|
||||
fn name(&self) -> &str {
|
||||
"test"
|
||||
}
|
||||
|
||||
async fn call(&self) -> Result<()> {
|
||||
self.n.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_repeated_task() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let task_fn = Arc::new(TickTask {
|
||||
n: AtomicI32::new(0),
|
||||
});
|
||||
|
||||
let task = RepeatedTask::new(Duration::from_millis(100), task_fn.clone());
|
||||
|
||||
task.start(crate::bg_runtime()).await.unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(550)).await;
|
||||
task.stop().await.unwrap();
|
||||
|
||||
assert_eq!(task_fn.n.load(Ordering::Relaxed), 5);
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,7 @@ use tokio::sync::oneshot;
|
||||
pub use tokio::task::{JoinError, JoinHandle};
|
||||
|
||||
use crate::error::*;
|
||||
use crate::metric::*;
|
||||
use crate::metrics::*;
|
||||
|
||||
/// A runtime to run future tasks
|
||||
#[derive(Clone, Debug)]
|
||||
|
||||
@@ -581,7 +581,7 @@ pub fn expression_from_df_expr(
|
||||
| Expr::ScalarSubquery(..)
|
||||
| Expr::Placeholder { .. }
|
||||
| Expr::QualifiedWildcard { .. } => todo!(),
|
||||
Expr::GroupingSet(_) => UnsupportedExprSnafu {
|
||||
Expr::GroupingSet(_) | Expr::OuterReferenceColumn(_, _) => UnsupportedExprSnafu {
|
||||
name: expr.to_string(),
|
||||
}
|
||||
.fail()?,
|
||||
|
||||
@@ -22,9 +22,10 @@ use catalog::CatalogManagerRef;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_telemetry::debug;
|
||||
use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
|
||||
use datafusion::common::{DFField, DFSchema, OwnedTableReference};
|
||||
use datafusion::common::{DFField, DFSchema};
|
||||
use datafusion::datasource::DefaultTableSource;
|
||||
use datafusion::physical_plan::project_schema;
|
||||
use datafusion::sql::TableReference;
|
||||
use datafusion_expr::{Filter, LogicalPlan, TableScan};
|
||||
use prost::Message;
|
||||
use session::context::QueryContext;
|
||||
@@ -240,13 +241,13 @@ impl DFLogicalSubstraitConvertor {
|
||||
.projection
|
||||
.map(|mask_expr| self.convert_mask_expression(mask_expr));
|
||||
|
||||
let table_ref = OwnedTableReference::Full {
|
||||
catalog: catalog_name.clone(),
|
||||
schema: schema_name.clone(),
|
||||
table: table_name.clone(),
|
||||
};
|
||||
let table_ref = TableReference::full(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
table_name.clone(),
|
||||
);
|
||||
let adapter = table_provider
|
||||
.resolve_table(table_ref)
|
||||
.resolve_table(table_ref.clone())
|
||||
.await
|
||||
.with_context(|_| ResolveTableSnafu {
|
||||
table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
|
||||
@@ -272,14 +273,13 @@ impl DFLogicalSubstraitConvertor {
|
||||
};
|
||||
|
||||
// Calculate the projected schema
|
||||
let qualified = &format_full_table_name(&catalog_name, &schema_name, &table_name);
|
||||
let projected_schema = Arc::new(
|
||||
project_schema(&stored_schema, projection.as_ref())
|
||||
.and_then(|x| {
|
||||
DFSchema::new_with_metadata(
|
||||
x.fields()
|
||||
.iter()
|
||||
.map(|f| DFField::from_qualified(qualified, f.clone()))
|
||||
.map(|f| DFField::from_qualified(table_ref.clone(), f.clone()))
|
||||
.collect(),
|
||||
x.metadata().clone(),
|
||||
)
|
||||
@@ -291,7 +291,7 @@ impl DFLogicalSubstraitConvertor {
|
||||
|
||||
// TODO(ruihang): Support limit(fetch)
|
||||
Ok(LogicalPlan::TableScan(TableScan {
|
||||
table_name: qualified.to_string(),
|
||||
table_name: table_ref,
|
||||
source: adapter,
|
||||
projection,
|
||||
projected_schema,
|
||||
@@ -620,10 +620,13 @@ mod test {
|
||||
let projected_schema =
|
||||
Arc::new(DFSchema::new_with_metadata(projected_fields, Default::default()).unwrap());
|
||||
|
||||
let table_name = TableReference::full(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
DEFAULT_TABLE_NAME,
|
||||
);
|
||||
let table_scan_plan = LogicalPlan::TableScan(TableScan {
|
||||
table_name: format!(
|
||||
"{DEFAULT_CATALOG_NAME}.{DEFAULT_SCHEMA_NAME}.{DEFAULT_TABLE_NAME}",
|
||||
),
|
||||
table_name,
|
||||
source: adapter,
|
||||
projection: Some(projection),
|
||||
projected_schema,
|
||||
|
||||
@@ -12,7 +12,7 @@ deadlock_detection = ["parking_lot"]
|
||||
backtrace = "0.3"
|
||||
common-error = { path = "../error" }
|
||||
console-subscriber = { version = "0.1", optional = true }
|
||||
metrics = "0.20"
|
||||
metrics = "0.20.1"
|
||||
metrics-exporter-prometheus = { version = "0.11", default-features = false }
|
||||
once_cell = "1.10"
|
||||
opentelemetry = { version = "0.17", default-features = false, features = [
|
||||
|
||||
@@ -12,4 +12,4 @@ serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#![feature(int_roundings)]
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error;
|
||||
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
|
||||
use crate::util::div_ceil;
|
||||
|
||||
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
|
||||
pub struct Timestamp {
|
||||
@@ -143,7 +144,7 @@ impl Timestamp {
|
||||
Some(Timestamp::new(value, unit))
|
||||
} else {
|
||||
let mul = unit.factor() / self.unit().factor();
|
||||
Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
|
||||
Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
|
||||
chrono::Utc::now().timestamp_millis()
|
||||
}
|
||||
|
||||
/// Port of rust unstable features `int_roundings`.
|
||||
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
|
||||
let d = this / rhs;
|
||||
let r = this % rhs;
|
||||
if r > 0 && rhs > 0 {
|
||||
d + 1
|
||||
} else {
|
||||
d
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::{self, SystemTime};
|
||||
@@ -42,4 +53,10 @@ mod tests {
|
||||
assert_eq!(datetime_std.hour(), datetime_now.hour());
|
||||
assert_eq!(datetime_std.minute(), datetime_now.minute());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_div_ceil() {
|
||||
let v0 = 9223372036854676001;
|
||||
assert_eq!(9223372036854677, div_ceil(v0, 1000));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,6 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["python"]
|
||||
python = ["dep:script"]
|
||||
|
||||
[dependencies]
|
||||
async-compat = "0.2"
|
||||
async-stream.workspace = true
|
||||
@@ -20,6 +16,8 @@ catalog = { path = "../catalog" }
|
||||
common-base = { path = "../common/base" }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-datasource = { path = "../common/datasource" }
|
||||
common-function = { path = "../common/function" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-procedure = { path = "../common/procedure" }
|
||||
@@ -36,6 +34,7 @@ futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
humantime-serde = "1.1"
|
||||
log = "0.4"
|
||||
log-store = { path = "../log-store" }
|
||||
meta-client = { path = "../meta-client" }
|
||||
meta-srv = { path = "../meta-srv", features = ["mock"] }
|
||||
@@ -46,7 +45,6 @@ pin-project = "1.0"
|
||||
prost.workspace = true
|
||||
query = { path = "../query" }
|
||||
regex = "1.6"
|
||||
script = { path = "../script", features = ["python"], optional = true }
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
servers = { path = "../servers" }
|
||||
|
||||
@@ -19,6 +19,7 @@ use common_base::readable_size::ReadableSize;
|
||||
use common_telemetry::info;
|
||||
use meta_client::MetaClientOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::Mode;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::SchedulerConfig;
|
||||
@@ -29,6 +30,7 @@ use crate::server::Services;
|
||||
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize(1024);
|
||||
|
||||
/// Object storage config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum ObjectStoreConfig {
|
||||
@@ -37,6 +39,16 @@ pub enum ObjectStoreConfig {
|
||||
Oss(OssConfig),
|
||||
}
|
||||
|
||||
/// Storage engine config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
#[serde(default)]
|
||||
pub struct StorageConfig {
|
||||
#[serde(flatten)]
|
||||
pub store: ObjectStoreConfig,
|
||||
pub compaction: CompactionConfig,
|
||||
pub manifest: RegionManifestConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Default, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct FileConfig {
|
||||
@@ -77,6 +89,7 @@ impl Default for ObjectStoreConfig {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct WalConfig {
|
||||
// wal directory
|
||||
pub dir: String,
|
||||
@@ -106,8 +119,30 @@ impl Default for WalConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for region manifest
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct RegionManifestConfig {
|
||||
/// Region manifest checkpoint actions margin.
|
||||
/// Manifest service create a checkpoint every [checkpoint_margin] actions.
|
||||
pub checkpoint_margin: Option<u16>,
|
||||
/// Region manifest logs and checkpoints gc task execution duration.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub gc_duration: Option<Duration>,
|
||||
}
|
||||
|
||||
impl Default for RegionManifestConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
checkpoint_margin: Some(10u16),
|
||||
gc_duration: Some(Duration::from_secs(30)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for table compaction
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct CompactionConfig {
|
||||
/// Max task number that can concurrently run.
|
||||
pub max_inflight_tasks: usize,
|
||||
@@ -115,6 +150,8 @@ pub struct CompactionConfig {
|
||||
pub max_files_in_level0: usize,
|
||||
/// Max task number for SST purge task after compaction.
|
||||
pub max_purge_tasks: usize,
|
||||
/// Buffer threshold while writing SST files
|
||||
pub sst_write_buffer_size: ReadableSize,
|
||||
}
|
||||
|
||||
impl Default for CompactionConfig {
|
||||
@@ -123,6 +160,7 @@ impl Default for CompactionConfig {
|
||||
max_inflight_tasks: 4,
|
||||
max_files_in_level0: 8,
|
||||
max_purge_tasks: 32,
|
||||
sst_write_buffer_size: ReadableSize::mb(8),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -130,7 +168,7 @@ impl Default for CompactionConfig {
|
||||
impl From<&DatanodeOptions> for SchedulerConfig {
|
||||
fn from(value: &DatanodeOptions) -> Self {
|
||||
Self {
|
||||
max_inflight_tasks: value.compaction.max_inflight_tasks,
|
||||
max_inflight_tasks: value.storage.compaction.max_inflight_tasks,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -138,8 +176,11 @@ impl From<&DatanodeOptions> for SchedulerConfig {
|
||||
impl From<&DatanodeOptions> for StorageEngineConfig {
|
||||
fn from(value: &DatanodeOptions) -> Self {
|
||||
Self {
|
||||
max_files_in_l0: value.compaction.max_files_in_level0,
|
||||
max_purge_tasks: value.compaction.max_purge_tasks,
|
||||
manifest_checkpoint_margin: value.storage.manifest.checkpoint_margin,
|
||||
manifest_gc_duration: value.storage.manifest.gc_duration,
|
||||
max_files_in_l0: value.storage.compaction.max_files_in_level0,
|
||||
max_purge_tasks: value.storage.compaction.max_purge_tasks,
|
||||
sst_write_buffer_size: value.storage.compaction.sst_write_buffer_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -149,11 +190,22 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
|
||||
pub struct ProcedureConfig {
|
||||
/// Storage config for procedure manager.
|
||||
pub store: ObjectStoreConfig,
|
||||
/// Max retry times of procedure.
|
||||
pub max_retry_times: usize,
|
||||
/// Initial retry delay of procedures, increases exponentially.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub retry_delay: Duration,
|
||||
}
|
||||
|
||||
impl Default for ProcedureConfig {
|
||||
fn default() -> ProcedureConfig {
|
||||
ProcedureConfig::from_file_path("/tmp/greptimedb/procedure/".to_string())
|
||||
ProcedureConfig {
|
||||
store: ObjectStoreConfig::File(FileConfig {
|
||||
data_dir: "/tmp/greptimedb/procedure/".to_string(),
|
||||
}),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,6 +213,7 @@ impl ProcedureConfig {
|
||||
pub fn from_file_path(path: String) -> ProcedureConfig {
|
||||
ProcedureConfig {
|
||||
store: ObjectStoreConfig::File(FileConfig { data_dir: path }),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -176,10 +229,10 @@ pub struct DatanodeOptions {
|
||||
pub rpc_runtime_size: usize,
|
||||
pub mysql_addr: String,
|
||||
pub mysql_runtime_size: usize,
|
||||
pub http_opts: HttpOptions,
|
||||
pub meta_client_options: Option<MetaClientOptions>,
|
||||
pub wal: WalConfig,
|
||||
pub storage: ObjectStoreConfig,
|
||||
pub compaction: CompactionConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub procedure: Option<ProcedureConfig>,
|
||||
}
|
||||
|
||||
@@ -194,10 +247,10 @@ impl Default for DatanodeOptions {
|
||||
rpc_runtime_size: 8,
|
||||
mysql_addr: "127.0.0.1:4406".to_string(),
|
||||
mysql_runtime_size: 2,
|
||||
http_opts: HttpOptions::default(),
|
||||
meta_client_options: None,
|
||||
wal: WalConfig::default(),
|
||||
storage: ObjectStoreConfig::default(),
|
||||
compaction: CompactionConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
procedure: None,
|
||||
}
|
||||
}
|
||||
@@ -206,14 +259,17 @@ impl Default for DatanodeOptions {
|
||||
/// Datanode service.
|
||||
pub struct Datanode {
|
||||
opts: DatanodeOptions,
|
||||
services: Services,
|
||||
services: Option<Services>,
|
||||
instance: InstanceRef,
|
||||
}
|
||||
|
||||
impl Datanode {
|
||||
pub async fn new(opts: DatanodeOptions) -> Result<Datanode> {
|
||||
let instance = Arc::new(Instance::new(&opts).await?);
|
||||
let services = Services::try_new(instance.clone(), &opts).await?;
|
||||
let services = match opts.mode {
|
||||
Mode::Distributed => Some(Services::try_new(instance.clone(), &opts).await?),
|
||||
Mode::Standalone => None,
|
||||
};
|
||||
Ok(Self {
|
||||
opts,
|
||||
services,
|
||||
@@ -234,12 +290,34 @@ impl Datanode {
|
||||
|
||||
/// Start services of datanode. This method call will block until services are shutdown.
|
||||
pub async fn start_services(&mut self) -> Result<()> {
|
||||
self.services.start(&self.opts).await
|
||||
if let Some(service) = self.services.as_mut() {
|
||||
service.start(&self.opts).await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_instance(&self) -> InstanceRef {
|
||||
self.instance.clone()
|
||||
}
|
||||
|
||||
pub async fn shutdown_instance(&self) -> Result<()> {
|
||||
self.instance.shutdown().await
|
||||
}
|
||||
|
||||
async fn shutdown_services(&self) -> Result<()> {
|
||||
if let Some(service) = self.services.as_ref() {
|
||||
service.shutdown().await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
// We must shutdown services first
|
||||
self.shutdown_services().await?;
|
||||
self.shutdown_instance().await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -14,11 +14,10 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_datasource::error::Error as DataSourceError;
|
||||
use common_error::prelude::*;
|
||||
use common_procedure::ProcedureId;
|
||||
use common_recordbatch::error::Error as RecordBatchError;
|
||||
use datafusion::parquet;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use storage::error::Error as StorageError;
|
||||
use table::error::Error as TableError;
|
||||
use url::ParseError;
|
||||
@@ -35,6 +34,24 @@ pub enum Error {
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to plan statement, source: {}", source))]
|
||||
PlanStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute statement, source: {}", source))]
|
||||
ExecuteStatement {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute logical plan, source: {}", source))]
|
||||
ExecuteLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan, source: {}", source))]
|
||||
DecodeLogicalPlan {
|
||||
#[snafu(backtrace)]
|
||||
@@ -106,24 +123,6 @@ pub enum Error {
|
||||
))]
|
||||
ColumnValuesNumberMismatch { columns: usize, values: usize },
|
||||
|
||||
#[snafu(display(
|
||||
"Column type mismatch, column: {}, expected type: {:?}, actual: {:?}",
|
||||
column,
|
||||
expected,
|
||||
actual,
|
||||
))]
|
||||
ColumnTypeMismatch {
|
||||
column: String,
|
||||
expected: ConcreteDataType,
|
||||
actual: ConcreteDataType,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect record batch, source: {}", source))]
|
||||
CollectRecords {
|
||||
#[snafu(backtrace)]
|
||||
source: RecordBatchError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse sql value, source: {}", source))]
|
||||
ParseSqlValue {
|
||||
#[snafu(backtrace)]
|
||||
@@ -151,6 +150,13 @@ pub enum Error {
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to flush table: {}, source: {}", table_name, source))]
|
||||
FlushTable {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start server, source: {}", source))]
|
||||
StartServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -175,6 +181,9 @@ pub enum Error {
|
||||
#[snafu(display("Failed to create directory {}, source: {}", dir, source))]
|
||||
CreateDir { dir: String, source: std::io::Error },
|
||||
|
||||
#[snafu(display("Failed to remove directory {}, source: {}", dir, source))]
|
||||
RemoveDir { dir: String, source: std::io::Error },
|
||||
|
||||
#[snafu(display("Failed to open log store, source: {}", source))]
|
||||
OpenLogStore {
|
||||
#[snafu(backtrace)]
|
||||
@@ -193,7 +202,13 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse url, source: {}", source))]
|
||||
ParseUrl {
|
||||
source: DataSourceError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
@@ -224,6 +239,12 @@ pub enum Error {
|
||||
source: regex::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list objects, source: {}", source))]
|
||||
ListObjects {
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse the data, source: {}", source))]
|
||||
ParseDataTypes {
|
||||
#[snafu(backtrace)]
|
||||
@@ -293,12 +314,6 @@ pub enum Error {
|
||||
source: sql::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start script manager, source: {}", source))]
|
||||
StartScriptManager {
|
||||
#[snafu(backtrace)]
|
||||
source: script::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to parse string to timestamp, string: {}, source: {}",
|
||||
raw,
|
||||
@@ -418,12 +433,6 @@ pub enum Error {
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write parquet file, source: {}", source))]
|
||||
WriteParquet {
|
||||
source: parquet::errors::ParquetError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to poll stream, source: {}", source))]
|
||||
PollStream {
|
||||
source: datafusion_common::DataFusionError,
|
||||
@@ -450,13 +459,6 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to lists object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unrecognized table option: {}", source))]
|
||||
UnrecognizedTableOption {
|
||||
#[snafu(backtrace)]
|
||||
@@ -482,6 +484,30 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to close table engine, source: {}", source))]
|
||||
CloseTableEngine {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown server, source: {}", source))]
|
||||
ShutdownServer {
|
||||
#[snafu(backtrace)]
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown instance, source: {}", source))]
|
||||
ShutdownInstance {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to copy table to parquet file, source: {}", source))]
|
||||
WriteParquet {
|
||||
#[snafu(backtrace)]
|
||||
source: storage::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -490,7 +516,12 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
ExecuteSql { source } | DescribeStatement { source } => source.status_code(),
|
||||
ExecuteSql { source }
|
||||
| PlanStatement { source }
|
||||
| ExecuteStatement { source }
|
||||
| ExecuteLogicalPlan { source }
|
||||
| DescribeStatement { source } => source.status_code(),
|
||||
|
||||
DecodeLogicalPlan { source } => source.status_code(),
|
||||
NewCatalog { source } | RegisterSchema { source } => source.status_code(),
|
||||
FindTable { source, .. } => source.status_code(),
|
||||
@@ -498,11 +529,10 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
DropTable { source, .. } => source.status_code(),
|
||||
FlushTable { source, .. } => source.status_code(),
|
||||
|
||||
Insert { source, .. } => source.status_code(),
|
||||
Delete { source, .. } => source.status_code(),
|
||||
CollectRecords { source, .. } => source.status_code(),
|
||||
|
||||
TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
|
||||
|
||||
@@ -515,7 +545,6 @@ impl ErrorExt for Error {
|
||||
ConvertSchema { source, .. } | VectorComputation { source } => source.status_code(),
|
||||
|
||||
ColumnValuesNumberMismatch { .. }
|
||||
| ColumnTypeMismatch { .. }
|
||||
| InvalidSql { .. }
|
||||
| InvalidUrl { .. }
|
||||
| InvalidPath { .. }
|
||||
@@ -535,7 +564,8 @@ impl ErrorExt for Error {
|
||||
| DatabaseNotFound { .. }
|
||||
| MissingNodeId { .. }
|
||||
| MissingMetasrvOpts { .. }
|
||||
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| ParseUrl { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
// TODO(yingwen): Further categorize http error.
|
||||
StartServer { .. }
|
||||
@@ -543,6 +573,7 @@ impl ErrorExt for Error {
|
||||
| TcpBind { .. }
|
||||
| StartGrpc { .. }
|
||||
| CreateDir { .. }
|
||||
| RemoveDir { .. }
|
||||
| InsertSystemCatalog { .. }
|
||||
| RenameTable { .. }
|
||||
| Catalog { .. }
|
||||
@@ -550,7 +581,10 @@ impl ErrorExt for Error {
|
||||
| BuildParquetRecordBatchStream { .. }
|
||||
| InvalidSchema { .. }
|
||||
| ParseDataTypes { .. }
|
||||
| IncorrectInternalState { .. } => StatusCode::Internal,
|
||||
| IncorrectInternalState { .. }
|
||||
| ShutdownServer { .. }
|
||||
| ShutdownInstance { .. }
|
||||
| CloseTableEngine { .. } => StatusCode::Internal,
|
||||
|
||||
BuildBackend { .. }
|
||||
| InitBackend { .. }
|
||||
@@ -561,7 +595,6 @@ impl ErrorExt for Error {
|
||||
| WriteObject { .. }
|
||||
| ListObjects { .. } => StatusCode::StorageUnavailable,
|
||||
OpenLogStore { source } => source.status_code(),
|
||||
StartScriptManager { source } => source.status_code(),
|
||||
OpenStorageEngine { source } => source.status_code(),
|
||||
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
|
||||
MetaClientInit { source, .. } => source.status_code(),
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
|
||||
use catalog::{region_number, CatalogManagerRef};
|
||||
use catalog::{datanode_stat, CatalogManagerRef};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use meta_client::client::{HeartbeatSender, MetaClient};
|
||||
use snafu::ResultExt;
|
||||
@@ -106,13 +106,7 @@ impl HeartbeatTask {
|
||||
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
|
||||
common_runtime::spawn_bg(async move {
|
||||
while running.load(Ordering::Acquire) {
|
||||
let region_num = match region_number(&catalog_manager_clone).await {
|
||||
Ok(region_num) => region_num as i64,
|
||||
Err(e) => {
|
||||
error!("failed to get region number, err: {e:?}");
|
||||
-1
|
||||
}
|
||||
};
|
||||
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
|
||||
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
@@ -120,9 +114,10 @@ impl HeartbeatTask {
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
node_stat: Some(NodeStat {
|
||||
region_num,
|
||||
region_num: region_num as _,
|
||||
..Default::default()
|
||||
}),
|
||||
region_stats,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -144,6 +139,18 @@ impl HeartbeatTask {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
let running = self.running.clone();
|
||||
if running
|
||||
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
|
||||
.is_err()
|
||||
{
|
||||
warn!("Call close heartbeat task multiple times");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves hostname:port address for meta registration
|
||||
|
||||
@@ -20,8 +20,10 @@ use catalog::remote::MetaKvBackend;
|
||||
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_procedure::local::{LocalManager, ManagerConfig};
|
||||
use common_procedure::store::state_store::ObjectStateStore;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_telemetry::logging::info;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
@@ -36,12 +38,14 @@ use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder
|
||||
use object_store::{util, ObjectStore, ObjectStoreBuilder};
|
||||
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use storage::compaction::{CompactionHandler, CompactionSchedulerRef, SimplePicker};
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::{LocalScheduler, SchedulerConfig};
|
||||
use storage::EngineImpl;
|
||||
use store_api::logstore::LogStore;
|
||||
use table::requests::FlushTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProviderRef;
|
||||
use table::Table;
|
||||
@@ -51,14 +55,12 @@ use crate::datanode::{
|
||||
};
|
||||
use crate::error::{
|
||||
self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result,
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
mod grpc;
|
||||
mod script;
|
||||
pub mod sql;
|
||||
|
||||
pub(crate) type DefaultEngine = MitoEngine<EngineImpl<RaftEngineLogStore>>;
|
||||
@@ -68,18 +70,15 @@ pub struct Instance {
|
||||
pub(crate) query_engine: QueryEngineRef,
|
||||
pub(crate) sql_handler: SqlHandler,
|
||||
pub(crate) catalog_manager: CatalogManagerRef,
|
||||
pub(crate) script_executor: ScriptExecutor,
|
||||
pub(crate) table_id_provider: Option<TableIdProviderRef>,
|
||||
pub(crate) heartbeat_task: Option<HeartbeatTask>,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
|
||||
pub type InstanceRef = Arc<Instance>;
|
||||
|
||||
impl Instance {
|
||||
pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let logstore = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let meta_client = match opts.mode {
|
||||
Mode::Standalone => None,
|
||||
Mode::Distributed => {
|
||||
@@ -96,11 +95,22 @@ impl Instance {
|
||||
|
||||
let compaction_scheduler = create_compaction_scheduler(opts);
|
||||
|
||||
Self::new_with(opts, meta_client, compaction_scheduler).await
|
||||
}
|
||||
|
||||
pub(crate) async fn new_with(
|
||||
opts: &DatanodeOptions,
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
compaction_scheduler: CompactionSchedulerRef<RaftEngineLogStore>,
|
||||
) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage.store).await?;
|
||||
let log_store = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let table_engine = Arc::new(DefaultEngine::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::from(opts),
|
||||
logstore.clone(),
|
||||
log_store.clone(),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
@@ -108,7 +118,7 @@ impl Instance {
|
||||
));
|
||||
|
||||
// create remote catalog manager
|
||||
let (catalog_manager, factory, table_id_provider) = match opts.mode {
|
||||
let (catalog_manager, table_id_provider) = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
if opts.enable_memory_catalog {
|
||||
let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
|
||||
@@ -125,11 +135,8 @@ impl Instance {
|
||||
.await
|
||||
.expect("Failed to register numbers");
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
|
||||
(
|
||||
catalog.clone() as CatalogManagerRef,
|
||||
factory,
|
||||
Some(catalog as TableIdProviderRef),
|
||||
)
|
||||
} else {
|
||||
@@ -138,11 +145,9 @@ impl Instance {
|
||||
.await
|
||||
.context(CatalogSnafu)?,
|
||||
);
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
|
||||
(
|
||||
catalog.clone() as CatalogManagerRef,
|
||||
factory,
|
||||
Some(catalog as TableIdProviderRef),
|
||||
)
|
||||
}
|
||||
@@ -156,14 +161,12 @@ impl Instance {
|
||||
client: meta_client.as_ref().unwrap().clone(),
|
||||
}),
|
||||
));
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
(catalog as CatalogManagerRef, factory, None)
|
||||
(catalog as CatalogManagerRef, None)
|
||||
}
|
||||
};
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_manager.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let script_executor =
|
||||
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
|
||||
|
||||
let heartbeat_task = match opts.mode {
|
||||
Mode::Standalone => None,
|
||||
@@ -177,6 +180,7 @@ impl Instance {
|
||||
};
|
||||
|
||||
let procedure_manager = create_procedure_manager(&opts.procedure).await?;
|
||||
// Register all procedures.
|
||||
if let Some(procedure_manager) = &procedure_manager {
|
||||
table_engine.register_procedure_loaders(&**procedure_manager);
|
||||
table_procedure::register_procedure_loaders(
|
||||
@@ -185,12 +189,6 @@ impl Instance {
|
||||
table_engine.clone(),
|
||||
&**procedure_manager,
|
||||
);
|
||||
|
||||
// Recover procedures.
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
@@ -198,14 +196,13 @@ impl Instance {
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
procedure_manager,
|
||||
procedure_manager.clone(),
|
||||
),
|
||||
catalog_manager,
|
||||
script_executor,
|
||||
heartbeat_task,
|
||||
table_id_provider,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -217,6 +214,70 @@ impl Instance {
|
||||
if let Some(task) = &self.heartbeat_task {
|
||||
task.start().await?;
|
||||
}
|
||||
|
||||
// Recover procedures after the catalog manager is started, so we can
|
||||
// ensure we can access all tables from the catalog manager.
|
||||
if let Some(procedure_manager) = &self.procedure_manager {
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
if let Some(heartbeat_task) = &self.heartbeat_task {
|
||||
heartbeat_task
|
||||
.close()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
}
|
||||
|
||||
self.flush_tables().await?;
|
||||
|
||||
self.sql_handler
|
||||
.close()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)
|
||||
}
|
||||
|
||||
pub async fn flush_tables(&self) -> Result<()> {
|
||||
info!("going to flush all schemas");
|
||||
let schema_list = self
|
||||
.catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?
|
||||
.expect("Default schema not found")
|
||||
.schema_names()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
let flush_requests = schema_list
|
||||
.into_iter()
|
||||
.map(|schema_name| {
|
||||
SqlRequest::FlushTable(FlushTableRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name,
|
||||
table_name: None,
|
||||
region_number: None,
|
||||
wait: Some(true),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let flush_result = futures::future::try_join_all(
|
||||
flush_requests
|
||||
.into_iter()
|
||||
.map(|request| self.sql_handler.execute(request, QueryContext::arc())),
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu);
|
||||
info!("Flushed all tables result: {}", flush_result.is_ok());
|
||||
flush_result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -227,6 +288,10 @@ impl Instance {
|
||||
pub fn catalog_manager(&self) -> &CatalogManagerRef {
|
||||
&self.catalog_manager
|
||||
}
|
||||
|
||||
pub fn query_engine(&self) -> QueryEngineRef {
|
||||
self.query_engine.clone()
|
||||
}
|
||||
}
|
||||
|
||||
fn create_compaction_scheduler<S: LogStore>(opts: &DatanodeOptions) -> CompactionSchedulerRef<S> {
|
||||
@@ -244,11 +309,22 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
|
||||
ObjectStoreConfig::Oss { .. } => new_oss_object_store(store_config).await,
|
||||
};
|
||||
|
||||
// Don't enable retry layer when using local file backend.
|
||||
let object_store = if !matches!(store_config, ObjectStoreConfig::File(..)) {
|
||||
object_store.map(|object_store| object_store.layer(RetryLayer::new().with_jitter()))
|
||||
} else {
|
||||
object_store
|
||||
};
|
||||
|
||||
object_store.map(|object_store| {
|
||||
object_store
|
||||
.layer(RetryLayer::new().with_jitter())
|
||||
.layer(MetricsLayer)
|
||||
.layer(LoggingLayer::default())
|
||||
.layer(
|
||||
LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some(log::Level::Debug)),
|
||||
)
|
||||
.layer(TracingLayer)
|
||||
})
|
||||
}
|
||||
@@ -266,18 +342,20 @@ pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Re
|
||||
);
|
||||
|
||||
let mut builder = OSSBuilder::default();
|
||||
let builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&oss_config.bucket)
|
||||
.endpoint(&oss_config.endpoint)
|
||||
.access_key_id(&oss_config.access_key_id)
|
||||
.access_key_secret(&oss_config.access_key_secret);
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish();
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(object_store, store_config)
|
||||
}
|
||||
|
||||
fn create_object_store_with_cache(
|
||||
@@ -330,24 +408,27 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
);
|
||||
|
||||
let mut builder = S3Builder::default();
|
||||
let mut builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&s3_config.bucket)
|
||||
.access_key_id(&s3_config.access_key_id)
|
||||
.secret_access_key(&s3_config.secret_access_key);
|
||||
|
||||
if s3_config.endpoint.is_some() {
|
||||
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
}
|
||||
if s3_config.region.is_some() {
|
||||
builder = builder.region(s3_config.region.as_ref().unwrap());
|
||||
builder.region(s3_config.region.as_ref().unwrap());
|
||||
}
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(
|
||||
ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish(),
|
||||
store_config,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
|
||||
@@ -361,16 +442,27 @@ pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
info!("The file storage directory is: {}", &data_dir);
|
||||
|
||||
let atomic_write_dir = format!("{data_dir}/.tmp/");
|
||||
if path::Path::new(&atomic_write_dir).exists() {
|
||||
info!(
|
||||
"Begin to clean temp storage directory: {}",
|
||||
&atomic_write_dir
|
||||
);
|
||||
fs::remove_dir_all(&atomic_write_dir).context(error::RemoveDirSnafu {
|
||||
dir: &atomic_write_dir,
|
||||
})?;
|
||||
info!("Cleaned temp storage directory: {}", &atomic_write_dir);
|
||||
}
|
||||
|
||||
let accessor = FsBuilder::default()
|
||||
.root(&data_dir)
|
||||
.atomic_write_dir(&atomic_write_dir)
|
||||
.build()
|
||||
let mut builder = FsBuilder::default();
|
||||
builder.root(&data_dir).atomic_write_dir(&atomic_write_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
})?
|
||||
.finish();
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
Ok(object_store)
|
||||
}
|
||||
|
||||
/// Create metasrv client instance and spawn heartbeat loop.
|
||||
@@ -436,7 +528,15 @@ pub(crate) async fn create_procedure_manager(
|
||||
);
|
||||
|
||||
let object_store = new_object_store(&procedure_config.store).await?;
|
||||
let manager_config = ManagerConfig { object_store };
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store));
|
||||
|
||||
Ok(Some(Arc::new(LocalManager::new(manager_config))))
|
||||
let manager_config = ManagerConfig {
|
||||
max_retry_times: procedure_config.max_retry_times,
|
||||
retry_delay: procedure_config.retry_delay,
|
||||
};
|
||||
|
||||
Ok(Some(Arc::new(LocalManager::new(
|
||||
manager_config,
|
||||
state_store,
|
||||
))))
|
||||
}
|
||||
|
||||
@@ -18,15 +18,19 @@ use api::v1::query_request::Query;
|
||||
use api::v1::{CreateDatabaseExpr, DdlRequest, InsertRequest};
|
||||
use async_trait::async_trait;
|
||||
use common_query::Output;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::plan::LogicalPlan;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::statements::statement::Statement;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use table::requests::CreateDatabaseRequest;
|
||||
|
||||
use crate::error::{self, DecodeLogicalPlanSnafu, ExecuteSqlSnafu, Result};
|
||||
use crate::error::{
|
||||
self, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, ExecuteSqlSnafu, PlanStatementSnafu,
|
||||
Result,
|
||||
};
|
||||
use crate::instance::Instance;
|
||||
|
||||
impl Instance {
|
||||
@@ -49,19 +53,44 @@ impl Instance {
|
||||
.context(DecodeLogicalPlanSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&LogicalPlan::DfPlan(logical_plan))
|
||||
.execute(LogicalPlan::DfPlan(logical_plan), QueryContext::arc())
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
|
||||
async fn handle_query(&self, query: Query, ctx: QueryContextRef) -> Result<Output> {
|
||||
Ok(match query {
|
||||
match query {
|
||||
Query::Sql(sql) => {
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql).context(ExecuteSqlSnafu)?;
|
||||
self.execute_stmt(stmt, ctx).await?
|
||||
match stmt {
|
||||
// TODO(LFC): Remove SQL execution branch here.
|
||||
// Keep this because substrait can't handle much of SQLs now.
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(stmt, ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(plan, ctx)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
_ => self.execute_stmt(stmt, ctx).await,
|
||||
}
|
||||
}
|
||||
Query::LogicalPlan(plan) => self.execute_logical(plan).await?,
|
||||
})
|
||||
Query::LogicalPlan(plan) => self.execute_logical(plan).await,
|
||||
Query::PromRangeQuery(promql) => {
|
||||
let prom_query = PromQuery {
|
||||
query: promql.query,
|
||||
start: promql.start,
|
||||
end: promql.end,
|
||||
step: promql.step,
|
||||
};
|
||||
self.execute_promql(&prom_query, ctx).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_insert(
|
||||
@@ -98,7 +127,7 @@ impl Instance {
|
||||
DdlExpr::Alter(expr) => self.handle_alter(expr).await,
|
||||
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
|
||||
DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
|
||||
DdlExpr::FlushTable(_) => todo!(),
|
||||
DdlExpr::FlushTable(expr) => self.handle_flush_table(expr).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -132,11 +161,23 @@ mod test {
|
||||
};
|
||||
use common_recordbatch::RecordBatches;
|
||||
use datatypes::prelude::*;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::tests::test_util::{self, MockInstance};
|
||||
|
||||
async fn exec_selection(instance: &Instance, sql: &str) -> Output {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let engine = instance.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_handle_ddl() {
|
||||
let instance = MockInstance::new("test_handle_ddl").await;
|
||||
@@ -199,22 +240,17 @@ mod test {
|
||||
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(
|
||||
"INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
|
||||
)
|
||||
.unwrap();
|
||||
let output = instance
|
||||
.execute_sql(
|
||||
"INSERT INTO my_database.my_table (a, b, ts) VALUES ('s', 1, 1672384140000)",
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(1)));
|
||||
|
||||
let output = instance
|
||||
.execute_sql(
|
||||
"SELECT ts, a, b FROM my_database.my_table",
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let output = exec_selection(instance, "SELECT ts, a, b FROM my_database.my_table").await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
@@ -280,10 +316,7 @@ mod test {
|
||||
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(3)));
|
||||
|
||||
let output = instance
|
||||
.execute_sql("SELECT ts, host, cpu FROM demo", QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = exec_selection(instance, "SELECT ts, host, cpu FROM demo").await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
|
||||
@@ -17,31 +17,28 @@ use std::time::{Duration, SystemTime};
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging::info;
|
||||
use common_telemetry::timer;
|
||||
use datatypes::schema::Schema;
|
||||
use futures::StreamExt;
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::StatementHandler;
|
||||
use servers::error as server_error;
|
||||
use servers::prom::PromHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::ast::ObjectName;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::copy::{CopyTable, CopyTableArgument};
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{
|
||||
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
|
||||
};
|
||||
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
|
||||
|
||||
use crate::error::{self, BumpTableIdSnafu, ExecuteSqlSnafu, Result, TableIdProviderNotFoundSnafu};
|
||||
use crate::error::{
|
||||
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
|
||||
TableIdProviderNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::Instance;
|
||||
use crate::metric;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
use crate::sql::SqlRequest;
|
||||
use crate::metrics;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
impl Instance {
|
||||
pub async fn execute_stmt(
|
||||
@@ -50,50 +47,11 @@ impl Instance {
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Promql(_) => {
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
QueryStatement::Sql(Statement::Insert(insert)) => {
|
||||
let requests = self
|
||||
.sql_handler
|
||||
.insert_to_requests(self.catalog_manager.clone(), *insert, query_ctx.clone())
|
||||
.await?;
|
||||
|
||||
match requests {
|
||||
InsertRequests::Request(request) => {
|
||||
self.sql_handler.execute(request, query_ctx.clone()).await
|
||||
}
|
||||
|
||||
InsertRequests::Stream(mut s) => {
|
||||
let mut rows = 0;
|
||||
while let Some(request) = s.next().await {
|
||||
match self
|
||||
.sql_handler
|
||||
.execute(request?, query_ctx.clone())
|
||||
.await?
|
||||
{
|
||||
Output::AffectedRows(n) => {
|
||||
rows += n;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
Ok(Output::AffectedRows(rows))
|
||||
}
|
||||
}
|
||||
}
|
||||
QueryStatement::Sql(Statement::Delete(delete)) => {
|
||||
let request = SqlRequest::Delete(*delete);
|
||||
self.sql_handler.execute(request, query_ctx).await
|
||||
let request =
|
||||
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
|
||||
.await?;
|
||||
self.sql_handler.insert(request).await
|
||||
}
|
||||
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
|
||||
let request = CreateDatabaseRequest {
|
||||
@@ -132,6 +90,9 @@ impl Instance {
|
||||
.execute(SqlRequest::CreateTable(request), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::CreateExternalTable(_create_external_table)) => {
|
||||
unimplemented!()
|
||||
}
|
||||
QueryStatement::Sql(Statement::Alter(alter_table)) => {
|
||||
let name = alter_table.table_name().clone();
|
||||
let (catalog, schema, table) = table_idents_to_full_name(&name, query_ctx.clone())?;
|
||||
@@ -163,106 +124,86 @@ impl Instance {
|
||||
.execute(SqlRequest::ShowTables(show_tables), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Explain(explain)) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::Explain(Box::new(explain)), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::DescribeTable(describe_table)) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::DescribeTable(describe_table), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
|
||||
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
|
||||
}
|
||||
QueryStatement::Sql(Statement::Use(ref schema)) => {
|
||||
let catalog = &query_ctx.current_catalog();
|
||||
ensure!(
|
||||
self.is_valid_schema(catalog, schema)?,
|
||||
error::DatabaseNotFoundSnafu { catalog, schema }
|
||||
);
|
||||
|
||||
query_ctx.set_current_schema(schema);
|
||||
|
||||
Ok(Output::RecordBatches(RecordBatches::empty()))
|
||||
}
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
|
||||
let file_name = copy_table.file_name().to_string();
|
||||
|
||||
let req = CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
file_name,
|
||||
};
|
||||
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let req = CopyTableFromRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
connection: copy_table.connection,
|
||||
pattern: copy_table.pattern,
|
||||
from: copy_table.from,
|
||||
};
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTableFrom(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
},
|
||||
QueryStatement::Sql(Statement::Tql(tql)) => self.execute_tql(tql, query_ctx).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => {
|
||||
let req = match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Export,
|
||||
}
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Import,
|
||||
}
|
||||
}
|
||||
};
|
||||
let stmt = QueryLanguageParser::parse_promql(&promql).context(ExecuteSqlSnafu)?;
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.statement_to_plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&logical_plan)
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
Tql::Explain(_explain) => {
|
||||
todo!("waiting for promql-parser ast adding a explain node")
|
||||
}
|
||||
QueryStatement::Sql(Statement::Query(_))
|
||||
| QueryStatement::Sql(Statement::Explain(_))
|
||||
| QueryStatement::Sql(Statement::Use(_))
|
||||
| QueryStatement::Sql(Statement::Tql(_))
|
||||
| QueryStatement::Sql(Statement::Delete(_))
|
||||
| QueryStatement::Sql(Statement::DescribeTable(_))
|
||||
| QueryStatement::Promql(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn execute_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).context(ExecuteSqlSnafu)?;
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
}
|
||||
|
||||
pub async fn execute_promql(
|
||||
&self,
|
||||
promql: &PromQuery,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
|
||||
let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?;
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
|
||||
// TODO(ruihang): merge this and `execute_promql` after #951 landed
|
||||
@@ -291,7 +232,17 @@ impl Instance {
|
||||
eval_stmt.lookback_delta = lookback
|
||||
}
|
||||
}
|
||||
self.execute_stmt(stmt, query_ctx).await
|
||||
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -327,64 +278,23 @@ pub fn table_idents_to_full_name(
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SqlQueryHandler for Instance {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
// we assume sql string has only 1 statement in datanode
|
||||
let result = self.execute_sql(query, query_ctx).await;
|
||||
vec![result]
|
||||
}
|
||||
|
||||
async fn do_promql_query(
|
||||
impl StatementHandler for Instance {
|
||||
async fn handle_statement(
|
||||
&self,
|
||||
query: &PromQuery,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Vec<Result<Output>> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
let result = self.execute_promql(query, query_ctx).await;
|
||||
vec![result]
|
||||
}
|
||||
|
||||
async fn do_statement_query(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
self.execute_stmt(QueryStatement::Sql(stmt), query_ctx)
|
||||
) -> query::error::Result<Output> {
|
||||
self.execute_stmt(stmt, query_ctx)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_describe(
|
||||
&self,
|
||||
stmt: Statement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Option<Schema>> {
|
||||
if let Statement::Query(_) = stmt {
|
||||
self.query_engine
|
||||
.describe(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.map(Some)
|
||||
.context(error::DescribeStatementSnafu)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn is_valid_schema(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
self.catalog_manager
|
||||
.schema(catalog, schema)
|
||||
.map(|s| s.is_some())
|
||||
.context(error::CatalogSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(QueryExecutionSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PromHandler for Instance {
|
||||
async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
|
||||
self.execute_promql(query, QueryContext::arc())
|
||||
.await
|
||||
|
||||
@@ -13,14 +13,14 @@
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(trait_upcasting)]
|
||||
|
||||
pub mod datanode;
|
||||
pub mod error;
|
||||
mod heartbeat;
|
||||
pub mod instance;
|
||||
mod metric;
|
||||
pub mod metrics;
|
||||
mod mock;
|
||||
mod script;
|
||||
pub mod server;
|
||||
pub mod sql;
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -15,6 +15,4 @@
|
||||
//! datanode metrics
|
||||
|
||||
pub const METRIC_HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed";
|
||||
pub const METRIC_HANDLE_SCRIPTS_ELAPSED: &str = "datanode.handle_scripts_elapsed";
|
||||
pub const METRIC_RUN_SCRIPT_ELAPSED: &str = "datanode.run_script_elapsed";
|
||||
pub const METRIC_HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed";
|
||||
@@ -12,32 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::remote::MetaKvBackend;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
use meta_srv::mocks::MockInfo;
|
||||
use mito::config::EngineConfig as TableEngineConfig;
|
||||
use query::QueryEngineFactory;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
|
||||
use crate::datanode::DatanodeOptions;
|
||||
use crate::error::{CatalogSnafu, RecoverProcedureSnafu, Result};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::instance::{
|
||||
create_log_store, create_procedure_manager, new_object_store, DefaultEngine, Instance,
|
||||
};
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::error::Result;
|
||||
use crate::instance::Instance;
|
||||
|
||||
impl Instance {
|
||||
pub async fn with_mock_meta_client(opts: &DatanodeOptions) -> Result<Self> {
|
||||
@@ -46,98 +29,9 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn with_mock_meta_server(opts: &DatanodeOptions, meta_srv: MockInfo) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let logstore = Arc::new(create_log_store(&opts.wal).await?);
|
||||
let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
|
||||
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let table_engine = Arc::new(DefaultEngine::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
logstore.clone(),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
|
||||
// By default, catalog manager and factory are created in standalone mode
|
||||
let (catalog_manager, factory, heartbeat_task) = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
let catalog = Arc::new(
|
||||
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
|
||||
.await
|
||||
.context(CatalogSnafu)?,
|
||||
);
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
(catalog as CatalogManagerRef, factory, None)
|
||||
}
|
||||
Mode::Distributed => {
|
||||
let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
|
||||
table_engine.clone(),
|
||||
opts.node_id.unwrap_or(42),
|
||||
Arc::new(MetaKvBackend {
|
||||
client: meta_client.clone(),
|
||||
}),
|
||||
));
|
||||
let factory = QueryEngineFactory::new(catalog.clone());
|
||||
let heartbeat_task = HeartbeatTask::new(
|
||||
opts.node_id.unwrap_or(42),
|
||||
opts.rpc_addr.clone(),
|
||||
None,
|
||||
meta_client.clone(),
|
||||
catalog.clone(),
|
||||
);
|
||||
(catalog as CatalogManagerRef, factory, Some(heartbeat_task))
|
||||
}
|
||||
};
|
||||
let query_engine = factory.query_engine();
|
||||
let script_executor =
|
||||
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
|
||||
|
||||
let procedure_manager = create_procedure_manager(&opts.procedure).await?;
|
||||
if let Some(procedure_manager) = &procedure_manager {
|
||||
table_engine.register_procedure_loaders(&**procedure_manager);
|
||||
// Recover procedures.
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
query_engine: query_engine.clone(),
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
procedure_manager,
|
||||
),
|
||||
catalog_manager,
|
||||
script_executor,
|
||||
table_id_provider: Some(Arc::new(LocalTableIdProvider::default())),
|
||||
heartbeat_task,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct LocalTableIdProvider {
|
||||
inner: Arc<AtomicU32>,
|
||||
}
|
||||
|
||||
impl Default for LocalTableIdProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(AtomicU32::new(MIN_USER_TABLE_ID)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableIdProvider for LocalTableIdProvider {
|
||||
async fn next_table_id(&self) -> table::Result<TableId> {
|
||||
Ok(self.inner.fetch_add(1, Ordering::Relaxed))
|
||||
Instance::new_with(opts, Some(meta_client), compaction_scheduler).await
|
||||
}
|
||||
}
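The mock setup above hands out table IDs from an AtomicU32 counter. A minimal standalone sketch of that counter pattern, assuming only the standard library; the starting constant below is hypothetical and stands in for MIN_USER_TABLE_ID:

use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;

// Hypothetical starting point standing in for MIN_USER_TABLE_ID.
const FIRST_USER_TABLE_ID: u32 = 1024;

#[derive(Clone)]
struct LocalTableIdProvider {
    inner: Arc<AtomicU32>,
}

impl LocalTableIdProvider {
    fn new() -> Self {
        Self { inner: Arc::new(AtomicU32::new(FIRST_USER_TABLE_ID)) }
    }

    // fetch_add returns the previous value, so IDs stay unique even across clones and threads.
    fn next_table_id(&self) -> u32 {
        self.inner.fetch_add(1, Ordering::Relaxed)
    }
}

fn main() {
    let provider = LocalTableIdProvider::new();
    assert_eq!(provider.next_table_id(), FIRST_USER_TABLE_ID);
    assert_eq!(provider.next_table_id(), FIRST_USER_TABLE_ID + 1);
}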
|
||||
|
||||
|
||||
@@ -17,20 +17,18 @@ use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_runtime::Builder as RuntimeBuilder;
|
||||
use common_telemetry::tracing::log::info;
|
||||
use servers::error::Error::InternalIo;
|
||||
use servers::grpc::GrpcServer;
|
||||
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
|
||||
use servers::http::{HttpServer, HttpServerBuilder};
|
||||
use servers::metrics_handler::MetricsHandler;
|
||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
|
||||
use servers::server::Server;
|
||||
use servers::tls::TlsOption;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use tokio::select;
|
||||
|
||||
use crate::datanode::DatanodeOptions;
|
||||
use crate::error::Error::StartServer;
|
||||
use crate::error::{ParseAddrSnafu, Result, RuntimeResourceSnafu, StartServerSnafu};
|
||||
use crate::error::{
|
||||
ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownServerSnafu, StartServerSnafu,
|
||||
};
|
||||
use crate::instance::InstanceRef;
|
||||
|
||||
pub mod grpc;
|
||||
@@ -38,7 +36,7 @@ pub mod grpc;
|
||||
/// All rpc services.
|
||||
pub struct Services {
|
||||
grpc_server: GrpcServer,
|
||||
mysql_server: Option<Box<dyn Server>>,
|
||||
http_server: HttpServer,
|
||||
}
|
||||
|
||||
impl Services {
|
||||
@@ -51,48 +49,15 @@ impl Services {
|
||||
.context(RuntimeResourceSnafu)?,
|
||||
);
|
||||
|
||||
let mysql_server = match opts.mode {
|
||||
Mode::Standalone => {
|
||||
info!("Disable MySQL server on datanode when running in standalone mode");
|
||||
None
|
||||
}
|
||||
Mode::Distributed => {
|
||||
let mysql_io_runtime = Arc::new(
|
||||
RuntimeBuilder::default()
|
||||
.worker_threads(opts.mysql_runtime_size)
|
||||
.thread_name("mysql-io-handlers")
|
||||
.build()
|
||||
.context(RuntimeResourceSnafu)?,
|
||||
);
|
||||
let tls = TlsOption::default();
|
||||
// The default TLS config yields no TLS setup (None).
// TODO: find a better way to configure this.
|
||||
Some(MysqlServer::create_server(
|
||||
mysql_io_runtime,
|
||||
Arc::new(MysqlSpawnRef::new(
|
||||
ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
|
||||
None,
|
||||
)),
|
||||
Arc::new(MysqlSpawnConfig::new(
|
||||
tls.should_force_tls(),
|
||||
tls.setup()
|
||||
.map_err(|e| StartServer {
|
||||
source: InternalIo { source: e },
|
||||
})?
|
||||
.map(Arc::new),
|
||||
false,
|
||||
)),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
grpc_server: GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(instance),
|
||||
None,
|
||||
grpc_runtime,
|
||||
),
|
||||
mysql_server,
|
||||
http_server: HttpServerBuilder::new(opts.http_opts.clone())
|
||||
.with_metrics_handler(MetricsHandler)
|
||||
.build(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -100,19 +65,27 @@ impl Services {
|
||||
let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.rpc_addr,
|
||||
})?;
|
||||
let http_addr = opts.http_opts.addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.http_opts.addr,
|
||||
})?;
|
||||
let grpc = self.grpc_server.start(grpc_addr);
|
||||
let http = self.http_server.start(http_addr);
|
||||
select!(
|
||||
v = grpc => v.context(StartServerSnafu)?,
|
||||
v = http => v.context(StartServerSnafu)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
let mut res = vec![self.grpc_server.start(grpc_addr)];
|
||||
if let Some(mysql_server) = &self.mysql_server {
|
||||
let mysql_addr = &opts.mysql_addr;
|
||||
let mysql_addr: SocketAddr = mysql_addr
|
||||
.parse()
|
||||
.context(ParseAddrSnafu { addr: mysql_addr })?;
|
||||
res.push(mysql_server.start(mysql_addr));
|
||||
};
|
||||
|
||||
futures::future::try_join_all(res)
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
self.grpc_server
|
||||
.shutdown()
|
||||
.await
|
||||
.context(StartServerSnafu)?;
|
||||
.context(ShutdownServerSnafu)?;
|
||||
self.http_server
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
}
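The start routine above moves from select! over two fixed futures to try_join_all over a variable list of servers, since the MySQL server is optional. A runnable sketch of that pattern, assuming only the tokio and futures crates; the server names and the mysql_enabled flag are made up for the example:

use futures::future::try_join_all;

async fn start_server(name: &'static str) -> Result<(), String> {
    // Stand-in for binding and serving; a real server would run until shutdown.
    println!("{name} started");
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    // Collect only the start futures that are actually configured.
    let mut startups = vec![start_server("grpc")];
    let mysql_enabled = true; // hypothetical flag standing in for the datanode mode check
    if mysql_enabled {
        startups.push(start_server("mysql"));
    }
    // try_join_all fails fast if any server fails to start.
    try_join_all(startups).await?;
    Ok(())
}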
|
||||
|
||||
@@ -12,13 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr};
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, FlushTableExpr};
|
||||
use common_grpc_expr::{alter_expr_to_request, create_expr_to_request};
|
||||
use common_query::Output;
|
||||
use common_telemetry::info;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use table::requests::DropTableRequest;
|
||||
use table::requests::{DropTableRequest, FlushTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
AlterExprToRequestSnafu, BumpTableIdSnafu, CreateExprToRequestSnafu,
|
||||
@@ -82,6 +82,25 @@ impl Instance {
|
||||
.execute(SqlRequest::DropTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_flush_table(&self, expr: FlushTableExpr) -> Result<Output> {
|
||||
let table_name = if expr.table_name.trim().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(expr.table_name)
|
||||
};
|
||||
|
||||
let req = FlushTableRequest {
|
||||
catalog_name: expr.catalog_name,
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
wait: None,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
.await
|
||||
}
|
||||
}
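handle_flush_table above treats an empty or whitespace-only table name as "flush every table in the schema". A tiny sketch of that empty-string-to-Option conversion, using only the standard library:

/// Convert a protobuf-style string field (empty means "not set") into an Option.
fn non_empty(name: String) -> Option<String> {
    if name.trim().is_empty() { None } else { Some(name) }
}

fn main() {
    assert_eq!(non_empty("   ".to_string()), None);
    assert_eq!(non_empty("demo".to_string()), Some("demo".to_string()));
}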
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -136,7 +155,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
fn test_create_column_schema() {
|
||||
let column_def = ColumnDef {
|
||||
name: "a".to_string(),
|
||||
|
||||
@@ -13,53 +13,48 @@
|
||||
// limitations under the License.
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_query::Output;
|
||||
use common_telemetry::error;
|
||||
use query::query_engine::QueryEngineRef;
|
||||
use query::sql::{describe_table, explain, show_databases, show_tables};
|
||||
use query::sql::{show_databases, show_tables};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::describe::DescribeTable;
|
||||
use sql::statements::explain::Explain;
|
||||
use sql::statements::show::{ShowDatabases, ShowTables};
|
||||
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
|
||||
use table::requests::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu};
|
||||
use crate::error::{
|
||||
CloseTableEngineSnafu, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
|
||||
mod alter;
|
||||
mod copy_table;
|
||||
mod copy_table_from;
|
||||
mod copy_table_to;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
mod flush_table;
|
||||
pub(crate) mod insert;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SqlRequest {
|
||||
Insert(InsertRequest),
|
||||
CreateTable(CreateTableRequest),
|
||||
CreateDatabase(CreateDatabaseRequest),
|
||||
Alter(AlterTableRequest),
|
||||
DropTable(DropTableRequest),
|
||||
FlushTable(FlushTableRequest),
|
||||
ShowDatabases(ShowDatabases),
|
||||
ShowTables(ShowTables),
|
||||
DescribeTable(DescribeTable),
|
||||
Explain(Box<Explain>),
|
||||
Delete(Delete),
|
||||
CopyTable(CopyTableRequest),
|
||||
CopyTableFrom(CopyTableFromRequest),
|
||||
}
|
||||
|
||||
// Handler to execute SQL except query
|
||||
#[derive(Clone)]
|
||||
pub struct SqlHandler {
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
@@ -68,14 +63,12 @@ impl SqlHandler {
|
||||
pub fn new(
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine,
|
||||
catalog_manager,
|
||||
query_engine,
|
||||
engine_procedure,
|
||||
procedure_manager,
|
||||
}
|
||||
@@ -87,14 +80,14 @@ impl SqlHandler {
|
||||
// there, instead of executing here in a "static" fashion.
|
||||
pub async fn execute(&self, request: SqlRequest, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let result = match request {
|
||||
SqlRequest::Insert(req) => self.insert(req).await,
|
||||
SqlRequest::CreateTable(req) => self.create_table(req).await,
|
||||
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
|
||||
SqlRequest::Alter(req) => self.alter(req).await,
|
||||
SqlRequest::DropTable(req) => self.drop_table(req).await,
|
||||
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
|
||||
SqlRequest::CopyTable(req) => self.copy_table(req).await,
|
||||
SqlRequest::CopyTableFrom(req) => self.copy_table_from(req).await,
|
||||
SqlRequest::CopyTable(req) => match req.direction {
|
||||
CopyDirection::Export => self.copy_table_to(req).await,
|
||||
CopyDirection::Import => self.copy_table_from(req).await,
|
||||
},
|
||||
SqlRequest::ShowDatabases(req) => {
|
||||
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
|
||||
}
|
||||
@@ -102,22 +95,7 @@ impl SqlHandler {
|
||||
show_tables(req, self.catalog_manager.clone(), query_ctx.clone())
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::DescribeTable(req) => {
|
||||
let (catalog, schema, table) =
|
||||
table_idents_to_full_name(req.name(), query_ctx.clone())?;
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(&catalog, &schema, &table)
|
||||
.await
|
||||
.context(error::CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: req.name().to_string(),
|
||||
})?;
|
||||
describe_table(table).context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::Explain(req) => explain(req, self.query_engine.clone(), query_ctx.clone())
|
||||
.await
|
||||
.context(ExecuteSqlSnafu),
|
||||
SqlRequest::FlushTable(req) => self.flush_table(req).await,
|
||||
};
|
||||
if let Err(e) = &result {
|
||||
error!(e; "{query_ctx}");
|
||||
@@ -139,240 +117,12 @@ impl SqlHandler {
|
||||
pub fn table_engine(&self) -> TableEngineRef {
|
||||
self.table_engine.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::{CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::PhysicalPlanRef;
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use futures::StreamExt;
|
||||
use log_store::NoopLogStore;
|
||||
use mito::config::EngineConfig as TableEngineConfig;
|
||||
use mito::engine::MitoEngine;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use query::parser::{QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use session::context::QueryContext;
|
||||
use sql::statements::statement::Statement;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::error::Result as TableResult;
|
||||
use table::metadata::TableInfoRef;
|
||||
use table::Table;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
|
||||
struct DemoTable;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for DemoTable {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
.with_time_index(true),
|
||||
];
|
||||
|
||||
Arc::new(
|
||||
SchemaBuilder::try_from(column_schemas)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
) -> TableResult<PhysicalPlanRef> {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_statement_to_request() {
|
||||
let dir = create_temp_dir("setup_test_engine_and_table");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let accessor = Builder::default().root(&store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let sql = r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#;
|
||||
|
||||
let table_engine = Arc::new(MitoEngine::<EngineImpl<NoopLogStore>>::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
|
||||
let catalog_list = Arc::new(
|
||||
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
|
||||
.await
|
||||
.unwrap(),
|
||||
);
|
||||
catalog_list.start().await.unwrap();
|
||||
assert!(catalog_list
|
||||
.register_table(RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "demo".to_string(),
|
||||
table_id: 1,
|
||||
table: Arc::new(DemoTable),
|
||||
})
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
self.table_engine
|
||||
.close()
|
||||
.await
|
||||
.unwrap());
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_list.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let sql_handler = SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_list.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
None,
|
||||
);
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Request(SqlRequest::Insert(req)) => {
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(4, columns_values.len());
|
||||
|
||||
let hosts = &columns_values["host"];
|
||||
assert_eq!(2, hosts.len());
|
||||
assert_eq!(Value::from("host1"), hosts.get(0));
|
||||
assert_eq!(Value::from("host2"), hosts.get(1));
|
||||
|
||||
let cpus = &columns_values["cpu"];
|
||||
assert_eq!(2, cpus.len());
|
||||
assert_eq!(Value::from(66.6f64), cpus.get(0));
|
||||
assert_eq!(Value::from(88.8f64), cpus.get(1));
|
||||
|
||||
let memories = &columns_values["memory"];
|
||||
assert_eq!(2, memories.len());
|
||||
assert_eq!(Value::from(1024f64), memories.get(0));
|
||||
assert_eq!(Value::from(333.3f64), memories.get(1));
|
||||
|
||||
let ts = &columns_values["ts"];
|
||||
assert_eq!(2, ts.len());
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276557000i64)),
|
||||
ts.get(0)
|
||||
);
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276558000i64)),
|
||||
ts.get(1)
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
panic!("Not supposed to reach here")
|
||||
}
|
||||
}
|
||||
|
||||
// test insert into select
|
||||
|
||||
// type mismatch
|
||||
let sql = "insert into demo(ts) select number from numbers limit 3";
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
assert!(matches!(
|
||||
stream.next().await.unwrap().unwrap_err(),
|
||||
Error::ColumnTypeMismatch { .. }
|
||||
));
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let sql = "insert into demo(cpu) select cast(number as double) from numbers limit 3";
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
let mut times = 0;
|
||||
while let Some(Ok(SqlRequest::Insert(req))) = stream.next().await {
|
||||
times += 1;
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(1, columns_values.len());
|
||||
|
||||
let memories = &columns_values["cpu"];
|
||||
assert_eq!(3, memories.len());
|
||||
assert_eq!(Value::from(0.0f64), memories.get(0));
|
||||
assert_eq!(Value::from(1.0f64), memories.get(1));
|
||||
assert_eq!(Value::from(2.0f64), memories.get(2));
|
||||
}
|
||||
assert_eq!(1, times);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
.map_err(BoxedError::new)
|
||||
.context(CloseTableEngineSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,150 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::pin::Pin;
|
||||
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
|
||||
use datafusion::parquet::arrow::ArrowWriter;
|
||||
use datafusion::parquet::basic::{Compression, Encoding};
|
||||
use datafusion::parquet::file::properties::WriterProperties;
|
||||
use datafusion::physical_plan::RecordBatchStream;
|
||||
use futures::TryStreamExt;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use snafu::ResultExt;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
table: &req.table_name,
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let stream = table
|
||||
.scan(None, &[], None)
|
||||
.await
|
||||
.with_context(|_| error::CopyTableSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let stream = stream
|
||||
.execute(0, SessionContext::default().task_ctx())
|
||||
.context(error::TableScanExecSnafu)?;
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
|
||||
|
||||
let accessor = Builder::default()
|
||||
.root("/")
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
|
||||
let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
|
||||
// TODO(jiachun):
|
||||
// For now, COPY is implemented synchronously.
// When copying a large table, it will block for a long time.
// Maybe we should make "copy" run in the background,
// like PG: https://www.postgresql.org/docs/current/sql-copy.html
|
||||
let rows = parquet_writer.flush().await?;
|
||||
|
||||
Ok(Output::AffectedRows(rows))
|
||||
}
|
||||
}
|
||||
|
||||
type DfRecordBatchStream = Pin<Box<DfRecordBatchStreamAdapter>>;
|
||||
|
||||
struct ParquetWriter {
|
||||
file_name: String,
|
||||
stream: DfRecordBatchStream,
|
||||
object_store: ObjectStore,
|
||||
max_row_group_size: usize,
|
||||
max_rows_in_segment: usize,
|
||||
}
|
||||
|
||||
impl ParquetWriter {
|
||||
pub fn new(file_name: String, stream: DfRecordBatchStream, object_store: ObjectStore) -> Self {
|
||||
Self {
|
||||
file_name,
|
||||
stream,
|
||||
object_store,
|
||||
// TODO(jiachun): make these configurable: WITH (max_row_group_size=xxx, max_rows_in_segment=xxx)
|
||||
max_row_group_size: 4096,
|
||||
max_rows_in_segment: 5000000, // default 5M rows per segment
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn flush(&mut self) -> Result<usize> {
|
||||
let schema = self.stream.as_ref().schema();
|
||||
let writer_props = WriterProperties::builder()
|
||||
.set_compression(Compression::ZSTD)
|
||||
.set_encoding(Encoding::PLAIN)
|
||||
.set_max_row_group_size(self.max_row_group_size)
|
||||
.build();
|
||||
let mut total_rows = 0;
|
||||
loop {
|
||||
let mut buf = vec![];
|
||||
let mut arrow_writer =
|
||||
ArrowWriter::try_new(&mut buf, schema.clone(), Some(writer_props.clone()))
|
||||
.context(error::WriteParquetSnafu)?;
|
||||
|
||||
let mut rows = 0;
|
||||
let mut end_loop = true;
|
||||
// TODO(hl & jiachun): Since OpenDAL's writer is async and ArrowWriter requires a `std::io::Write`,
// here we use a Vec<u8> to buffer all parquet bytes in memory and write them to the object store
// in one shot. Maybe we should find a better way to bridge ArrowWriter and OpenDAL's object.
|
||||
while let Some(batch) = self
|
||||
.stream
|
||||
.try_next()
|
||||
.await
|
||||
.context(error::PollStreamSnafu)?
|
||||
{
|
||||
arrow_writer
|
||||
.write(&batch)
|
||||
.context(error::WriteParquetSnafu)?;
|
||||
rows += batch.num_rows();
|
||||
if rows >= self.max_rows_in_segment {
|
||||
end_loop = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let start_row_num = total_rows + 1;
|
||||
total_rows += rows;
|
||||
arrow_writer.close().context(error::WriteParquetSnafu)?;
|
||||
|
||||
// if rows == 0, we just end up with an empty file.
|
||||
//
|
||||
// file_name like:
|
||||
// "file_name_1_1000000" (row num: 1 ~ 1000000),
|
||||
// "file_name_1000001_xxx" (row num: 1000001 ~ xxx)
|
||||
let file_name = format!("{}_{}_{}", self.file_name, start_row_num, total_rows);
|
||||
let object = self.object_store.object(&file_name);
|
||||
object.write(buf).await.context(error::WriteObjectSnafu {
|
||||
path: object.path(),
|
||||
})?;
|
||||
|
||||
if end_loop {
|
||||
return Ok(total_rows);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
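The removed writer above splits output into segments of roughly max_rows_in_segment rows and names each file "<base>_<first_row>_<last_row>". A self-contained sketch of that segmenting and naming logic; the batch sizes and limit below are invented for the example:

/// Split batch row counts into segments of roughly `max_rows` rows and name each
/// segment "<base>_<first_row>_<last_row>", mirroring the naming scheme in the comment above.
fn segment_names(base: &str, batch_sizes: &[usize], max_rows: usize) -> Vec<String> {
    let mut names = Vec::new();
    let mut total = 0usize; // rows written so far, across all segments
    let mut rows_in_segment = 0usize;
    let mut start = 1usize; // first row number of the current segment
    for &n in batch_sizes {
        rows_in_segment += n;
        total += n;
        // Like the writer above, a segment is closed after the batch that crosses the limit.
        if rows_in_segment >= max_rows {
            names.push(format!("{base}_{start}_{total}"));
            start = total + 1;
            rows_in_segment = 0;
        }
    }
    if rows_in_segment > 0 {
        names.push(format!("{base}_{start}_{total}"));
    }
    names
}

fn main() {
    let names = segment_names("demo.parquet", &[600_000, 500_000, 300_000], 1_000_000);
    assert_eq!(
        names,
        vec![
            "demo.parquet_1_1100000".to_string(),
            "demo.parquet_1100001_1400000".to_string(),
        ]
    );
}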
|
||||
@@ -15,35 +15,26 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_compat::CompatExt;
|
||||
use common_datasource::lister::{Lister, Source};
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_datasource::util::find_dir_and_filename;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::DataTypesSnafu;
|
||||
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
|
||||
use datatypes::arrow::record_batch::RecordBatch;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use futures::future;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::{Fs, S3};
|
||||
use object_store::{Object, ObjectStore, ObjectStoreBuilder};
|
||||
use regex::Regex;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{CopyTableFromRequest, InsertRequest};
|
||||
use table::requests::{CopyTableRequest, InsertRequest};
|
||||
use tokio::io::BufReader;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
const S3_SCHEMA: &str = "S3";
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableFromRequest) -> Result<Output> {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
@@ -51,16 +42,37 @@ impl SqlHandler {
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let datasource = DataSource::new(&req.from, req.pattern, req.connection)?;
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
|
||||
let objects = datasource.list().await?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let (dir, filename) = find_dir_and_filename(&path);
|
||||
let regex = req
|
||||
.pattern
|
||||
.as_ref()
|
||||
.map(|x| Regex::new(x))
|
||||
.transpose()
|
||||
.context(error::BuildRegexSnafu)?;
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let lister = Lister::new(object_store.clone(), source, dir, regex);
|
||||
|
||||
let entries = lister.list().await.context(error::ListObjectsSnafu)?;
|
||||
|
||||
let mut buf: Vec<RecordBatch> = Vec::new();
|
||||
|
||||
for obj in objects.iter() {
|
||||
let reader = obj.reader().await.context(error::ReadObjectSnafu {
|
||||
path: &obj.path().to_string(),
|
||||
})?;
|
||||
for entry in entries.iter() {
|
||||
let path = entry.path();
|
||||
let reader = object_store
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let buf_reader = BufReader::new(reader.compat());
|
||||
|
||||
@@ -131,315 +143,3 @@ impl SqlHandler {
|
||||
Ok(Output::AffectedRows(result.iter().sum()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
struct DataSource {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl DataSource {
|
||||
fn from_path(url: &str, regex: Option<Regex>) -> Result<DataSource> {
|
||||
let result = if url.ends_with('/') {
|
||||
Url::from_directory_path(url)
|
||||
} else {
|
||||
Url::from_file_path(url)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let path = url.path();
|
||||
|
||||
let (path, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let accessor = Fs::default()
|
||||
.root(&path)
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(DataSource {
|
||||
object_store: ObjectStore::new(accessor).finish(),
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
Err(()) => error::InvalidPathSnafu {
|
||||
path: url.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_s3_backend(
|
||||
host: Option<&str>,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
builder.root(path);
|
||||
|
||||
if let Some(bucket) = host {
|
||||
builder.bucket(bucket);
|
||||
}
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
let accessor = builder.build().context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
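ENABLE_VIRTUAL_HOST_STYLE above is read from the connection options with str::parse::<bool>() and turned into a descriptive error on failure. A minimal sketch of that option handling; the error type here is just a String and the helper name is invented:

use std::collections::HashMap;

/// Parse a boolean connection option such as ENABLE_VIRTUAL_HOST_STYLE,
/// turning a parse failure into a descriptive error.
fn parse_bool_option(connection: &HashMap<String, String>, key: &str) -> Result<bool, String> {
    match connection.get(key) {
        None => Ok(false),
        Some(raw) => raw
            .parse::<bool>()
            .map_err(|e| format!("failed to parse the option {key}={raw}, {e}")),
    }
}

fn main() {
    let mut conn = HashMap::new();
    conn.insert("ENABLE_VIRTUAL_HOST_STYLE".to_string(), "true".to_string());
    assert_eq!(parse_bool_option(&conn, "ENABLE_VIRTUAL_HOST_STYLE"), Ok(true));
    conn.insert("ENABLE_VIRTUAL_HOST_STYLE".to_string(), "yes".to_string());
    assert!(parse_bool_option(&conn, "ENABLE_VIRTUAL_HOST_STYLE").is_err());
}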
|
||||
|
||||
fn from_url(
|
||||
url: Url,
|
||||
regex: Option<Regex>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let host = url.host_str();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let (dir, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => DataSource::build_s3_backend(host, &dir, connection)?,
|
||||
_ => {
|
||||
return error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path: dir,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
url: &str,
|
||||
pattern: Option<String>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let regex = if let Some(pattern) = pattern {
|
||||
let regex = Regex::new(&pattern).context(error::BuildRegexSnafu)?;
|
||||
Some(regex)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => DataSource::from_url(url, regex, connection),
|
||||
Err(err) => {
|
||||
if ParseError::RelativeUrlWithoutBase == err {
|
||||
DataSource::from_path(url, regex)
|
||||
} else {
|
||||
Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
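DataSource::new above first tries Url::parse and falls back to treating the input as a local path only when the error is RelativeUrlWithoutBase. A small sketch of that dispatch using only the url crate; the formatting of the result is invented for illustration:

use url::{ParseError, Url};

/// Decide whether a COPY location is a remote URL or a local filesystem path,
/// falling back to the path form when there is no scheme.
fn classify(location: &str) -> String {
    match Url::parse(location) {
        Ok(url) => format!("scheme={}, path={}", url.scheme(), url.path()),
        Err(ParseError::RelativeUrlWithoutBase) => format!("local path: {location}"),
        Err(e) => format!("invalid url: {e}"),
    }
}

fn main() {
    assert_eq!(classify("s3://bucket/to/path/"), "scheme=s3, path=/to/path/");
    assert_eq!(classify("/tmp/data/file.parquet"), "local path: /tmp/data/file.parquet");
}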
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Object>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.object("/")
|
||||
.list()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = if let Some(regex) = &self.regex {
|
||||
regex.is_match(f.name())
|
||||
} else {
|
||||
true
|
||||
};
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
let obj = self.object_store.object(filename);
|
||||
|
||||
Ok(vec![obj])
|
||||
}
|
||||
}
|
||||
}
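The directory listing above keeps every entry when no pattern is given and otherwise filters names through a Regex. A standalone sketch of that optional-regex filter; the file names are hypothetical:

use regex::Regex;

/// Keep only the entries whose file name matches the optional pattern.
fn filter_names<'a>(names: &'a [&'a str], pattern: Option<&str>) -> Vec<&'a str> {
    let regex = pattern.map(|p| Regex::new(p).expect("invalid pattern"));
    names
        .iter()
        .copied()
        .filter(|name| regex.as_ref().map(|r| r.is_match(name)).unwrap_or(true))
        .collect()
}

fn main() {
    let names = ["a.parquet", "a.csv", "b.parquet"];
    assert_eq!(filter_names(&names, Some(r"\.parquet$")), vec!["a.parquet", "b.parquet"]);
    assert_eq!(filter_names(&names, None), names.to_vec());
}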
|
||||
|
||||
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = DataSource::find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/datanode/src/sql/copy_table_to.rs (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_datasource;
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use snafu::ResultExt;
|
||||
use storage::sst::SstInfo;
|
||||
use storage::{ParquetWriter, Source};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
|
||||
use crate::error::{self, Result, WriteParquetSnafu};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
table: &req.table_name,
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let stream = table
|
||||
.scan(None, &[], None)
|
||||
.await
|
||||
.with_context(|_| error::CopyTableSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let stream = stream
|
||||
.execute(0, SessionContext::default().task_ctx())
|
||||
.context(error::TableScanExecSnafu)?;
|
||||
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let writer = ParquetWriter::new(&path, Source::Stream(stream), object_store);
|
||||
|
||||
let rows_copied = writer
|
||||
.write_sst(&storage::sst::WriteOptions::default())
|
||||
.await
|
||||
.context(WriteParquetSnafu)?
|
||||
.map(|SstInfo { num_rows, .. }| num_rows)
|
||||
.unwrap_or(0);
|
||||
|
||||
Ok(Output::AffectedRows(rows_copied))
|
||||
}
|
||||
}
|
||||
@@ -313,13 +313,15 @@ mod tests {
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::Schema;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
use sql::dialect::GenericDialect;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::tests::test_util::create_mock_sql_handler;
|
||||
use crate::tests::test_util::{create_mock_sql_handler, MockInstance};
|
||||
|
||||
fn sql_to_statement(sql: &str) -> CreateTable {
|
||||
let mut res = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
|
||||
@@ -522,4 +524,42 @@ mod tests {
|
||||
schema.column_schema_by_name("memory").unwrap().data_type
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn create_table_by_procedure() {
|
||||
let instance = MockInstance::with_procedure_enabled("create_table_by_procedure").await;
|
||||
|
||||
let sql = r#"create table test_table(
|
||||
host string,
|
||||
ts timestamp,
|
||||
cpu double default 0,
|
||||
memory double,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let output = instance
|
||||
.inner()
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
// create if not exists
|
||||
let sql = r#"create table if not exists test_table(
|
||||
host string,
|
||||
ts timestamp,
|
||||
cpu double default 0,
|
||||
memory double,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let output = instance
|
||||
.inner()
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::vectors::StringVector;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::ast::{BinaryOperator, Expr, Value};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::sql_value_to_value;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::DeleteRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{ColumnNotFoundSnafu, DeleteSnafu, InvalidSqlSnafu, NotSupportSqlSnafu, Result};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn delete(&self, query_ctx: QueryContextRef, stmt: Delete) -> Result<Output> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
|
||||
let table_ref = TableReference {
|
||||
catalog: &catalog_name.to_string(),
|
||||
schema: &schema_name.to_string(),
|
||||
table: &table_name.to_string(),
|
||||
};
|
||||
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let req = DeleteRequest {
|
||||
key_column_values: parse_selection(stmt.selection(), &table)?,
|
||||
};
|
||||
|
||||
let affected_rows = table.delete(req).await.with_context(|_| DeleteSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
Ok(Output::AffectedRows(affected_rows))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the delete selection. The currently supported format is `tagkey1 = 'tagvalue1' AND ts = 'value'`
/// (only `=` and `AND` are allowed in the WHERE clause, and all key columns must be provided).
|
||||
fn parse_selection(
|
||||
selection: &Option<Expr>,
|
||||
table: &TableRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut key_column_values = HashMap::new();
|
||||
if let Some(expr) = selection {
|
||||
parse_expr(expr, &mut key_column_values, table)?;
|
||||
}
|
||||
Ok(key_column_values)
|
||||
}
|
||||
|
||||
fn parse_expr(
|
||||
expr: &Expr,
|
||||
key_column_values: &mut HashMap<String, VectorRef>,
|
||||
table: &TableRef,
|
||||
) -> Result<()> {
|
||||
// match BinaryOp
|
||||
if let Expr::BinaryOp { left, op, right } = expr {
|
||||
match (&**left, op, &**right) {
|
||||
// match And operator
|
||||
(Expr::BinaryOp { .. }, BinaryOperator::And, Expr::BinaryOp { .. }) => {
|
||||
parse_expr(left, key_column_values, table)?;
|
||||
parse_expr(right, key_column_values, table)?;
|
||||
return Ok(());
|
||||
}
|
||||
// match Eq operator
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Value(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
value_to_vector(&column_name.to_string(), value, table)?,
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Identifier(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
Arc::new(StringVector::from(vec![value.to_string()])),
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
NotSupportSqlSnafu {
|
||||
msg: format!(
|
||||
"Not support sql expr:{expr},correct format is tagkey1 = tagvalue1 and ts = value"
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// Parses a SQL value into a single-element vector of the column's data type.
|
||||
fn value_to_vector(column_name: &String, sql_value: &Value, table: &TableRef) -> Result<VectorRef> {
|
||||
let schema = table.schema();
|
||||
let column_schema =
|
||||
schema
|
||||
.column_schema_by_name(column_name)
|
||||
.with_context(|| ColumnNotFoundSnafu {
|
||||
table_name: table.table_info().name.clone(),
|
||||
column_name: column_name.to_string(),
|
||||
})?;
|
||||
let data_type = &column_schema.data_type;
|
||||
let value = sql_value_to_value(column_name, data_type, sql_value);
|
||||
match value {
|
||||
Ok(value) => {
|
||||
let mut vec = data_type.create_mutable_vector(1);
|
||||
if vec.try_push_value_ref(value.as_value_ref()).is_err() {
|
||||
return InvalidSqlSnafu {
|
||||
msg: format!(
|
||||
"invalid sql, column name is {column_name}, value is {sql_value}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
Ok(vec.to_vector())
|
||||
}
|
||||
_ => InvalidSqlSnafu {
|
||||
msg: format!("invalid sql, column name is {column_name}, value is {sql_value}",),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
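The removed delete handler above walks the WHERE clause recursively, accepting only `=` comparisons joined by `AND`. A simplified sketch of that walk with a hypothetical expression type instead of the SQL AST; the real handler rejects any other operator with an error:

use std::collections::HashMap;

// Hypothetical expression tree standing in for the SQL AST used above.
enum Expr {
    And(Box<Expr>, Box<Expr>),
    Eq(String, String),
}

/// Collect `column = value` pairs from a WHERE clause limited to `=` and `AND`.
fn parse_selection(expr: &Expr, out: &mut HashMap<String, String>) -> Result<(), String> {
    match expr {
        Expr::And(left, right) => {
            parse_selection(left, out)?;
            parse_selection(right, out)
        }
        Expr::Eq(column, value) => {
            out.insert(column.clone(), value.clone());
            Ok(())
        }
    }
}

fn main() {
    let expr = Expr::And(
        Box::new(Expr::Eq("host".into(), "host1".into())),
        Box::new(Expr::Eq("ts".into(), "1655276557000".into())),
    );
    let mut key_column_values = HashMap::new();
    parse_selection(&expr, &mut key_column_values).unwrap();
    assert_eq!(key_column_values.len(), 2);
}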
|
||||
src/datanode/src/sql/flush_table.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use catalog::SchemaProviderRef;
|
||||
use common_query::Output;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::requests::FlushTableRequest;
|
||||
|
||||
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(schema, table, req.region_number, req.wait)
|
||||
.await?;
|
||||
} else {
|
||||
let all_table_names = schema.table_names().context(CatalogSnafu)?;
|
||||
futures::future::join_all(all_table_names.iter().map(|table| {
|
||||
self.flush_table_inner(schema.clone(), table, req.region_number, req.wait)
|
||||
}))
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
}
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
|
||||
async fn flush_table_inner(
|
||||
&self,
|
||||
schema: SchemaProviderRef,
|
||||
table_name: &str,
|
||||
region: Option<u32>,
|
||||
wait: Option<bool>,
|
||||
) -> Result<()> {
|
||||
schema
|
||||
.table(table_name)
|
||||
.await
|
||||
.context(error::FindTableSnafu { table_name })?
|
||||
.context(error::TableNotFoundSnafu { table_name })?
|
||||
.flush(region, wait)
|
||||
.await
|
||||
.context(error::FlushTableSnafu { table_name })
|
||||
}
|
||||
}
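When no table name is given, flush_table above fans out over every table in the schema with join_all and then collects the per-table Results into one. A runnable sketch of that collect pattern, assuming the tokio and futures crates; flush_one and the table names are stand-ins:

use futures::future::join_all;

// Stand-in for flushing a single table; "broken" simulates a failing flush.
async fn flush_one(table: &str) -> Result<(), String> {
    if table == "broken" {
        Err(format!("cannot flush {table}"))
    } else {
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    let tables = ["cpu", "memory", "disk"];
    // Flush all tables concurrently, then fold the per-table results into one,
    // surfacing the first error if any flush failed.
    let result: Result<Vec<()>, String> = join_all(tables.iter().map(|t| flush_one(t)))
        .await
        .into_iter()
        .collect();
    assert!(result.is_ok());
}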
|
||||
@@ -11,49 +11,31 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datafusion_expr::type_coercion::binary::coerce_types;
|
||||
use datafusion_expr::Operator;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use datatypes::vectors::MutableVector;
|
||||
use futures::stream::{self, StreamExt};
|
||||
use futures::Stream;
|
||||
use query::parser::QueryStatement;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::ast::Value as SqlValue;
|
||||
use sql::statements::insert::Insert;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::{self};
|
||||
use table::engine::TableReference;
 use table::requests::*;
 use table::TableRef;

 use crate::error::{
-    CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
-    ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
-    ExecuteSqlSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu, ParseSqlValueSnafu,
-    Result, TableNotFoundSnafu,
+    CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
+    ColumnValuesNumberMismatchSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
+    ParseSqlValueSnafu, Result, TableNotFoundSnafu,
 };
-use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
+use crate::sql::{table_idents_to_full_name, SqlHandler};

 const DEFAULT_PLACEHOLDER_VALUE: &str = "default";

-type InsertRequestStream = Pin<Box<dyn Stream<Item = Result<SqlRequest>> + Send>>;
-pub(crate) enum InsertRequests {
-    // Single request
-    Request(SqlRequest),
-    // Streaming requests
-    Stream(InsertRequestStream),
-}
-
 impl SqlHandler {
     pub(crate) async fn insert(&self, req: InsertRequest) -> Result<Output> {
         // FIXME(dennis): table_ref is used in InsertSnafu and the req is consumed
@@ -77,7 +59,7 @@ impl SqlHandler {
         table_ref: TableReference,
         table: &TableRef,
         stmt: Insert,
-    ) -> Result<SqlRequest> {
+    ) -> Result<InsertRequest> {
         let values = stmt
             .values_body()
             .context(ParseSqlValueSnafu)?
@@ -129,7 +111,7 @@ impl SqlHandler {
             }
         }

-        Ok(SqlRequest::Insert(InsertRequest {
+        Ok(InsertRequest {
             catalog_name: table_ref.catalog.to_string(),
             schema_name: table_ref.schema.to_string(),
             table_name: table_ref.table.to_string(),
@@ -138,149 +120,14 @@ impl SqlHandler {
                 .map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
                 .collect(),
             region_number: 0,
-        }))
+        })
     }

-    fn build_request_from_batch(
-        stmt: Insert,
-        table: TableRef,
-        batch: RecordBatch,
-        query_ctx: QueryContextRef,
-    ) -> Result<SqlRequest> {
-        let (catalog_name, schema_name, table_name) =
-            table_idents_to_full_name(stmt.table_name(), query_ctx)?;
-
-        let schema = table.schema();
-        let columns: Vec<_> = if stmt.columns().is_empty() {
-            schema
-                .column_schemas()
-                .iter()
-                .map(|c| c.name.to_string())
-                .collect()
-        } else {
-            stmt.columns().iter().map(|c| (*c).clone()).collect()
-        };
-        let columns_num = columns.len();
-
-        ensure!(
-            batch.num_columns() == columns_num,
-            ColumnValuesNumberMismatchSnafu {
-                columns: columns_num,
-                values: batch.num_columns(),
-            }
-        );
-
-        let batch_schema = &batch.schema;
-        let batch_columns = batch_schema.column_schemas();
-        assert_eq!(batch_columns.len(), columns_num);
-        let mut columns_values = HashMap::with_capacity(columns_num);
-
-        for (i, column_name) in columns.into_iter().enumerate() {
-            let column_schema = schema
-                .column_schema_by_name(&column_name)
-                .with_context(|| ColumnNotFoundSnafu {
-                    table_name: &table_name,
-                    column_name: &column_name,
-                })?;
-            let expect_datatype = column_schema.data_type.as_arrow_type();
-            // It's safe to retrieve the column schema by index, we already
-            // check columns number is the same above.
-            let batch_datatype = batch_columns[i].data_type.as_arrow_type();
-            let coerced_type = coerce_types(&expect_datatype, &Operator::Eq, &batch_datatype)
-                .map_err(|_| Error::ColumnTypeMismatch {
-                    column: column_name.clone(),
-                    expected: column_schema.data_type.clone(),
-                    actual: batch_columns[i].data_type.clone(),
-                })?;
-
-            ensure!(
-                expect_datatype == coerced_type,
-                ColumnTypeMismatchSnafu {
-                    column: column_name,
-                    expected: column_schema.data_type.clone(),
-                    actual: batch_columns[i].data_type.clone(),
-                }
-            );
-            let vector = batch
-                .column(i)
-                .cast(&column_schema.data_type)
-                .map_err(|_| Error::ColumnTypeMismatch {
-                    column: column_name.clone(),
-                    expected: column_schema.data_type.clone(),
-                    actual: batch_columns[i].data_type.clone(),
-                })?;
-
-            columns_values.insert(column_name, vector);
-        }
-
-        Ok(SqlRequest::Insert(InsertRequest {
-            catalog_name,
-            schema_name,
-            table_name,
-            columns_values,
-            region_number: 0,
-        }))
-    }
-
-    // FIXME(dennis): move it to frontend when refactor is done.
-    async fn build_stream_from_query(
-        &self,
-        table: TableRef,
-        stmt: Insert,
-        query_ctx: QueryContextRef,
-    ) -> Result<InsertRequestStream> {
-        let query = stmt
-            .query_body()
-            .context(ParseSqlValueSnafu)?
-            .context(MissingInsertBodySnafu)?;
-
-        let logical_plan = self
-            .query_engine
-            .statement_to_plan(
-                QueryStatement::Sql(Statement::Query(Box::new(query))),
-                query_ctx.clone(),
-            )
-            .await
-            .context(ExecuteSqlSnafu)?;
-
-        let output = self
-            .query_engine
-            .execute(&logical_plan)
-            .await
-            .context(ExecuteSqlSnafu)?;
-
-        let stream: InsertRequestStream = match output {
-            Output::RecordBatches(batches) => {
-                Box::pin(stream::iter(batches.take()).map(move |batch| {
-                    Self::build_request_from_batch(
-                        stmt.clone(),
-                        table.clone(),
-                        batch,
-                        query_ctx.clone(),
-                    )
-                }))
-            }
-
-            Output::Stream(stream) => Box::pin(stream.map(move |batch| {
-                Self::build_request_from_batch(
-                    stmt.clone(),
-                    table.clone(),
-                    batch.context(CollectRecordsSnafu)?,
-                    query_ctx.clone(),
-                )
-            })),
-            _ => unreachable!(),
-        };
-
-        Ok(stream)
-    }
-
-    pub(crate) async fn insert_to_requests(
-        &self,
+    pub async fn insert_to_request(
         catalog_manager: CatalogManagerRef,
         stmt: Insert,
         query_ctx: QueryContextRef,
-    ) -> Result<InsertRequests> {
+    ) -> Result<InsertRequest> {
         let (catalog_name, schema_name, table_name) =
             table_idents_to_full_name(stmt.table_name(), query_ctx.clone())?;

@@ -292,16 +139,8 @@ impl SqlHandler {
                 table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
             })?;

-        if stmt.is_insert_select() {
-            Ok(InsertRequests::Stream(
-                self.build_stream_from_query(table, stmt, query_ctx).await?,
-            ))
-        } else {
-            let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
-            Ok(InsertRequests::Request(Self::build_request_from_values(
-                table_ref, &table, stmt,
-            )?))
-        }
+        let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
+        Self::build_request_from_values(table_ref, &table, stmt)
     }
 }

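(Not part of the diff above.) For orientation, a minimal, self-contained sketch of the row-to-column pivot that `build_request_from_values` performs before filling `InsertRequest::columns_values`; values are simplified to plain strings here, whereas the real handler builds typed vectors from the parsed SQL values:

use std::collections::HashMap;

fn main() {
    // Column names and row-major VALUES, roughly as a parsed
    // `INSERT INTO t (host, cpu) VALUES (...)` statement would provide them.
    let columns = ["host", "cpu"];
    let rows = [["web-1", "0.5"], ["web-2", "0.7"]];

    // Pivot into the column-major shape of `InsertRequest::columns_values`
    // (column name -> vector of values for that column).
    let mut columns_values: HashMap<String, Vec<String>> = HashMap::new();
    for row in &rows {
        for (name, value) in columns.iter().zip(row) {
            columns_values
                .entry(name.to_string())
                .or_default()
                .push(value.to_string());
        }
    }

    assert_eq!(columns_values["host"], ["web-1", "web-2"]);
    assert_eq!(columns_values["cpu"], ["0.5", "0.7"]);
}
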
@@ -12,6 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-mod instance_test;
+// TODO(LFC): These tests should be moved to frontend crate. They are actually standalone instance tests.
 mod promql_test;
 pub(crate) mod test_util;

@@ -31,22 +31,14 @@ async fn create_insert_query_assert(
     expected: &str,
 ) {
     let instance = setup_test_instance("test_execute_insert").await;
-    let query_ctx = QueryContext::arc();
-    instance
-        .inner()
-        .execute_sql(create, query_ctx.clone())
-        .await
-        .unwrap();

-    instance
-        .inner()
-        .execute_sql(insert, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(create).await;
+
+    instance.execute_sql(insert).await;

     let query_output = instance
         .inner()
-        .execute_promql_statement(promql, start, end, interval, lookback, query_ctx)
+        .execute_promql_statement(promql, start, end, interval, lookback, QueryContext::arc())
         .await
         .unwrap();
     let expected = String::from(expected);
@@ -56,24 +48,12 @@ async fn create_insert_query_assert(
 #[allow(clippy::too_many_arguments)]
 async fn create_insert_tql_assert(create: &str, insert: &str, tql: &str, expected: &str) {
     let instance = setup_test_instance("test_execute_insert").await;
-    let query_ctx = QueryContext::arc();
-    instance
-        .inner()
-        .execute_sql(create, query_ctx.clone())
-        .await
-        .unwrap();

-    instance
-        .inner()
-        .execute_sql(insert, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(create).await;

-    let query_output = instance
-        .inner()
-        .execute_sql(tql, query_ctx.clone())
-        .await
-        .unwrap();
+    instance.execute_sql(insert).await;
+
+    let query_output = instance.execute_sql(tql).await;
     let expected = String::from(expected);
     check_unordered_output_stream(query_output, expected).await;
 }

@@ -13,6 +13,7 @@
 // limitations under the License.

 use std::sync::Arc;
+use std::time::Duration;

 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
 use common_query::Output;
@@ -22,13 +23,18 @@ use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use mito::config::EngineConfig;
 use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
-use query::QueryEngineFactory;
+use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
 use servers::Mode;
+use session::context::QueryContext;
 use snafu::ResultExt;
+use sql::statements::statement::Statement;
+use sql::statements::tql::Tql;
 use table::engine::{EngineContext, TableEngineRef};
 use table::requests::{CreateTableRequest, TableOptions};

-use crate::datanode::{DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, WalConfig};
+use crate::datanode::{
+    DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, StorageConfig, WalConfig,
+};
 use crate::error::{CreateTableSnafu, Result};
 use crate::instance::Instance;
 use crate::sql::SqlHandler;
@@ -60,6 +66,8 @@ impl MockInstance {
             store: ObjectStoreConfig::File(FileConfig {
                 data_dir: procedure_dir.path().to_str().unwrap().to_string(),
             }),
+            max_retry_times: 3,
+            retry_delay: Duration::from_millis(500),
         });

         let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
@@ -72,12 +80,42 @@ impl MockInstance {
         }
     }

-    pub(crate) fn inner(&self) -> &Instance {
-        &self.instance
+    pub(crate) async fn execute_sql(&self, sql: &str) -> Output {
+        let engine = self.inner().query_engine();
+        let planner = engine.planner();
+
+        let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
+        match stmt {
+            QueryStatement::Sql(Statement::Query(_)) => {
+                let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
+                engine.execute(plan, QueryContext::arc()).await.unwrap()
+            }
+            QueryStatement::Sql(Statement::Tql(tql)) => {
+                let plan = match tql {
+                    Tql::Eval(eval) => {
+                        let promql = PromQuery {
+                            start: eval.start,
+                            end: eval.end,
+                            step: eval.step,
+                            query: eval.query,
+                        };
+                        let stmt = QueryLanguageParser::parse_promql(&promql).unwrap();
+                        planner.plan(stmt, QueryContext::arc()).await.unwrap()
+                    }
+                    Tql::Explain(_) => unimplemented!(),
+                };
+                engine.execute(plan, QueryContext::arc()).await.unwrap()
+            }
+            _ => self
+                .inner()
+                .execute_stmt(stmt, QueryContext::arc())
+                .await
+                .unwrap(),
+        }
     }

-    pub(crate) fn data_tmp_dir(&self) -> &TempDir {
-        &self._guard._data_tmp_dir
+    pub(crate) fn inner(&self) -> &Instance {
+        &self.instance
     }
 }

@@ -94,9 +132,12 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
             dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
             ..Default::default()
         },
-        storage: ObjectStoreConfig::File(FileConfig {
-            data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
-        }),
+        storage: StorageConfig {
+            store: ObjectStoreConfig::File(FileConfig {
+                data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
+            }),
+            ..Default::default()
+        },
         mode: Mode::Standalone,
         ..Default::default()
     };
@@ -164,33 +205,13 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
         .await
         .unwrap(),
     );
-
-    let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
-    let factory = QueryEngineFactory::new(catalog_list);
-
-    SqlHandler::new(
-        mock_engine.clone(),
-        catalog_manager,
-        factory.query_engine(),
-        mock_engine,
-        None,
-    )
+    SqlHandler::new(mock_engine.clone(), catalog_manager, mock_engine, None)
 }

 pub(crate) async fn setup_test_instance(test_name: &str) -> MockInstance {
     MockInstance::new(test_name).await
 }

-pub async fn check_output_stream(output: Output, expected: String) {
-    let recordbatches = match output {
-        Output::Stream(stream) => util::collect_batches(stream).await.unwrap(),
-        Output::RecordBatches(recordbatches) => recordbatches,
-        _ => unreachable!(),
-    };
-    let pretty_print = recordbatches.pretty_print().unwrap();
-    assert_eq!(pretty_print, expected, "{}", pretty_print);
-}
-
 pub async fn check_unordered_output_stream(output: Output, expected: String) {
     let sort_table = |table: String| -> String {
         let replaced = table.replace("\\n", "\n");

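(Not part of the diff above.) The new `MockInstance::execute_sql` helper dispatches a parsed statement to the SQL planner, the TQL-to-PromQL path, or direct statement execution. A compilable toy sketch of that dispatch shape, with made-up types standing in for the crate's real `QueryStatement`/`Tql` enums:

// Statement kinds mirroring the match arms in `MockInstance::execute_sql` above
// (illustrative names only, not the crate's real types).
enum TestStatement {
    Query(String),
    TqlEval { start: String, end: String, step: String, query: String },
    Other(String),
}

fn plan(stmt: &TestStatement) -> String {
    // Stand-in for "plan with the SQL planner, or translate TQL to PromQL first".
    match stmt {
        TestStatement::Query(sql) => format!("sql plan for: {sql}"),
        TestStatement::TqlEval { start, end, step, query } => {
            format!("promql plan for: {query} [{start}..{end} step {step}]")
        }
        TestStatement::Other(sql) => format!("direct execution for: {sql}"),
    }
}

fn main() {
    let stmts = [
        TestStatement::Query("SELECT 1".to_string()),
        TestStatement::TqlEval {
            start: "0".to_string(),
            end: "100".to_string(),
            step: "5s".to_string(),
            query: "up".to_string(),
        },
        TestStatement::Other("CREATE TABLE t (ts TIMESTAMP TIME INDEX)".to_string()),
    ];
    for stmt in &stmts {
        println!("{}", plan(stmt));
    }
}
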
@@ -13,13 +13,13 @@
 // limitations under the License.

 use arrow::array::ArrayData;
-use arrow::bitmap::Bitmap;
+use arrow::buffer::NullBuffer;

 #[derive(Debug, PartialEq)]
 enum ValidityKind<'a> {
     /// Whether the array slot is valid or not (null).
     Slots {
-        bitmap: &'a Bitmap,
+        bitmap: &'a NullBuffer,
         len: usize,
         null_count: usize,
     },
@@ -38,7 +38,7 @@ pub struct Validity<'a> {
 impl<'a> Validity<'a> {
     /// Creates a `Validity` from [`ArrayData`].
     pub fn from_array_data(data: &'a ArrayData) -> Validity<'a> {
-        match data.null_bitmap() {
+        match data.nulls() {
             Some(bitmap) => Validity {
                 kind: ValidityKind::Slots {
                     bitmap,
@@ -67,7 +67,7 @@ impl<'a> Validity<'a> {
     /// Returns whether `i-th` bit is set.
     pub fn is_set(&self, i: usize) -> bool {
         match self.kind {
-            ValidityKind::Slots { bitmap, .. } => bitmap.is_set(i),
+            ValidityKind::Slots { bitmap, .. } => bitmap.is_valid(i),
             ValidityKind::AllValid { len } => i < len,
             ValidityKind::AllNull { .. } => false,
         }

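(Not part of the diff above.) A small standalone sketch of the arrow-rs API this migration targets, assuming an arrow release that exposes `ArrayData::nulls()` and `NullBuffer` as the hunks above require; exact accessor names vary slightly between arrow versions:

use arrow::array::{Array, Int32Array};

fn main() {
    // An array with one null slot; newer arrow-rs tracks validity in a
    // `NullBuffer` instead of the older `Bitmap` type.
    let array = Int32Array::from(vec![Some(1), None, Some(3)]);

    // `ArrayData::nulls()` is the accessor the patched `Validity::from_array_data`
    // uses; `NullBuffer::is_valid` replaces the old `Bitmap::is_set`.
    let data = array.to_data();
    match data.nulls() {
        Some(nulls) => {
            assert!(nulls.is_valid(0));
            assert!(!nulls.is_valid(1));
            assert_eq!(nulls.null_count(), 1);
        }
        None => println!("no null buffer: every slot is valid"),
    }
}
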
@@ -4,6 +4,10 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+default = ["python"]
+python = ["dep:script"]
+
 [dependencies]
 api = { path = "../api" }
 async-stream.workspace = true
@@ -14,6 +18,7 @@ client = { path = "../client" }
 common-base = { path = "../common/base" }
 common-catalog = { path = "../common/catalog" }
 common-error = { path = "../common/error" }
+common-function = { path = "../common/function" }
 common-grpc = { path = "../common/grpc" }
 common-grpc-expr = { path = "../common/grpc-expr" }
 common-query = { path = "../common/query" }
@@ -29,12 +34,14 @@ futures = "0.3"
 futures-util.workspace = true
 itertools = "0.10"
 meta-client = { path = "../meta-client" }
 mito = { path = "../mito", features = ["test"] }
 moka = { version = "0.9", features = ["future"] }
 openmetrics-parser = "0.4"
+partition = { path = "../partition" }
 prost.workspace = true
 query = { path = "../query" }
 rustls = "0.20"
+script = { path = "../script", features = ["python"], optional = true }
 serde = "1.0"
 serde_json = "1.0"
 servers = { path = "../servers" }
@@ -55,3 +62,4 @@ meta-srv = { path = "../meta-srv", features = ["mock"] }
 strfmt = "0.2"
 toml = "0.5"
 tower = "0.4"
+uuid.workspace = true

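(Not part of the diff above.) An illustrative, hypothetical sketch of how code typically consumes an optional dependency gated by a feature like `python = ["dep:script"]`; the function below is made up and not from the frontend crate:

// Compiled only when the crate is built with the `python` feature enabled
// (which is the case by default here, since `default = ["python"]`).
#[cfg(feature = "python")]
fn script_support() -> &'static str {
    "python scripting enabled"
}

// Fallback when built with `--no-default-features`.
#[cfg(not(feature = "python"))]
fn script_support() -> &'static str {
    "python scripting disabled at compile time"
}

fn main() {
    println!("{}", script_support());
}
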
Some files were not shown because too many files have changed in this diff.