Compare commits


35 Commits

Author SHA1 Message Date
Yingwen
eff07d5986 Merge pull request #16 from GreptimeTeam/feat/grpc-unary-insert
feat: GRPC unary insert method
2023-03-17 19:26:48 +08:00
luofucong
40c55e4da7 feat: GRPC unary insert method 2023-03-17 19:11:46 +08:00
Yingwen
8d113550cf Merge pull request #15 from GreptimeTeam/ci/release-yaml
ci: Adjust release yaml
2023-03-15 11:23:31 +08:00
evenyag
15a0ed0853 ci: Adjust release yaml 2023-03-14 19:36:55 +08:00
Ruihang Xia
44493e9d8c feat: impl flush on shutdown (#14)
* feat: impl flush on shutdown

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* powerful if-else!

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 18:29:38 +08:00
Lei, HUANG
efd15839d4 Merge pull request #13 from GreptimeTeam/chore/flush-message
chore(servers): Change flush message
2023-03-14 17:34:22 +08:00
evenyag
1f62b36537 chore(servers): Change flush message 2023-03-14 17:16:39 +08:00
Ruihang Xia
7b8e65ce93 chore: merge public repo (#12)
* feat: implement table flush (#1121)

* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>

* fix: use correct env var (#1166)

* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend

* fix: failed to run subquery wrapped in two parentheses (#1157)

* refactor: add the separate GitHub Action job to push the image to the UCloud registry (#1170)

* refactor: make the cmd hold the application instance (#1159)

* fix: export 'PYO3_CROSS_LIB_DIR' when cargo build for aarch64-linux and refactor matrix opts (#1171)

---------

Co-authored-by: Weny Xu <wenymedia@gmail.com>
Co-authored-by: Yingwen <realevenyag@gmail.com>
Co-authored-by: discord9 <55937128+discord9@users.noreply.github.com>
Co-authored-by: LFC <luofucong@greptime.com>
Co-authored-by: zyy17 <zyylsxm@gmail.com>
2023-03-14 16:42:40 +08:00
Yingwen
6475339ad0 Merge pull request #10 from GreptimeTeam/feat/manual-flush-http
feat: manual flush http API
2023-03-14 16:37:56 +08:00
Lei, HUANG
0bd802c70d Update src/servers/src/error.rs
Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-14 16:26:47 +08:00
Lei, HUANG
28d07c7a2e Merge pull request #9 from GreptimeTeam/docs/edge-example-toml
docs(config): Add edge example
2023-03-14 16:25:29 +08:00
Lei, HUANG
dc33b0c0ce Merge pull request #11 from GreptimeTeam/chore/adjust-log
chore(storage): Adjust log level
2023-03-14 16:25:05 +08:00
evenyag
4b4f8f27e8 chore(storage): Adjust log level 2023-03-14 16:14:25 +08:00
Lei, HUANG
c994e0de88 fix: format 2023-03-14 16:13:57 +08:00
Lei, HUANG
d1ba9ca126 fix: unit tests 2023-03-14 16:11:25 +08:00
evenyag
0877dabce2 docs(config): Add edge example 2023-03-14 16:02:11 +08:00
Lei, HUANG
8b9671f376 feat: manual flush http API 2023-03-14 15:49:37 +08:00
Yingwen
dcf66d9d52 chore(datanode): derive serde default for Wal/CompactionConfig (#8) 2023-03-14 15:22:13 +08:00
Yingwen
65b61e78ad Merge pull request #6 from GreptimeTeam/docs/project-version
docs: Set greptimedb-edge version to 0.1.0
2023-03-14 15:01:29 +08:00
evenyag
3638704f95 docs: Set greptimedb-edge version to 0.1.0 2023-03-14 14:38:00 +08:00
Lei, HUANG
8a2f4256bf Merge pull request #5 from GreptimeTeam/feat/wait-flush-done
feat: Region writer wait flush done
2023-03-14 14:28:39 +08:00
evenyag
83aeadc506 feat: Region writer wait flush done 2023-03-14 14:09:22 +08:00
Lei, HUANG
f556052951 Merge pull request #3 from GreptimeTeam/feat/merge-public
feat: Merge develop branch of public repo
2023-03-14 14:05:38 +08:00
LFC
8658d428e0 fix: failed to run subquery wrapped in two parentheses (#1157) 2023-03-14 11:52:25 +08:00
discord9
e8e11072f8 fix: use correct env var (#1166)
* fix: use correct env var

* fix: move COPY up so rustup know it's nightly

* fix: add `pyo3_backend` in GHA yml

* chore: name for `TODO`

* temp: not set `pyo3_backend` before find DSO

* fix: release linux with pyo3_backend
2023-03-14 11:52:25 +08:00
Weny Xu
6f0f72c377 feat: implement table flush (#1121)
* feat: add flush method for trait

* feat: implement flush via grpc

* chore: move table_dir/region_name/region_id to table crate

* chore: Update src/mito/src/table.rs

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
2023-03-14 11:52:25 +08:00
Yingwen
32030a8194 Merge pull request #4 from GreptimeTeam/ci/remove-unnecessary
ci: Remove api doc ci and coverage statistics
2023-03-14 11:51:21 +08:00
evenyag
0f7cde2411 ci: Remove api doc ci and coverage statistics 2023-03-14 11:46:48 +08:00
Lei, HUANG
1ece402ec8 Merge pull request #2 from GreptimeTeam/feat/skip-wal
feat: skip wal for user table
2023-03-14 11:39:20 +08:00
Lei, HUANG
7ee54b3e69 fix: don't skip wal in test 2023-03-14 11:19:17 +08:00
Lei, HUANG
9b4dcba8cf fix: check errors 2023-03-14 10:51:16 +08:00
Lei, HUANG
c3bcb1111f fix: fmt 2023-03-13 20:24:25 +08:00
Lei, HUANG
a4ebd03a61 feat: skip wal for user table 2023-03-13 20:18:41 +08:00
Lei, HUANG
e7daf1226f feat: tune parquet parameters 2023-03-13 20:17:23 +08:00
Lei, HUANG
05c0ea9a59 feat: tune parquet parameters (#1)
* feat: tune parquet parameters

* Update src/storage/src/sst/parquet.rs

---------

Co-authored-by: Ruihang Xia <waynestxia@gmail.com>
2023-03-13 20:11:06 +08:00
484 changed files with 8614 additions and 21789 deletions


@@ -3,13 +3,3 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]


@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id


@@ -1,42 +1,42 @@
on:
push:
branches:
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
# on:
# push:
# branches:
# - develop
# paths-ignore:
# - 'docs/**'
# - 'config/**'
# - '**.md'
# - '.dockerignore'
# - 'docker/**'
# - '.gitignore'
name: Build API docs
# name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-02-26
# env:
# RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
apidoc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='greptime/'" />
</head>
<body></body></html>
EOF
- name: Publish dist directory
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
# jobs:
# apidoc:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - run: cargo doc --workspace --no-deps --document-private-items
# - run: |
# cat <<EOF > target/doc/index.html
# <!DOCTYPE html>
# <html>
# <head>
# <meta http-equiv="refresh" content="0; url='greptime/'" />
# </head>
# <body></body></html>
# EOF
# - name: Publish dist directory
# uses: JamesIves/github-pages-deploy-action@v4
# with:
# folder: target/doc


@@ -183,7 +183,7 @@ jobs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings
run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
coverage:
if: github.event.pull_request.draft == false
@@ -213,10 +213,11 @@ jobs:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
# - name: Install cargo-llvm-cov
# uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
run: cargo nextest run -F pyo3_backend
# run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1


@@ -2,10 +2,9 @@ on:
push:
tags:
- "v*.*.*"
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# Manually trigger only builds binaries.
# schedule:
# # At 00:00 on Monday.
# - cron: '0 0 * * 1'
workflow_dispatch:
name: Release
@@ -19,9 +18,6 @@ env:
CARGO_PROFILE: nightly
## FIXME(zyy17): Enable it after the tests are stabled.
DISABLE_RUN_TESTS: true
jobs:
build:
name: Build binary
@@ -33,45 +29,23 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
# opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
continue-on-error: true
# opts: "-F pyo3_backend"
# - arch: aarch64-apple-darwin
# os: macos-latest
# file: greptime-darwin-arm64
# continue-on-error: true
# - arch: x86_64-apple-darwin
# os: macos-latest
# file: greptime-darwin-amd64
# continue-on-error: true
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb-edge'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -126,12 +100,11 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
sudo ./docker/aarch64/compile-python.sh
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -143,54 +116,19 @@ jobs:
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
alias python=python3
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
@@ -212,98 +150,11 @@ jobs:
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64-pyo3
path: amd64
- name: Unzip the amd64 artifacts
run: |
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64-pyo3
path: arm64
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
if: github.repository == 'GreptimeTeam/greptimedb-edge'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -342,48 +193,142 @@ jobs:
files: |
**/greptime-*
docker-push-uhub:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:
- name: Checkout sources
uses: actions/checkout@v3
# docker:
# name: Build docker image
# needs: [build]
# runs-on: ubuntu-latest
# if: github.repository == 'GreptimeTeam/greptimedb'
# steps:
# - name: Checkout sources
# uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
# - name: Login to UCloud Container Registry
# uses: docker/login-action@v2
# with:
# registry: uhub.service.ucloud.cn
# username: ${{ secrets.UCLOUD_USERNAME }}
# password: ${{ secrets.UCLOUD_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# - name: Login to Dockerhub
# uses: docker/login-action@v2
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to UCloud Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
# shell: bash
# if: github.event_name == 'schedule'
# run: |
# buildTime=`date "+%Y%m%d"`
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
# shell: bash
# if: github.event_name != 'schedule'
# run: |
# VERSION=${{ github.ref_name }}
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v2
- name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
run: |
docker buildx imagetools create \
--tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
--tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
greptime/greptimedb:${{ env.IMAGE_TAG }}
# - name: Set up buildx
# uses: docker/setup-buildx-action@v2
# - name: Download amd64 binary
# uses: actions/download-artifact@v3
# with:
# name: greptime-linux-amd64
# path: amd64
# - name: Unzip the amd64 artifacts
# run: |
# cd amd64
# tar xvf greptime-linux-amd64.tgz
# rm greptime-linux-amd64.tgz
# - name: Download arm64 binary
# id: download-arm64
# uses: actions/download-artifact@v3
# with:
# name: greptime-linux-arm64
# path: arm64
# - name: Unzip the arm64 artifacts
# id: unzip-arm64
# if: success() || steps.download-arm64.conclusion == 'success'
# run: |
# cd arm64
# tar xvf greptime-linux-arm64.tgz
# rm greptime-linux-arm64.tgz
# - name: Build and push all
# uses: docker/build-push-action@v3
# if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
# with:
# context: .
# file: ./docker/ci/Dockerfile
# push: true
# platforms: linux/amd64,linux/arm64
# tags: |
# greptime/greptimedb:latest
# greptime/greptimedb:${{ env.IMAGE_TAG }}
# - name: Build and push amd64 only
# uses: docker/build-push-action@v3
# if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
# with:
# context: .
# file: ./docker/ci/Dockerfile
# push: true
# platforms: linux/amd64
# tags: |
# greptime/greptimedb:latest
# greptime/greptimedb:${{ env.IMAGE_TAG }}
# docker-push-uhub:
# name: Push docker image to UCloud Container Registry
# needs: [docker]
# runs-on: ubuntu-latest
# if: github.repository == 'GreptimeTeam/greptimedb'
# # Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
# continue-on-error: true
# steps:
# - name: Checkout sources
# uses: actions/checkout@v3
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Login to UCloud Container Registry
# uses: docker/login-action@v2
# with:
# registry: uhub.service.ucloud.cn
# username: ${{ secrets.UCLOUD_USERNAME }}
# password: ${{ secrets.UCLOUD_PASSWORD }}
# - name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
# shell: bash
# if: github.event_name == 'schedule'
# run: |
# buildTime=`date "+%Y%m%d"`
# SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
# echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
# - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
# shell: bash
# if: github.event_name != 'schedule'
# run: |
# VERSION=${{ github.ref_name }}
# echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
# - name: Push image to uhub # Use 'docker buildx imagetools create' to create a new image base on source image.
# run: |
# docker buildx imagetools create \
# --tag uhub.service.ucloud.cn/greptime/greptimedb:latest \
# --tag uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }} \
# greptime/greptimedb:${{ env.IMAGE_TAG }}

.gitignore (vendored): 4 changes

@@ -35,7 +35,3 @@ benchmarks/data
# dotenv
.env
# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*


@@ -51,7 +51,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks

Cargo.lock (generated): 1564 changes; file diff suppressed because it is too large.


@@ -7,7 +7,6 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
@@ -24,7 +23,6 @@ members = [
"src/common/time",
"src/datanode",
"src/datatypes",
"src/file-table-engine",
"src/frontend",
"src/log-store",
"src/meta-client",
@@ -47,38 +45,38 @@ members = [
]
[workspace.package]
version = "0.2.0"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
arrow = { version = "37.0" }
arrow-array = "37.0"
arrow-flight = "37.0"
arrow-schema = { version = "37.0", features = ["serde"] }
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
# TODO(LFC): Use official DataFusion, when https://github.com/apache/arrow-datafusion/pull/5542 got merged
datafusion = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-common = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-optimizer = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-physical-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
datafusion-sql = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" }
futures = "0.3"
futures-util = "0.3"
parquet = "37.0"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.33"
sqlparser = "0.32"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util"] }
tonic = { version = "0.9", features = ["tls"] }
tokio-util = "0.7"
tonic = { version = "0.8", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
[profile.release]


@@ -21,10 +21,6 @@ fmt: ## Format all the Rust code.
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --option "indent_string= "
.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check --option "indent_string= "
.PHONY: docker-image
@@ -51,7 +47,7 @@ check: ## Cargo check all the targets.
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
.PHONY: fmt-check
fmt-check: ## Check code format.


@@ -1,14 +1,14 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
</picture>
</p>
<h3 align="center">
The next-generation hybrid time-series/analytics processing database in the cloud
The next-generation hybrid timeseries/analytics processing database in the cloud
</h3>
<p align="center">
@@ -23,8 +23,6 @@
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
## What is GreptimeDB
@@ -38,11 +36,11 @@ Our core developers have been building time-series data platform
for years. Based on their best-practices, GreptimeDB is born to give you:
- A standalone binary that scales to highly-available distributed cluster, providing a transparent experience for cluster users
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends
- Flexible indexes, tackling high cardinality issues down
- Optimized columnar layout for handling time-series data; compacted, compressed, stored on various storage backends
- Flexible index options, tackling high cardinality issues down
- Distributed, parallel query execution, leveraging elastic computing resource
- Native SQL, and Python scripting for advanced analytical scenarios
- Widely adopted database protocols and APIs, native PromQL supports
- Widely adopted database protocols and APIs
- Extensible table engine architecture for extensive workloads
## Quick Start
@@ -63,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
- python3-dev or python3-devel(Optional, only needed if you want to run scripts
in cpython): this install a Python shared library required for running python
scripting engine(In CPython Mode). This is available as `python3-dev` on
ubuntu, you can install it with `sudo apt install python3-dev`, or
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
`Python3` package should have this shared library by default.
#### Build with Docker
@@ -149,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
@@ -160,7 +158,6 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)


@@ -1,19 +0,0 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| >= v0.1.0 | :white_check_mark: |
| < v0.1.0 | :x: |
## Reporting a Vulnerability
We place great importance on the security of GreptimeDB code, software,
and cloud platform. If you come across a security vulnerability in GreptimeDB,
we kindly request that you inform us immediately. We will thoroughly investigate
all valid reports and make every effort to resolve the issue promptly.
To report any issues or vulnerabilities, please email us at info@greptime.com, rather than
posting publicly on GitHub. Be sure to provide us with the version identifier as well as details
on how the vulnerability can be exploited.


@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
endpoint: String,
}
@@ -97,9 +97,6 @@ async fn write_data(
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -125,17 +122,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let values = build_values(array);
let column = Column {
column_name: field.name().clone(),
column_name: field.name().to_owned(),
values: Some(values),
null_mask: array
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
null_mask: vec![],
// datatype and semantic_type are set to default
..Default::default()
};
@@ -145,7 +136,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
fn build_values(column: &ArrayRef) -> Values {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -153,13 +144,10 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
Values {
i64_values: values.to_vec(),
..Default::default()
}
}
DataType::Float64 => {
let array = column
@@ -167,38 +155,29 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
Values {
f64_values: values.to_vec(),
..Default::default()
}
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.downcast_ref::<TimestampNanosecondArray>()
.unwrap();
let values = array.values();
(
Values {
ts_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
Values {
i64_values: values.to_vec(),
..Default::default()
}
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
Values {
string_values: values,
..Default::default()
}
}
DataType::Null
| DataType::Boolean
@@ -225,7 +204,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Union(_, _, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
@@ -234,10 +213,6 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -253,13 +228,13 @@ fn create_table_expr() -> CreateTableExpr {
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
@@ -365,8 +340,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: None,
engine: "mito".to_string(),
table_id: Some(TableId { id: 0 }),
}
}


@@ -37,24 +37,12 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"
# Compaction options, see `standalone.example.toml`.
[storage.compaction]
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"
# type = 'File'
# data_dir = '/tmp/greptimedb/procedure/'

config/edge.example.toml (new file): 11 lines

@@ -0,0 +1,11 @@
# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# Storage options.
[storage]
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data"


@@ -99,7 +99,7 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"
# Compaction options.
[storage.compaction]
[compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
@@ -107,16 +107,6 @@ max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32
# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
@@ -124,7 +114,3 @@ checkpoint_on_startup = false
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"


@@ -1,36 +1,9 @@
#!/usr/bin/env bash
set -e
# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
@@ -41,47 +14,33 @@ function compile_for_amd64_platform() {
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..


@@ -1,14 +1,6 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
COPY requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
ARG TARGETARCH


@@ -1,5 +0,0 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1


@@ -1,74 +0,0 @@
This document introduces how to implement SQL statements in GreptimeDB.
The execution entry point for SQL statements is located in the Frontend Instance. You can see it has implemented `SqlQueryHandler`:
```rust
impl SqlQueryHandler for Instance {
type Error = Error;
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
// ...
}
}
```
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method is called. After some parsing work, the SQL will be fed into `StatementExecutor`:
```rust
// in Frontend Instance:
self.statement_executor.execute_sql(stmt, query_ctx).await
```
That's where we handle our SQL statements. You can simply create a new match arm for your statement there, and the statement is then implemented for both GreptimeDB Standalone and Cluster. You can see how `DESCRIBE TABLE` is implemented as an example, or the sketch below.
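To make that concrete, here is a minimal, self-contained sketch of the match-arm dispatch. The `Statement` and `Output` types and this `StatementExecutor` are simplified, hypothetical stand-ins, not the real greptimedb API:
```rust
// Hypothetical stand-ins for the real greptimedb types, for illustration only.
#[derive(Debug)]
enum Statement {
    DescribeTable { table: String },
    ShowDatabases,
}

#[derive(Debug)]
enum Output {
    Rows(Vec<String>),
}

struct StatementExecutor;

impl StatementExecutor {
    fn execute_sql(&self, stmt: Statement) -> Result<Output, String> {
        match stmt {
            // Supporting a new statement means adding a match arm here; the
            // one implementation then serves both Standalone and Cluster.
            Statement::DescribeTable { table } => {
                Ok(Output::Rows(vec![format!("columns of `{table}`")]))
            }
            Statement::ShowDatabases => Ok(Output::Rows(vec!["public".to_string()])),
        }
    }
}

fn main() {
    let executor = StatementExecutor;
    let stmts = vec![
        Statement::DescribeTable { table: "metrics".to_string() },
        Statement::ShowDatabases,
    ];
    for stmt in stmts {
        println!("{:?}", executor.execute_sql(stmt));
    }
}
```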
Now, what if a statement should be handled differently in GreptimeDB Standalone and Cluster? Note that `StatementExecutor` has a `SqlStatementExecutor` field, and that GreptimeDB Standalone and Cluster each have their own implementation of `SqlStatementExecutor`. If you are going to implement a statement differently in the two modes (like `CREATE TABLE`), you have to implement it in each mode's own `SqlStatementExecutor`.
This is summarized in the diagram below:
```text
SQL query
|
v
+---------------------------+
| SqlQueryHandler::do_query |
+---------------------------+
|
| SQL parsing
v
+--------------------------------+
| StatementExecutor::execute_sql |
+--------------------------------+
|
| SQL execution
v
+----------------------------------+
| commonly handled statements like |
| "plan_exec" for selection or |
+----------------------------------+
| |
For Standalone | | For Cluster
v v
+---------------------------+ +---------------------------+
| SqlStatementExecutor impl | | SqlStatementExecutor impl |
| in Datanode Instance | | in Frontend DistInstance |
+---------------------------+ +---------------------------+
```
Note that some SQL statements can be executed in our QueryEngine, in the form of a `LogicalPlan`. You can follow the invocation path down to the `QueryEngine` implementation from `StatementExecutor::plan_exec`. For now, there's only one `DatafusionQueryEngine` for both GreptimeDB Standalone and Cluster. That single query engine works for both modes because GreptimeDB reads and writes data through the `Table` trait, and each mode has its own `Table` implementation.
We have no bias towards handling statements in the query engine versus `StatementExecutor`; one kind of statement can be implemented in either place. For example, `Insert` with selection is handled in the query engine, because we can easily do the query part there, while `Insert` without selection is not, since the cost of parsing the statement into a `LogicalPlan` is not negligible. So generally, if the SQL query is simple enough, you can handle it in `StatementExecutor`; if it is complex or contains a selection, it should be parsed to a `LogicalPlan` and handled in the query engine.
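As a purely hypothetical illustration of that rule of thumb (the `route` function below is invented for this sketch and is not greptimedb code):
```rust
// Hypothetical sketch of the routing rule above: statements that contain a
// selection go through the query engine as a LogicalPlan; simple ones stay
// in StatementExecutor.
#[derive(Debug)]
enum Route {
    StatementExecutor, // cheap path: no LogicalPlan construction
    QueryEngine,       // worth the cost of building a LogicalPlan
}

fn route(sql: &str) -> Route {
    // A crude stand-in for "has some part of selection".
    if sql.to_lowercase().contains("select") {
        Route::QueryEngine
    } else {
        Route::StatementExecutor
    }
}

fn main() {
    println!("{:?}", route("INSERT INTO t VALUES (1)"));          // StatementExecutor
    println!("{:?}", route("INSERT INTO t SELECT * FROM other")); // QueryEngine
}
```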


@@ -1,196 +0,0 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---
Fault Tolerance for Region
----------------------
# Summary
This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode, or, put another way, region high availability ("HA") for the GreptimeDB cluster.
In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.
When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, although some requests may fail during region failover. Optimization to reduce the MTTR (Mean Time To Recovery) is not a concern of this RFC, and is left for future work.
# Motivation
Fault tolerance for regions is critical for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, including the GreptimeDB cluster.
# Details
## Background
Some backgrounds about region in distributed mode:
- A table is logically split into multiple regions. Each region stores a non-overlapping part of the table data.
- Regions are distributed across Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits it into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.
In conclusion, as long as regions remain available, and can regain availability when failures do occur, overall region HA is achieved. With this in mind, let's first see how region failures are detected.
## Failure Detection
We detect region failures in Metasrv, both passively and actively. "Passively" means that Metasrv does not fire "are you healthy" requests at regions; instead, region health information is carried in the heartbeat requests that Datanodes submit to Metasrv.
The Datanode already carries its region stats in the heartbeat request (irrelevant fields are omitted):
```protobuf
message HeartbeatRequest {
...
// Region stats on this node
repeated RegionStat region_stats = 6;
...
}
message RegionStat {
uint64 region_id = 1;
TableName table_name = 2;
...
}
```
For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if a region is unavailable in the view of the Datanode that contains it, the Datanode simply does not include that region's `RegionStat` in the heartbeat request. And if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.
> The heartbeat interval is now hardcoded to five seconds.
Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains health information for all regions. If a region's heartbeats are not received for a period of time, Metasrv suspects the region might be unavailable. To decide whether a region has failed, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" that represents the likelihood of a region being unavailable, based on the arrival rate of historical heartbeats. Once the "phi" rises above a pre-defined threshold, Metasrv considers the region failed.
> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.
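To make the idea concrete, here is a simplified sketch of the phi calculation, assuming exponentially distributed heartbeat inter-arrival times; the actual detector in Metasrv may use a different distribution and tuning:
```rust
// Simplified phi-accrual sketch (exponential-distribution variant).
struct PhiDetector {
    intervals: Vec<f64>, // recent heartbeat inter-arrival times, in seconds
    last_heartbeat: f64, // timestamp of the last heartbeat, in seconds
}

impl PhiDetector {
    fn heartbeat(&mut self, now: f64) {
        if self.last_heartbeat > 0.0 {
            self.intervals.push(now - self.last_heartbeat);
        }
        self.last_heartbeat = now;
    }

    /// phi = -log10(P(a heartbeat still arrives after this much silence)).
    /// For the exponential distribution this is elapsed / mean * log10(e).
    fn phi(&self, now: f64) -> f64 {
        if self.intervals.is_empty() {
            return 0.0;
        }
        let mean = self.intervals.iter().sum::<f64>() / self.intervals.len() as f64;
        (now - self.last_heartbeat) / mean * std::f64::consts::LOG10_E
    }
}

fn main() {
    let mut d = PhiDetector { intervals: vec![], last_heartbeat: 0.0 };
    for t in [5.0, 10.0, 15.0, 20.0] {
        d.heartbeat(t); // a steady 5-second heartbeat cadence
    }
    // phi grows with silence: the longer no heartbeat arrives, the more
    // confident the detector is that the region has failed.
    println!("phi after  5s of silence: {:.2}", d.phi(25.0));
    println!("phi after 60s of silence: {:.2}", d.phi(80.0));
}
```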
When Metasrv concludes from heartbeats that some region has failed, that is not yet the final decision. Here comes the "active" detection: before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start failover on the region.
To conclude, the failure detection pseudocode looks like this:
```rust
// in Metasrv:
fn failure_detection() {
loop {
// passive detection
let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();
// find the datanodes that contains the failed regions
let datanodes_and_regions = find_region_resides_datanodes(failed_regions);
// active detection
for (datanode, regions) in datanodes_and_regions {
if !datanode.is_healthy(regions) {
do_failover(datanode, regions);
}
}
sleep(config.detect_interval);
}
}
```
Some design considerations:
- Why detect actively when we already have passive detection? Because the network can sometimes be connectable in only one direction (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still reach the Datanode. Active detection avoids this false-positive situation.
- Why does the detection work on regions instead of Datanodes? Because it may happen that only some of the regions on a Datanode are unavailable, not ALL of them, especially when Datanodes are shared by multiple tenants. In that case, it's better to fail over only the affected regions instead of every region residing on the Datanode. All in all, we want more subtle control over region failover.
So we have detected that some regions are unavailable. How do we regain availability?
## Region Failover
Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.
In general, region failover is fairly simple. Once Metasrv decides to do failover upon some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that helps Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".
> The strategy for choosing the most suitable candidates requires careful design, but that's another RFC.
Then, Metasrv sets the state of these failed regions to "passive". We should add a field to `Region`:
```protobuf
message Region {
uint64 id = 1;
string name = 2;
Partition partition = 3;
message State {
Active,
Passive,
}
State state = 4;
map<string, string> attrs = 100;
}
```
Here `Region` is used in message `RegionRoute`, which indicates how the write request is split among regions. When a region is set as "passive", Frontend knows the write to it should be rejected at the moment (the region read is not blocked, however).
> Making a region "passive" here is effectively blocking the write to it. It's ok in the failover situation, the region is failed anyway. However, when dealing with active maintenance operations, region state requires more refined design. But that's another story.
Third, Metasrv fires the "close region" requests to the failed Datanodes, and fires the "open region" requests to those candidates. "Close region" requests might be failed due to the unavailability of Datanodes, but that's fine, it's just a best-effort attempt to reduce the chance of any in-flight writes got handled unintentionally after the region is set as "passive". The "open region" requests must have succeeded though. Datanodes open regions from remote WAL.
> Currently the "close region" is undefined in Datanode. It could be a local cache clean up of region data or other resources tidy up.
Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating it is ready to handle region. "call back" here is backed by its heartbeat to Metasrv. Metasrv updates the region's state to "active", so as to let Frontend lifts the restrictions of region writes (again, the read part of region is untouched).
All the above steps should be managed by a remote procedure framework. That's another implementation challenge in the region failover feature (the first being the remote WAL, of course).
A picture is worth a thousand words:
```text
+-------------------------+
| Metasrv detects region |
| failure |
+-------------------------+
|
v
+----------------------------+
| Metasrv chooses candidates |
| to hold failed regions |
+----------------------------+
|
v
+-------------------------+ +-------------------------+
| Metasrv "passive" the |------>| Frontend rejects writes |
| failed regions | | to "passive" regions |
+-------------------------+ +-------------------------+
|
v
+--------------------------+ +---------------------------+
| Candidate Datanodes open |<-------| Metasrv fires "close" and |
| regions from remote WAL | | "open" region requests |
+--------------------------+ +---------------------------+
|
|
| +-------------------------+ +-------------------------+
+--------------------->| Metasrv "active" the |------>| Frontend lifts write |
| failed regions | | restriction to regions |
+-------------------------+ +-------------------------+
|
v
+-------------------------+
| Region failover done, |
| HA regain |
+-------------------------+
```
# Alternatives
## The "Neon" Way
Remote WAL raises a problem that could harm the write throughput of a GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we do it the "[Neon](https://github.com/neondatabase/neon)" way, making the remote WAL sit between Frontend and Datanode; couldn't that improve our write throughput? It could, though there are some consistency issues to solve, like "read-your-writes".
However, the main reasons we don't adopt this method are two-fold:
1. Remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reduction in reliability).
## No WAL, Replication instead
This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Were the main region to fail, a standby region in its replication group would be elected as the new "main" and take over the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It requires a major redesign, yet gains no significant advantage over the remote WAL method.
However, replication does have advantages of its own that we can learn from to optimize the failover procedure.
# Future Work
Some optimizations we could adopt:
- To reduce the MTTR, we could have Metasrv choose a candidate for each region during normal operation. The candidate does some preparation work to reduce the region-open time, effectively accelerating the failover procedure.
- We could adopt the replication method to the degree that region replicas are used as fast catch-up candidates. Since the data difference among replicas is minor, region failover would not need to load or exchange much data, greatly reducing the failover time.


View File

@@ -1,174 +0,0 @@
---
Feature Name: "File external table"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1041
Date: 2023-03-08
Author: "Xu Wenkang <wenymedia@gmail.com>"
---
File external table
---
# Summary
Allows users to perform SQL queries on files
# Motivation
User data may already exist in other storage, e.g., file systems or S3, in formats such as CSV, Parquet, or JSON. We can provide users the ability to perform SQL queries on these files.
# Details
## Overview
The file external table provides users the ability to perform SQL queries on these files.
For example, a user has a CSV file on the local file system `/var/data/city.csv`:
```
Rank , Name , State , 2023 Population , 2020 Census , Annual Change , Density (mi²)
1 , New York City , New York , 8,992,908 , 8,804,190 , 0.7% , 29,938
2 , Los Angeles , California , 3,930,586 , 3,898,747 , 0.27% , 8,382
3 , Chicago , Illinois , 2,761,625 , 2,746,388 , 0.18% , 12,146
.....
```
Then the user can create a file external table with:
```sql
CREATE EXTERNAL TABLE city with(location='/var/data/city.csv', format='CSV', field_delimiter = ',', record_delimiter = '\n', skip_header = 1);
```
Then query the external table with:
```bash
MySQL> select * from city;
```
| Rank | Name | State | 2023 Population | 2020 Census | Annual Change | Density (mi²) |
| :--- | :------------ | :--------- | :-------------- | :---------- | :------------ | :------------ |
| 1 | New York City | New York | 8,992,908 | 8,804,190 | 0.7% | 29,938 |
| 2 | Los Angeles | California | 3,930,586 | 3,898,747 | 0.27% | 8,382 |
| 3 | Chicago | Illinois | 2,761,625 | 2,746,388 | 0.18% | 12,146 |
Drop the external table, if needed, with:
```sql
DROP EXTERNAL TABLE city
```
### Syntax
```
CREATE EXTERNAL TABLE [<database>.]<table_name>
[
(
<col_name> <col_type> [NULL | NOT NULL] [COMMENT "<comment>"]
)
]
[ WITH
(
LOCATION = 'url'
[,FIELD_DELIMITER = 'delimiter' ]
[,RECORD_DELIMITER = 'delimiter' ]
[,SKIP_HEADER = '<number>' ]
[,FORMAT = { csv | json | parquet } ]
[,PATTERN = '<regex_pattern>' ]
[,ENDPOINT = '<uri>' ]
[,ACCESS_KEY_ID = '<key_id>' ]
[,SECRET_ACCESS_KEY = '<access_key>' ]
[,SESSION_TOKEN = '<token>' ]
[,REGION = '<region>' ]
[,ENABLE_VIRTUAL_HOST_STYLE = '<boolean>']
..
)
]
```
### Supported File Format
The external file table supports multiple formats; we divide them into row formats and columnar formats.
Row formats:
- CSV, JSON
Columnar formats:
- Parquet
Some of these formats support filter pushdown, and others don't. If users query very large files in a format that doesn't support pushdown, scanning the full files might consume a lot of IO and cause long-running queries.
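To illustrate why columnar formats fare better here, below is a hedged sketch of row-group pruning with min/max statistics; `RowGroupStats` is a stand-in, not Parquet's real metadata API. A row format like CSV carries no such statistics, so the whole file must be scanned.
```rust
/// Simplified stand-in for Parquet row-group statistics on one column.
struct RowGroupStats {
    min: i64,
    max: i64,
}

/// Keep only the row groups whose [min, max] range can satisfy `col >= bound`;
/// the rest are skipped without reading their data pages at all.
fn prune_row_groups(groups: &[RowGroupStats], bound: i64) -> Vec<usize> {
    groups
        .iter()
        .enumerate()
        .filter(|(_, g)| g.max >= bound)
        .map(|(i, _)| i)
        .collect()
}
```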
### File Table Engine
![overview](external-table-engine-overview.png)
We implement a file table engine that creates an external table by accepting user-specified file paths and treating all records as immutable.
1. File Format Decoder: decode files to the `RecordBatch` stream.
2. File Table Engine: implement the `TableProvider` trait, store necessary metadata in memory, and provide scan ability.
Our implementation is better suited for small files. For large files (e.g., a GB-level CSV file), we suggest users import the data into the database.
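As a toy sketch of the decoder half of this split (assuming a simplified in-memory `RecordBatch` rather than the real Arrow one, and ignoring async streaming):
```rust
/// Toy record batch; the real engine produces Arrow `RecordBatch` streams.
struct RecordBatch {
    rows: Vec<Vec<String>>,
}

/// The "File Format Decoder" role: turn raw file bytes into record batches.
trait FileFormatDecoder {
    fn decode(&self, bytes: &[u8]) -> Result<Vec<RecordBatch>, String>;
}

struct CsvDecoder {
    field_delimiter: char,
    skip_header: usize,
}

impl FileFormatDecoder for CsvDecoder {
    fn decode(&self, bytes: &[u8]) -> Result<Vec<RecordBatch>, String> {
        let text = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
        let rows = text
            .lines()
            .skip(self.skip_header)
            .map(|line| {
                line.split(self.field_delimiter)
                    .map(|field| field.trim().to_string())
                    .collect()
            })
            .collect();
        // The file table engine would wrap batches like this in its scan stream.
        Ok(vec![RecordBatch { rows }])
    }
}
```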
## Drawbacks
- Some formats don't support filter pushdown
- Hard to support indexing
## Life cycle
### Register a table
1. Write metadata to manifest.
2. Create the table via file table engine.
3. Register table to `CatalogProvider` and register table to `SystemCatalog`(persist tables to disk).
### Deregister a table (Drop a table)
1. Fetch the target table info (figure out table engine type).
2. Deregister the target table in `CatalogProvider` and `SystemCatalog`.
3. Find the target table engine.
4. Drop the target table.
### Recover a table when restarting
1. Collect tables name and engine type info.
2. Find the target tables in different engines.
3. Open and register tables.
# Alternatives
## Using DataFusion API
We can use datafusion API to register a file table:
```rust
let ctx = SessionContext::new();
ctx.register_csv("example", "tests/data/example.csv", CsvReadOptions::new()).await?;
// create a plan
let df = ctx.sql("SELECT a, MIN(b) FROM example WHERE a <= b GROUP BY a LIMIT 100").await?;
```
### Drawbacks
DataFusion implements its own `Object Store` abstraction and supports parsing partitioned directories, which can push down filters and skip some directories. However, this makes it impossible to use our `LruCacheLayer` (parsing the partitioned directories requires paths as input). If we want to manage memory entirely ourselves, we should implement our own `TableProvider` or `Table`.
- Impossible to use `CacheLayer`
## Introduce an intermediate representation layer
![overview](external-table-engine-way-2.png)
We convert all files into `parquet` as an intermediate representation. Then we only need to implement a `parquet` file table engine, and we already have a similar one. It also supports limited filter pushdown via `parquet` row group stats.
### Drawbacks
- Computing overhead
- Storage overhead

View File

@@ -1,39 +0,0 @@
#!/usr/bin/env bash
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
set -e
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"
# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets.
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
# Download the tar file containing the built dashboard assets.
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
# Verify the checksums match; exit if they don't.
case "$(uname -s)" in
FreeBSD | Darwin)
echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
Linux)
echo "$(cat sha256.txt)" | sha256sum --check -- \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
*)
echo "The '$(uname -s)' operating system is not supported as a build host for the dashboard" >&2
exit 1
esac
# Extract the assets and clean up.
tar -xzf build.tar.gz -C "$STATIC_DIR"
rm sha256.txt
rm build.tar.gz
echo "Successfully downloaded dashboard assets to $STATIC_DIR"

View File

@@ -10,10 +10,10 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0bebe5f69c91cdfbce85cb8f45f9fcd28185261c" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0a7b790ed41364b5599dff806d1080bd59c5c9f6" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
[build-dependencies]
tonic-build = "0.9"
tonic-build = "0.8"

View File

@@ -18,7 +18,7 @@ use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;
use snafu::{Backtrace, ErrorCompat};
pub type Result<T> = std::result::Result<T, Error>;
@@ -26,12 +26,12 @@ pub type Result<T> = std::result::Result<T, Error>;
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType { datatype: i32, location: Location },
UnknownColumnDataType { datatype: i32, backtrace: Backtrace },
#[snafu(display("Failed to create column datatype from {:?}", from))]
IntoColumnDataType {
from: ConcreteDataType,
location: Location,
backtrace: Backtrace,
},
#[snafu(display(
@@ -66,6 +66,9 @@ impl ErrorExt for Error {
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self

View File

@@ -7,7 +7,6 @@ license.workspace = true
[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }

View File

@@ -1,15 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod catalog_adapter;

View File

@@ -19,7 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::Location;
use snafu::{Backtrace, ErrorCompat};
use crate::DeregisterTableRequest;
@@ -50,7 +50,7 @@ pub enum Error {
},
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog { msg: String, location: Location },
SystemCatalog { msg: String, backtrace: Backtrace },
#[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?}",
@@ -58,68 +58,61 @@ pub enum Error {
))]
SystemCatalogTypeMismatch {
data_type: ConcreteDataType,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
InvalidEntryType {
entry_type: Option<u8>,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Invalid system catalog key: {:?}", key))]
InvalidKey {
key: Option<String>,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Catalog value is not present"))]
EmptyValue { location: Location },
EmptyValue { backtrace: Backtrace },
#[snafu(display("Failed to deserialize value, source: {}", source))]
ValueDeserialize {
source: serde_json::error::Error,
location: Location,
},
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
TableEngineNotFound {
engine_name: String,
#[snafu(backtrace)]
source: table::error::Error,
backtrace: Backtrace,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
SchemaNotFound {
catalog: String,
schema: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Table `{}` already exists", table))]
TableExists { table: String, location: Location },
TableExists { table: String, backtrace: Backtrace },
#[snafu(display("Table `{}` not exist", table))]
TableNotExist { table: String, location: Location },
TableNotExist { table: String, backtrace: Backtrace },
#[snafu(display("Schema {} already exists", schema))]
SchemaExists { schema: String, location: Location },
SchemaExists {
schema: String,
backtrace: Backtrace,
},
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Operation {} not supported", op))]
NotSupported { op: String, location: Location },
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
OpenTable {
table_info: String,
@@ -130,7 +123,7 @@ pub enum Error {
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to read system catalog table records"))]
@@ -139,12 +132,6 @@ pub enum Error {
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch, source: {}", source))]
CreateRecordBatch {
#[snafu(backtrace)]
source: common_recordbatch::error::Error,
},
#[snafu(display(
"Failed to insert table creation record to system catalog, source: {}",
source
@@ -166,7 +153,7 @@ pub enum Error {
},
#[snafu(display("Illegal catalog manager state: {}", msg))]
IllegalManagerState { location: Location, msg: String },
IllegalManagerState { backtrace: Backtrace, msg: String },
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
SystemCatalogTableScan {
@@ -217,37 +204,6 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
#[snafu(display(
"Failed to get region stats, catalog: {}, schema: {}, table: {}, source: {}",
catalog,
schema,
table,
source
))]
RegionStats {
catalog: String,
schema: String,
table: String,
#[snafu(backtrace)]
source: table::error::Error,
},
#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef { err_msg: String, location: Location },
#[snafu(display("{}: {}", msg, source))]
Datafusion {
msg: String,
source: DataFusionError,
location: Location,
},
#[snafu(display("Table schema mismatch, source: {}", source))]
TableSchemaMismatch {
#[snafu(backtrace)]
source: table::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -260,8 +216,7 @@ impl ErrorExt for Error {
| Error::TableNotFound { .. }
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
@@ -269,27 +224,21 @@ impl ErrorExt for Error {
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
source.status_code()
}
Error::ReadSystemCatalog { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
source.status_code()
}
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound,
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
StatusCode::InvalidArguments
}
Error::SchemaExists { .. } => StatusCode::InvalidArguments,
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. }
| Error::RegionStats { source, .. }
| Error::TableSchemaMismatch { source } => source.status_code(),
| Error::DeregisterTable { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
@@ -299,12 +248,15 @@ impl ErrorExt for Error {
source.status_code()
}
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::Unimplemented { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
@@ -328,7 +280,7 @@ mod tests {
StatusCode::TableAlreadyExists,
Error::TableExists {
table: "some_table".to_string(),
location: Location::generate(),
backtrace: Backtrace::generate(),
}
.status_code()
);
@@ -342,7 +294,7 @@ mod tests {
StatusCode::StorageUnavailable,
Error::SystemCatalog {
msg: "".to_string(),
location: Location::generate(),
backtrace: Backtrace::generate(),
}
.status_code()
);
@@ -351,7 +303,7 @@ mod tests {
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: ConcreteDataType::binary_datatype(),
location: Location::generate(),
backtrace: Backtrace::generate(),
}
.status_code()
);
@@ -365,7 +317,7 @@ mod tests {
pub fn test_errors_to_datafusion_error() {
let e: DataFusionError = Error::TableExists {
table: "test_table".to_string(),
location: Location::generate(),
backtrace: Backtrace::generate(),
}
.into();
match e {

View File

@@ -1,80 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod tables;
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use datafusion::datasource::streaming::{PartitionStream, StreamingTable};
use snafu::ResultExt;
use table::table::adapter::TableAdapter;
use table::TableRef;
use crate::error::{DatafusionSnafu, Result, TableSchemaMismatchSnafu};
use crate::information_schema::tables::InformationSchemaTables;
use crate::{CatalogProviderRef, SchemaProvider};
const TABLES: &str = "tables";
pub(crate) struct InformationSchemaProvider {
catalog_name: String,
catalog_provider: CatalogProviderRef,
}
impl InformationSchemaProvider {
pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
Self {
catalog_name,
catalog_provider,
}
}
}
#[async_trait]
impl SchemaProvider for InformationSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn table_names(&self) -> Result<Vec<String>> {
Ok(vec![TABLES.to_string()])
}
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let table = if name.eq_ignore_ascii_case(TABLES) {
Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_provider.clone(),
))
} else {
return Ok(None);
};
let table = Arc::new(
StreamingTable::try_new(table.schema().clone(), vec![table]).with_context(|_| {
DatafusionSnafu {
msg: format!("Failed to get InformationSchema table '{name}'"),
}
})?,
);
let table = TableAdapter::new(table).context(TableSchemaMismatchSnafu)?;
Ok(Some(Arc::new(table)))
}
fn table_exist(&self, name: &str) -> Result<bool> {
Ok(matches!(name.to_ascii_lowercase().as_str(), TABLES))
}
}

View File

@@ -1,165 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_query::physical_plan::TaskContext;
use common_recordbatch::RecordBatch;
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::StringVectorBuilder;
use snafu::ResultExt;
use table::metadata::TableType;
use crate::error::{CreateRecordBatchSnafu, Result};
use crate::information_schema::TABLES;
use crate::CatalogProviderRef;
pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
}
impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
let schema = Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
]));
Self {
schema,
catalog_name,
catalog_provider,
}
}
fn builder(&self) -> InformationSchemaTablesBuilder {
InformationSchemaTablesBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_provider.clone(),
)
}
}
/// Builds the `information_schema.TABLE` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
table_types: StringVectorBuilder,
}
impl InformationSchemaTablesBuilder {
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
Self {
schema,
catalog_name,
catalog_provider,
catalog_names: StringVectorBuilder::with_capacity(42),
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
table_types: StringVectorBuilder::with_capacity(42),
}
}
/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
for schema_name in self.catalog_provider.schema_names()? {
if schema_name == INFORMATION_SCHEMA_NAME {
continue;
}
let Some(schema) = self.catalog_provider.schema(&schema_name)? else { continue };
for table_name in schema.table_names()? {
let Some(table) = schema.table(&table_name).await? else { continue };
self.add_table(&catalog_name, &schema_name, &table_name, table.table_type());
}
}
// Add a final list for the information schema tables themselves
self.add_table(
&catalog_name,
INFORMATION_SCHEMA_NAME,
TABLES,
TableType::View,
);
self.finish()
}
fn add_table(
&mut self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
table_type: TableType,
) {
self.catalog_names.push(Some(catalog_name));
self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name));
self.table_types.push(Some(match table_type {
TableType::Base => "BASE TABLE",
TableType::View => "VIEW",
TableType::Temporary => "LOCAL TEMPORARY",
}));
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.table_types.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaTables {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

View File

@@ -18,9 +18,8 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::meta::{RegionStat, TableName};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -29,10 +28,8 @@ use table::TableRef;
use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
pub mod datafusion;
pub mod error;
pub mod helper;
pub(crate) mod information_schema;
pub mod local;
pub mod remote;
pub mod schema;
@@ -228,52 +225,39 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
Ok(())
}
/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
///
/// Ignores any errors occurred during iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
/// The number of regions in the datanode node.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
for catalog_name in catalog_names {
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
.catalog(&catalog_name)?
.context(error::CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
let Ok(schema_names) = catalog.schema_names() else { continue };
for schema_name in schema_names {
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };
for schema_name in catalog.schema_names()? {
let schema = catalog
.schema(&schema_name)?
.context(error::SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
let Ok(table_names) = schema.table_names() else { continue };
for table_name in table_names {
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
for table_name in schema.table_names()? {
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;
match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
});
region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
(region_number, region_stats)
Ok(region_number)
}

View File

@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
MITO_ENGINE, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -27,8 +27,7 @@ use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, UInt8Vector};
use futures_util::lock::Mutex;
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
@@ -38,8 +37,7 @@ use table::TableRef;
use crate::error::{
self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu,
SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
@@ -57,7 +55,7 @@ use crate::{
pub struct LocalCatalogManager {
system: Arc<SystemCatalog>,
catalogs: Arc<MemoryCatalogManager>,
engine_manager: TableEngineManagerRef,
engine: TableEngineRef,
next_table_id: AtomicU32,
init_lock: Mutex<bool>,
register_lock: Mutex<()>,
@@ -65,20 +63,19 @@ pub struct LocalCatalogManager {
}
impl LocalCatalogManager {
/// Create a new [CatalogManager] with given user catalogs and mito engine
pub async fn try_new(engine_manager: TableEngineManagerRef) -> Result<Self> {
let engine = engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
/// Create a new [CatalogManager] with given user catalogs and table engine
pub async fn try_new(engine: TableEngineRef) -> Result<Self> {
let table = SystemCatalogTable::new(engine.clone()).await?;
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
let system_catalog = Arc::new(SystemCatalog::new(table));
let system_catalog = Arc::new(SystemCatalog::new(
table,
memory_catalog_list.clone(),
engine.clone(),
));
Ok(Self {
system: system_catalog,
catalogs: memory_catalog_list,
engine_manager,
engine,
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
register_lock: Mutex::new(()),
@@ -103,14 +100,7 @@ impl LocalCatalogManager {
// Processing system table hooks
let mut sys_table_requests = self.system_table_requests.lock().await;
let engine = self
.engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
handle_system_table_request(self, engine, &mut sys_table_requests).await?;
handle_system_table_request(self, self.engine.clone(), &mut sys_table_requests).await?;
Ok(())
}
@@ -263,14 +253,9 @@ impl LocalCatalogManager {
table_name: t.table_name.clone(),
table_id: t.table_id,
};
let engine = self
.engine_manager
.engine(&t.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &t.engine,
})?;
let option = engine
let option = self
.engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
@@ -305,7 +290,9 @@ impl CatalogList for LocalCatalogManager {
}
fn catalog_names(&self) -> Result<Vec<String>> {
self.catalogs.catalog_names()
let mut res = self.catalogs.catalog_names()?;
res.push(SYSTEM_CATALOG_NAME.to_string());
Ok(res)
}
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
@@ -377,7 +364,6 @@ impl CatalogManager for LocalCatalogManager {
// Try to register table with same table id, just ignore.
Ok(false)
} else {
let engine = request.table.table_info().meta.engine.to_string();
// table does not exist
self.system
.register_table(
@@ -385,7 +371,6 @@ impl CatalogManager for LocalCatalogManager {
schema_name.clone(),
request.table_name.clone(),
request.table_id,
engine,
)
.await?;
schema.register_table(request.table_name, request.table)?;
@@ -419,21 +404,6 @@ impl CatalogManager for LocalCatalogManager {
schema: schema_name,
})?;
let _lock = self.register_lock.lock().await;
ensure!(
!schema.table_exist(&request.new_table_name)?,
TableExistsSnafu {
table: &request.new_table_name
}
);
let old_table = schema
.table(&request.table_name)
.await?
.context(TableNotExistSnafu {
table: &request.table_name,
})?;
let engine = old_table.table_info().meta.engine.to_string();
// rename table in system catalog
self.system
.register_table(
@@ -441,14 +411,11 @@ impl CatalogManager for LocalCatalogManager {
schema_name.clone(),
request.new_table_name.clone(),
request.table_id,
engine,
)
.await?;
let renamed = schema
.rename_table(&request.table_name, request.new_table_name.clone())
.is_ok();
Ok(renamed)
Ok(schema
.rename_table(&request.table_name, request.new_table_name)
.is_ok())
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
@@ -563,8 +530,6 @@ impl CatalogManager for LocalCatalogManager {
mod tests {
use std::assert_matches::assert_matches;
use mito::engine::MITO_ENGINE;
use super::*;
use crate::system::{CatalogEntry, SchemaEntry};
@@ -576,7 +541,6 @@ mod tests {
schema_name: "S1".to_string(),
table_name: "T1".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
}),
Entry::Catalog(CatalogEntry {
catalog_name: "C2".to_string(),
@@ -597,7 +561,6 @@ mod tests {
schema_name: "S1".to_string(),
table_name: "T2".to_string(),
table_id: 2,
engine: MITO_ENGINE.to_string(),
}),
];
let res = LocalCatalogManager::sort_entries(vec);

View File

@@ -324,20 +324,16 @@ impl SchemaProvider for MemorySchemaProvider {
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
let mut tables = self.tables.write().unwrap();
let Some(table) = tables.remove(name) else {
return TableNotFoundSnafu {
if tables.get(name).is_some() {
let table = tables.remove(name).unwrap();
tables.insert(new_name, table.clone());
Ok(table)
} else {
TableNotFoundSnafu {
table_info: name.to_string(),
}
.fail()?;
};
let e = match tables.entry(new_name) {
Entry::Vacant(e) => e,
Entry::Occupied(e) => {
return TableExistsSnafu { table: e.key() }.fail();
}
};
e.insert(table.clone());
Ok(table)
.fail()?
}
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
@@ -400,6 +396,7 @@ mod tests {
let other_table = NumbersTable::new(12);
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
let err = result.err().unwrap();
assert!(err.backtrace_opt().is_some());
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
}
@@ -463,7 +460,7 @@ mod tests {
assert!(schema.table_exist(table_name).unwrap());
// rename table
let new_table_name = "numbers_new";
let new_table_name = "numbers";
let rename_table_req = RenameTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),

View File

@@ -20,25 +20,23 @@ use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID, MITO_ENGINE,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_telemetry::{debug, error, info};
use dashmap::DashMap;
use futures::Stream;
use futures_util::StreamExt;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::{CreateTableRequest, OpenTableRequest};
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
SchemaNotFoundSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
@@ -57,14 +55,14 @@ pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
engine_manager: TableEngineManagerRef,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
impl RemoteCatalogManager {
pub fn new(engine_manager: TableEngineManagerRef, node_id: u64, backend: KvBackendRef) -> Self {
pub fn new(engine: TableEngineRef, node_id: u64, backend: KvBackendRef) -> Self {
Self {
engine_manager,
engine,
node_id,
backend,
catalogs: Default::default(),
@@ -188,9 +186,7 @@ impl RemoteCatalogManager {
let max_table_id = MIN_USER_TABLE_ID - 1;
// initiate default catalog and schema
let default_catalog = self
.create_catalog_and_schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await?;
let default_catalog = self.initiate_default_catalog().await?;
res.insert(DEFAULT_CATALOG_NAME.to_string(), default_catalog);
info!("Default catalog and schema registered");
@@ -270,19 +266,13 @@ impl RemoteCatalogManager {
Ok(())
}
pub async fn create_catalog_and_schema(
&self,
catalog_name: &str,
schema_name: &str,
) -> Result<CatalogProviderRef> {
let schema_provider = self.new_schema_provider(catalog_name, schema_name);
let catalog_provider = self.new_catalog_provider(catalog_name);
catalog_provider.register_schema(schema_name.to_string(), schema_provider.clone())?;
async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())?;
let schema_key = SchemaKey {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
}
.to_string();
self.backend
@@ -293,10 +283,10 @@ impl RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
info!("Created schema '{schema_key}'");
info!("Registered default schema");
let catalog_key = CatalogKey {
catalog_name: catalog_name.to_string(),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
}
.to_string();
self.backend
@@ -307,8 +297,8 @@ impl RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
info!("Created catalog '{catalog_key}");
Ok(catalog_provider)
info!("Registered default catalog");
Ok(default_catalog)
}
async fn open_or_create_table(
@@ -341,13 +331,8 @@ impl RemoteCatalogManager {
table_name: table_name.clone(),
table_id,
};
let engine = self
.engine_manager
.engine(&table_info.meta.engine)
.context(TableEngineNotFoundSnafu {
engine_name: &table_info.meta.engine,
})?;
match engine
match self
.engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
@@ -378,10 +363,9 @@ impl RemoteCatalogManager {
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
engine: engine.name().to_string(),
};
engine
self.engine
.create_table(&context, req)
.await
.context(CreateTableSnafu {
@@ -414,14 +398,17 @@ impl CatalogManager for RemoteCatalogManager {
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
let engine = self
.engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
handle_system_table_request(self, engine, &mut system_table_requests).await?;
handle_system_table_request(self, self.engine.clone(), &mut system_table_requests).await?;
info!("All system table opened");
self.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap()
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap()
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
Ok(())
}

View File

@@ -18,7 +18,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
use crate::error::{NotSupportedSnafu, Result};
use crate::error::Result;
/// Represents a schema, comprising a number of named tables.
#[async_trait]
@@ -35,30 +35,15 @@ pub trait SchemaProvider: Sync + Send {
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns "Table already exists" error.
fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
NotSupportedSnafu {
op: format!("register_table({name}, <table>)"),
}
.fail()
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;
/// If supported by the implementation, renames an existing table from this schema and returns it.
/// If no table of that name exists, returns "Table not found" error.
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
NotSupportedSnafu {
op: format!("rename_table({name}, {new_name})"),
}
.fail()
}
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;
/// If supported by the implementation, removes an existing table from this schema and returns it.
/// If no table of that name exists, returns Ok(None).
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
NotSupportedSnafu {
op: format!("deregister_table({name})"),
}
.fail()
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;
/// If supported by the implementation, checks the table exist in the schema provider or not.
/// If no matched table in the schema provider, return false.

View File

@@ -17,8 +17,8 @@ use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME,
SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
@@ -112,7 +112,6 @@ impl SystemCatalogTable {
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: TableOptions::default(),
engine: engine.name().to_string(),
};
let table = engine
@@ -195,13 +194,12 @@ pub fn build_table_insert_request(
schema: String,
table_name: String,
table_id: TableId,
engine: String,
) -> InsertRequest {
let entry_key = format_table_entry_key(&catalog, &schema, table_id);
build_insert_request(
EntryType::Table,
entry_key.as_bytes(),
serde_json::to_string(&TableEntryValue { table_name, engine })
serde_json::to_string(&TableEntryValue { table_name })
.unwrap()
.as_bytes(),
)
@@ -332,7 +330,6 @@ pub fn decode_system_catalog(
schema_name: table_parts[1].to_string(),
table_name: table_meta.table_name,
table_id,
engine: table_meta.engine,
}))
}
}
@@ -388,19 +385,11 @@ pub struct TableEntry {
pub schema_name: String,
pub table_name: String,
pub table_id: TableId,
pub engine: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableEntryValue {
pub table_name: String,
#[serde(default = "mito_engine")]
pub engine: String,
}
fn mito_engine() -> String {
MITO_ENGINE.to_string()
}
#[cfg(test)]
@@ -410,8 +399,8 @@ mod tests {
use datatypes::value::Value;
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::{MitoEngine, MITO_ENGINE};
use object_store::ObjectStore;
use mito::engine::MitoEngine;
use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
@@ -493,9 +482,11 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let mut builder = object_store::services::Fs::default();
builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor).finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
@@ -539,7 +530,6 @@ mod tests {
DEFAULT_SCHEMA_NAME.to_string(),
"my_table".to_string(),
1,
MITO_ENGINE.to_string(),
);
let result = catalog_table.insert(table_insertion).await.unwrap();
assert_eq!(result, 1);
@@ -560,7 +550,6 @@ mod tests {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "my_table".to_string(),
table_id: 1,
engine: MITO_ENGINE.to_string(),
});
assert_eq!(entry, expected);

View File

@@ -15,9 +15,8 @@
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_catalog::format_full_table_name;
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
@@ -27,7 +26,6 @@ use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::CatalogListRef;
pub struct DfTableSourceProvider {
@@ -89,8 +87,9 @@ impl DfTableSourceProvider {
pub async fn resolve_table(
&mut self,
table_ref: TableReference<'_>,
table_ref: OwnedTableReference,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;
let resolved_name = table_ref.to_string();
@@ -102,25 +101,14 @@ impl DfTableSourceProvider {
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
let schema = if schema_name != INFORMATION_SCHEMA_NAME {
let catalog = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?
} else {
let catalog_provider = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
Arc::new(InformationSchemaProvider::new(
catalog_name.to_string(),
catalog_provider,
))
};
let catalog = self
.catalog_list
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
catalog: catalog_name,
schema: schema_name,
})?;
let table = schema
.table(table_name)
.await?

View File

@@ -15,12 +15,28 @@
// The `tables` table in system catalog keeps a record of all tables created by user.
use std::any::Any;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
use common_error::ext::BoxedError;
use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream};
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use futures::Stream;
use snafu::ResultExt;
use table::metadata::TableId;
use table::engine::TableEngineRef;
use table::error::TablesRecordBatchSnafu;
use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
@@ -28,9 +44,160 @@ use crate::system::{
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
SystemCatalogTable,
};
use crate::{CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef};
use crate::{
CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
};
/// Tables holds all tables created by user.
pub struct Tables {
schema: SchemaRef,
catalogs: CatalogListRef,
engine_name: String,
}
impl Tables {
pub fn new(catalogs: CatalogListRef, engine_name: String) -> Self {
Self {
schema: Arc::new(build_schema_for_tables()),
catalogs,
engine_name,
}
}
}
#[async_trait::async_trait]
impl Table for Tables {
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn table_info(&self) -> TableInfoRef {
unreachable!("Tables does not support table_info method")
}
async fn scan(
&self,
_projection: Option<&Vec<usize>>,
_filters: &[Expr],
_limit: Option<usize>,
) -> table::error::Result<PhysicalPlanRef> {
let catalogs = self.catalogs.clone();
let schema_ref = self.schema.clone();
let engine_name = self.engine_name.clone();
let stream = stream!({
for catalog_name in catalogs
.catalog_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
let catalog = catalogs
.catalog(&catalog_name)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.unwrap();
for schema_name in catalog
.schema_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
let mut tables_in_schema = Vec::with_capacity(
catalog
.schema_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.len(),
);
let schema = catalog
.schema(&schema_name)
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.unwrap();
for table_name in schema
.table_names()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
{
tables_in_schema.push(table_name);
}
let vec = tables_to_record_batch(
&catalog_name,
&schema_name,
tables_in_schema,
&engine_name,
);
let record_batch_res = RecordBatch::new(schema_ref.clone(), vec);
yield record_batch_res;
}
}
});
let stream = Box::pin(TablesRecordBatchStream {
schema: self.schema.clone(),
stream: Box::pin(stream),
});
Ok(Arc::new(SimpleTableScan::new(stream)))
}
}
/// Convert tables info to `RecordBatch`.
fn tables_to_record_batch(
catalog_name: &str,
schema_name: &str,
table_names: Vec<String>,
engine: &str,
) -> Vec<VectorRef> {
let mut catalog_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut schema_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut table_name_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
let mut engine_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
for table_name in table_names {
// Safety: All these vectors are string type.
catalog_vec.push_value_ref(ValueRef::String(catalog_name));
schema_vec.push_value_ref(ValueRef::String(schema_name));
table_name_vec.push_value_ref(ValueRef::String(&table_name));
engine_vec.push_value_ref(ValueRef::String(engine));
}
vec![
catalog_vec.to_vector(),
schema_vec.to_vector(),
table_name_vec.to_vector(),
engine_vec.to_vector(),
]
}
pub struct TablesRecordBatchStream {
schema: SchemaRef,
stream: Pin<Box<dyn Stream<Item = RecordBatchResult<RecordBatch>> + Send>>,
}
impl Stream for TablesRecordBatchStream {
type Item = RecordBatchResult<RecordBatch>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.stream).poll_next(cx)
}
}
impl RecordBatchStream for TablesRecordBatchStream {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
}
pub struct InformationSchema {
pub tables: Arc<Tables>,
pub system: Arc<SystemCatalogTable>,
}
@@ -41,19 +208,41 @@ impl SchemaProvider for InformationSchema {
}
fn table_names(&self) -> Result<Vec<String>, Error> {
Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
Ok(vec![
"tables".to_string(),
SYSTEM_CATALOG_TABLE_NAME.to_string(),
])
}
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
if name.eq_ignore_ascii_case("tables") {
Ok(Some(self.tables.clone()))
} else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
Ok(Some(self.system.clone()))
} else {
Ok(None)
}
}
fn register_table(
&self,
_name: String,
_table: TableRef,
) -> crate::error::Result<Option<TableRef>> {
panic!("System catalog & schema does not support register table")
}
fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
unimplemented!("System catalog & schema does not support rename table")
}
fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
panic!("System catalog & schema does not support deregister table")
}
fn table_exist(&self, name: &str) -> Result<bool, Error> {
Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
Ok(name.eq_ignore_ascii_case("tables")
|| name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
}
}
@@ -62,8 +251,13 @@ pub struct SystemCatalog {
}
impl SystemCatalog {
pub(crate) fn new(system: SystemCatalogTable) -> Self {
pub fn new(
system: SystemCatalogTable,
catalogs: CatalogListRef,
engine: TableEngineRef,
) -> Self {
let schema = InformationSchema {
tables: Arc::new(Tables::new(catalogs, engine.name().to_string())),
system: Arc::new(system),
};
Self {
@@ -77,9 +271,8 @@ impl SystemCatalog {
schema: String,
table_name: String,
table_id: TableId,
engine: String,
) -> crate::error::Result<usize> {
let request = build_table_insert_request(catalog, schema, table_name, table_id, engine);
let request = build_table_insert_request(catalog, schema, table_name, table_id);
self.information_schema
.system
.insert(request)
@@ -141,3 +334,104 @@ impl CatalogProvider for SystemCatalog {
}
}
}
fn build_schema_for_tables() -> Schema {
let cols = vec![
ColumnSchema::new(
"catalog".to_string(),
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
"schema".to_string(),
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
"table_name".to_string(),
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
"engine".to_string(),
ConcreteDataType::string_datatype(),
false,
),
];
Schema::new(cols)
}
#[cfg(test)]
mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::physical_plan::SessionContext;
use futures_util::StreamExt;
use table::table::numbers::NumbersTable;
use super::*;
use crate::local::memory::new_memory_catalog_list;
use crate::CatalogList;
#[tokio::test]
async fn test_tables() {
let catalog_list = new_memory_catalog_list().unwrap();
let schema = catalog_list
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap()
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
schema
.register_table("test_table".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
let tables = Tables::new(catalog_list, "test_engine".to_string());
let tables_stream = tables.scan(None, &[], None).await.unwrap();
let session_ctx = SessionContext::new();
let mut tables_stream = tables_stream.execute(0, session_ctx.task_ctx()).unwrap();
if let Some(t) = tables_stream.next().await {
let batch = t.unwrap();
assert_eq!(1, batch.num_rows());
assert_eq!(4, batch.num_columns());
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(0).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(1).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(2).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(3).data_type()
);
assert_eq!(
"greptime",
batch.column(0).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"public",
batch.column(1).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"test_table",
batch.column(2).get_ref(0).as_string().unwrap().unwrap()
);
assert_eq!(
"test_engine",
batch.column(3).get_ref(0).as_string().unwrap().unwrap()
);
} else {
panic!("Record batch should not be empty!")
}
}
}
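A hedged sketch of how a caller might exercise the extended `InformationSchema` above; the helper function is hypothetical and assumes it lives in the same module, so `InformationSchema`, `Error`, `Arc` and the `SchemaProvider` trait are already in scope:

```rust
async fn list_system_tables(info_schema: Arc<InformationSchema>) -> Result<(), Error> {
    // Both the new "tables" table and the system catalog table are visible,
    // and lookups are case-insensitive per the eq_ignore_ascii_case calls above.
    assert!(info_schema.table_exist("TABLES")?);
    for name in info_schema.table_names()? {
        let table = info_schema.table(&name).await?;
        assert!(table.is_some());
    }
    Ok(())
}
```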

View File

@@ -21,7 +21,6 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::{error, info};
use mito::config::EngineConfig;
use table::engine::manager::MemoryTableEngineManager;
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
@@ -34,8 +33,7 @@ mod tests {
mito::table::test_util::MockEngine::default(),
object_store,
));
let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
let catalog_manager = LocalCatalogManager::try_new(engine_manager).await.unwrap();
let catalog_manager = LocalCatalogManager::try_new(mock_engine).await.unwrap();
catalog_manager.start().await?;
Ok(catalog_manager)
}

View File

@@ -27,10 +27,9 @@ mod tests {
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::manager::MemoryTableEngineManager;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
@@ -81,11 +80,8 @@ mod tests {
) -> (KvBackendRef, TableEngineRef, Arc<RemoteCatalogManager>) {
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
let table_engine = Arc::new(MockTableEngine::default());
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
MITO_ENGINE.to_string(),
table_engine.clone(),
));
let catalog_manager = RemoteCatalogManager::new(engine_manager, node_id, backend.clone());
let catalog_manager =
RemoteCatalogManager::new(table_engine.clone(), node_id, backend.clone());
catalog_manager.start().await.unwrap();
(backend, table_engine, Arc::new(catalog_manager))
}
@@ -135,7 +131,6 @@ mod tests {
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await
@@ -173,6 +168,7 @@ mod tests {
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
assert_eq!(vec!["numbers"], default_schema.table_names().unwrap());
// register a new table with a nonexistent catalog
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
@@ -195,7 +191,6 @@ mod tests {
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await
@@ -208,7 +203,14 @@ mod tests {
table,
};
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
assert_eq!(
HashSet::from([table_name, "numbers".to_string()]),
default_schema
.table_names()
.unwrap()
.into_iter()
.collect::<HashSet<_>>()
);
}
#[tokio::test]
@@ -249,7 +251,6 @@ mod tests {
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
engine: MITO_ENGINE.to_string(),
},
)
.await

View File

@@ -23,7 +23,7 @@ enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
rand = "0.8"
snafu.workspace = true
tonic.workspace = true

View File

@@ -14,7 +14,7 @@
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
@@ -64,7 +64,6 @@ async fn run() {
table_options: Default::default(),
table_id: Some(TableId { id: 1024 }),
region_ids: vec![0],
engine: MITO_ENGINE.to_string(),
};
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

View File

@@ -15,8 +15,6 @@
use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -155,13 +153,6 @@ impl Client {
inner: GreptimeDatabaseClient::new(channel),
})
}
pub async fn health_check(&self) -> Result<()> {
let (_, channel) = self.find_channel()?;
let mut client = HealthCheckClient::new(channel);
client.health_check(HealthCheckRequest {}).await?;
Ok(())
}
}
#[cfg(test)]

View File

@@ -35,44 +35,25 @@ use crate::error::{
};
use crate::{error, Client, Result};
#[derive(Clone, Debug, Default)]
#[derive(Clone, Debug)]
pub struct Database {
// The "catalog" and "schema" to be used in processing the requests at the server side.
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
// They will be carried in the request header.
catalog: String,
schema: String,
// The dbname follows the same naming rules as our MySQL, PostgreSQL and HTTP
// protocols. The server treats dbname with higher priority than catalog/schema.
dbname: String,
client: Client,
ctx: FlightContext,
}
impl Database {
/// Create database service client using catalog and schema
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
Self {
catalog: catalog.into(),
schema: schema.into(),
client,
..Default::default()
}
}
/// Create database service client using dbname.
///
/// This API is designed for external usage. `dbname` is:
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or another multi-tenant GreptimeDB
///   environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
dbname: dbname.into(),
client,
..Default::default()
ctx: FlightContext::default(),
}
}
@@ -92,14 +73,6 @@ impl Database {
self.schema = schema.into();
}
pub fn dbname(&self) -> &String {
&self.dbname
}
pub fn set_dbname(&mut self, dbname: impl Into<String>) {
self.dbname = dbname.into();
}
pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
@@ -113,7 +86,6 @@ impl Database {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(Request::Insert(request)),
};
@@ -195,7 +167,6 @@ impl Database {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
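With `dbname` gone, the client is addressed purely by catalog and schema. A minimal sketch, assuming a `Client` has already been constructed elsewhere; the helper and its string values are illustrative ("greptime" and "public" mirror the defaults used in tests):

```rust
// Hypothetical helper, not part of this change.
fn connect(client: Client) -> Database {
    let mut db = Database::new("greptime", "public", client);
    // The remaining setters can still switch the context after construction.
    db.set_schema("another_schema");
    db
}
```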

View File

@@ -16,14 +16,16 @@ use std::any::Any;
use std::str::FromStr;
use common_error::prelude::*;
use snafu::Location;
use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },
IllegalFlightMessages {
reason: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
FlightGet {
@@ -45,10 +47,13 @@ pub enum Error {
},
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState { err_msg: String, location: Location },
IllegalGrpcClientState {
err_msg: String,
backtrace: Backtrace,
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
MissingField { field: String, backtrace: Backtrace },
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
@@ -61,6 +66,10 @@ pub enum Error {
source: common_grpc::error::Error,
},
/// Error deserialized from gRPC metadata
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
Server { code: StatusCode, msg: String },
@@ -85,9 +94,14 @@ impl ErrorExt for Error {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![doc = include_str!("../../../../README.md")]
use std::fmt;
use clap::Parser;

View File

@@ -31,6 +31,7 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
// TODO: handle cli shutdown
Ok(())
}
}

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{
@@ -23,7 +21,7 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::toml_loader;
pub struct Instance {
@@ -36,10 +34,8 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
// TODO: handle datanode shutdown
Ok(())
}
}
@@ -88,10 +84,6 @@ struct StartCommand {
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
}
impl StartCommand {
@@ -152,24 +144,16 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}
if let Some(data_dir) = cmd.data_dir {
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
}
if let Some(wal_dir) = cmd.wal_dir {
opts.wal.dir = wal_dir;
}
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}
if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}
// Disable dashboard in datanode.
opts.http_opts.disable_dashboard = true;
Ok(opts)
}
@@ -181,9 +165,8 @@ mod tests {
use std::io::Write;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig, RegionManifestConfig};
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use servers::Mode;
use super::*;
@@ -219,15 +202,10 @@ mod tests {
type = "File"
data_dir = "/tmp/greptimedb/data/"
[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
[storage.manifest]
checkpoint_margin = 9
gc_duration = '7s'
checkpoint_on_startup = true
"#;
write!(file, "{}", toml_str).unwrap();
@@ -258,9 +236,9 @@ mod tests {
assert_eq!(3000, timeout_millis);
assert!(tcp_nodelay);
match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_dir, .. }) => {
assert_eq!("/tmp/greptimedb/data/", data_dir)
match options.storage {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
@@ -268,20 +246,11 @@ mod tests {
assert_eq!(
CompactionConfig {
max_inflight_tasks: 3,
max_files_in_level0: 7,
max_inflight_tasks: 4,
max_files_in_level0: 8,
max_purge_tasks: 32,
sst_write_buffer_size: ReadableSize::mb(8),
},
options.storage.compaction,
);
assert_eq!(
RegionManifestConfig {
checkpoint_margin: Some(9),
gc_duration: Some(Duration::from_secs(7)),
checkpoint_on_startup: true,
},
options.storage.manifest,
options.compaction
);
}

View File

@@ -16,7 +16,6 @@ use std::any::Any;
use common_error::prelude::*;
use rustyline::error::ReadlineError;
use snafu::Location;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -27,10 +26,10 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(display("Failed to stop datanode, source: {}", source))]
StopDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
source: BoxedError,
},
#[snafu(display("Failed to start frontend, source: {}", source))]
@@ -39,12 +38,6 @@ pub enum Error {
source: frontend::error::Error,
},
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},
#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
@@ -57,30 +50,24 @@ pub enum Error {
source: meta_srv::error::Error,
},
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
source: std::io::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to parse config, source: {}", source))]
ParseConfig {
source: toml::de::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, location: Location },
MissingConfig { msg: String, backtrace: Backtrace },
#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, location: Location },
IllegalConfig { msg: String, backtrace: Backtrace },
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
@@ -101,13 +88,13 @@ pub enum Error {
#[snafu(display("Cannot create REPL: {}", source))]
ReplCreation {
source: ReadlineError,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Error reading command: {}", source))]
Readline {
source: ReadlineError,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
@@ -162,10 +149,7 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
@@ -185,10 +169,82 @@ impl ErrorExt for Error {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source } => source.status_code(),
Error::StopDatanode { source } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use super::*;
type StdResult<E> = std::result::Result<(), E>;
#[test]
fn test_start_node_error() {
fn throw_datanode_error() -> StdResult<datanode::error::Error> {
datanode::error::MissingNodeIdSnafu {}.fail()
}
let e = throw_datanode_error()
.context(StartDatanodeSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_start_frontend_error() {
fn throw_frontend_error() -> StdResult<frontend::error::Error> {
frontend::error::InvalidSqlSnafu { err_msg: "failed" }.fail()
}
let e = throw_frontend_error()
.context(StartFrontendSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_start_metasrv_error() {
fn throw_metasrv_error() -> StdResult<meta_srv::error::Error> {
meta_srv::error::StreamNoneSnafu {}.fail()
}
let e = throw_metasrv_error()
.context(StartMetaServerSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::Internal);
}
#[test]
fn test_read_config_error() {
fn throw_read_config_error() -> StdResult<std::io::Error> {
Err(std::io::ErrorKind::NotFound.into())
}
let e = throw_read_config_error()
.context(ReadConfigSnafu { path: "test" })
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
}

View File

@@ -47,10 +47,8 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
// TODO: handle frontend shutdown
Ok(())
}
}
@@ -107,8 +105,6 @@ pub struct StartCommand {
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: bool,
}
impl StartCommand {
@@ -151,24 +147,18 @@ impl TryFrom<StartCommand> for FrontendOptions {
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
let mut http_options = HttpOptions {
disable_dashboard: cmd.disable_dashboard,
..Default::default()
};
if let Some(addr) = cmd.http_addr {
http_options.addr = addr;
opts.http_options = Some(HttpOptions {
addr,
..Default::default()
});
}
opts.http_options = Some(http_options);
if let Some(addr) = cmd.grpc_addr {
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
@@ -235,7 +225,6 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
disable_dashboard: false,
};
let opts: FrontendOptions = command.try_into().unwrap();
@@ -298,7 +287,6 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
disable_dashboard: false,
};
let fe_opts = FrontendOptions::try_from(command).unwrap();
@@ -329,7 +317,6 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: false,
};
let plugins = load_frontend_plugins(&command.user_provider);

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use clap::Parser;
use common_telemetry::{info, logging, warn};
use meta_srv::bootstrap::MetaSrvInstance;
@@ -32,14 +30,13 @@ impl Instance {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)
.context(error::StartMetaServerSnafu)?;
Ok(())
}
pub async fn stop(&self) -> Result<()> {
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
// TODO: handle metasrv shutdown
Ok(())
}
}
@@ -82,10 +79,6 @@ struct StartCommand {
selector: Option<String>,
#[clap(long)]
use_memory_store: bool,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
}
impl StartCommand {
@@ -134,16 +127,6 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
opts.use_memory_store = true;
}
if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr;
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
}
// Disable dashboard in metasrv.
opts.http_opts.disable_dashboard = true;
Ok(opts)
}
}
@@ -166,8 +149,6 @@ mod tests {
config_file: None,
selector: Some("LoadBased".to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -196,8 +177,6 @@ mod tests {
selector: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
use_memory_store: false,
http_addr: None,
http_timeout: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);

View File

@@ -16,8 +16,11 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_error::prelude::BoxedError;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
use datanode::datanode::{
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
};
use datanode::instance::InstanceRef;
use frontend::frontend::FrontendOptions;
use frontend::grpc::GrpcOptions;
@@ -35,8 +38,7 @@ use servers::Mode;
use snafu::ResultExt;
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu, StopDatanodeSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
@@ -80,7 +82,8 @@ pub struct StandaloneOptions {
pub prometheus_options: Option<PrometheusOptions>,
pub prom_options: Option<PromOptions>,
pub wal: WalConfig,
pub storage: StorageConfig,
pub storage: ObjectStoreConfig,
pub compaction: CompactionConfig,
pub procedure: Option<ProcedureConfig>,
}
@@ -98,7 +101,8 @@ impl Default for StandaloneOptions {
prometheus_options: Some(PrometheusOptions::default()),
prom_options: Some(PromOptions::default()),
wal: WalConfig::default(),
storage: StorageConfig::default(),
storage: ObjectStoreConfig::default(),
compaction: CompactionConfig::default(),
procedure: None,
}
}
@@ -125,6 +129,7 @@ impl StandaloneOptions {
enable_memory_catalog: self.enable_memory_catalog,
wal: self.wal,
storage: self.storage,
compaction: self.compaction,
procedure: self.procedure,
..Default::default()
}
@@ -150,16 +155,16 @@ impl Instance {
}
pub async fn stop(&self) -> Result<()> {
self.datanode
.shutdown()
.await
.map_err(BoxedError::new)
.context(StopDatanodeSnafu)?;
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;
self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
.map_err(BoxedError::new)
.context(StopDatanodeSnafu)?;
Ok(())
}
@@ -236,9 +241,8 @@ async fn build_frontend(
plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
) -> Result<FeInstance> {
let mut frontend_instance = FeInstance::try_new_standalone(datanode_instance.clone())
.await
.context(StartFrontendSnafu)?;
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
frontend_instance.set_plugins(plugins.clone());
Ok(frontend_instance)
}

View File

@@ -18,7 +18,7 @@ use std::io::{Read, Write};
use bytes::{Buf, BufMut, BytesMut};
use common_error::prelude::ErrorExt;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};
use snafu::{ensure, Backtrace, ErrorCompat, ResultExt, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -31,32 +31,28 @@ pub enum Error {
Overflow {
src_len: usize,
dst_len: usize,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Buffer underflow"))]
Underflow { location: Location },
Underflow { backtrace: Backtrace },
#[snafu(display("IO operation reach EOF, source: {}", source))]
Eof {
source: std::io::Error,
location: Location,
backtrace: Backtrace,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn as_any(&self) -> &dyn Any {
self
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn location_opt(&self) -> Option<common_error::snafu::Location> {
match self {
Error::Overflow { location, .. } => Some(*location),
Error::Underflow { location, .. } => Some(*location),
Error::Eof { location, .. } => Some(*location),
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -53,10 +53,6 @@ impl ReadableSize {
pub const fn as_mb(self) -> u64 {
self.0 / MIB
}
pub const fn as_bytes(self) -> u64 {
self.0
}
}
impl Div<u64> for ReadableSize {

View File

@@ -25,6 +25,3 @@ pub const MIN_USER_TABLE_ID: u32 = 1024;
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";

View File

@@ -16,29 +16,29 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::Location;
use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog { key: String, location: Location },
InvalidCatalog { key: String, backtrace: Backtrace },
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
location: Location,
backtrace: Backtrace,
source: serde_json::error::Error,
},
#[snafu(display("Failed to serialize catalog entry value"))]
SerializeCatalogEntryValue {
location: Location,
backtrace: Backtrace,
source: serde_json::error::Error,
},
#[snafu(display("Failed to parse node id: {}", key))]
ParseNodeId { key: String, location: Location },
ParseNodeId { key: String, backtrace: Backtrace },
}
impl ErrorExt for Error {
@@ -51,6 +51,10 @@ impl ErrorExt for Error {
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}

View File

@@ -1,28 +0,0 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
arrow.workspace = true
arrow-schema.workspace = true
async-compression = { version = "0.3", features = [
"bzip2",
"gzip",
"xz",
"zstd",
"futures-io",
"tokio",
] }
async-trait.workspace = true
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion.workspace = true
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
tokio-util.workspace = true
url = "2.3"

View File

@@ -1,84 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use std::str::FromStr;
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
use tokio::io::{AsyncRead, BufReader};
use crate::error::{self, Error, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CompressionType {
/// Gzip-ed file
GZIP,
/// Bzip2-ed file
BZIP2,
/// Xz-ed file (liblzma)
XZ,
/// Zstd-ed file
ZSTD,
/// Uncompressed file
UNCOMPRESSED,
}
impl FromStr for CompressionType {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let s = s.to_uppercase();
match s.as_str() {
"GZIP" | "GZ" => Ok(Self::GZIP),
"BZIP2" | "BZ2" => Ok(Self::BZIP2),
"XZ" => Ok(Self::XZ),
"ZST" | "ZSTD" => Ok(Self::ZSTD),
"" => Ok(Self::UNCOMPRESSED),
_ => error::UnsupportedCompressionTypeSnafu {
compression_type: s,
}
.fail(),
}
}
}
impl Display for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Self::GZIP => "GZIP",
Self::BZIP2 => "BZIP2",
Self::XZ => "XZ",
Self::ZSTD => "ZSTD",
Self::UNCOMPRESSED => "",
})
}
}
impl CompressionType {
pub const fn is_compressed(&self) -> bool {
!matches!(self, &Self::UNCOMPRESSED)
}
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
&self,
s: T,
) -> Box<dyn AsyncRead + Unpin + Send> {
match self {
CompressionType::GZIP => Box::new(GzipDecoder::new(BufReader::new(s))),
CompressionType::BZIP2 => Box::new(BzDecoder::new(BufReader::new(s))),
CompressionType::XZ => Box::new(XzDecoder::new(BufReader::new(s))),
CompressionType::ZSTD => Box::new(ZstdDecoder::new(BufReader::new(s))),
CompressionType::UNCOMPRESSED => Box::new(s),
}
}
}
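A small usage sketch for the compression helpers above, assuming the module's existing imports (`FromStr`, `AsyncRead`); the file-based helper and `tokio::fs::File` import are assumptions for illustration:

```rust
use tokio::fs::File;

// Opens a file and wraps it in the matching streaming decoder.
async fn open_decoded(
    path: &str,
    ext: &str,
) -> std::io::Result<Box<dyn AsyncRead + Unpin + Send>> {
    // "GZ", "BZ2", "XZ", "ZST"/"ZSTD" and "" all parse; anything else errors.
    let compression = CompressionType::from_str(ext).expect("supported compression");
    let file = File::open(path).await?;
    // UNCOMPRESSED input is passed through unchanged.
    Ok(compression.convert_async_read(file))
}
```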

View File

@@ -1,142 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
use snafu::Location;
use url::ParseError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Unsupported compression type: {}", compression_type))]
UnsupportedCompressionType { compression_type: String },
#[snafu(display("Unsupported backend protocol: {}", protocol))]
UnsupportedBackendProtocol { protocol: String },
#[snafu(display("empty host: {}", url))]
EmptyHostPath { url: String },
#[snafu(display("Invalid path: {}", path))]
InvalidPath { path: String },
#[snafu(display("Invalid url: {}, error :{}", url, source))]
InvalidUrl { url: String, source: ParseError },
#[snafu(display("Failed to decompression, source: {}", source))]
Decompression {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to build backend, source: {}", source))]
BuildBackend {
source: object_store::Error,
location: Location,
},
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
ReadObject {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Failed to read parquet source: {}", source))]
ReadParquetSnafu {
location: Location,
source: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to convert parquet to schema: {}", source))]
ParquetToSchema {
location: Location,
source: datafusion::parquet::errors::ParquetError,
},
#[snafu(display("Failed to infer schema from file: {}, source: {}", path, source))]
InferSchema {
path: String,
location: Location,
source: arrow_schema::ArrowError,
},
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
ListObjects {
path: String,
location: Location,
source: object_store::Error,
},
#[snafu(display("Invalid connection: {}", msg))]
InvalidConnection { msg: String },
#[snafu(display("Failed to join handle: {}", source))]
JoinHandle {
location: Location,
source: tokio::task::JoinError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
BuildBackend { .. } | ListObjects { .. } | ReadObject { .. } => {
StatusCode::StorageUnavailable
}
UnsupportedBackendProtocol { .. }
| UnsupportedCompressionType { .. }
| InvalidConnection { .. }
| InvalidUrl { .. }
| EmptyHostPath { .. }
| InvalidPath { .. }
| InferSchema { .. }
| ReadParquetSnafu { .. }
| ParquetToSchema { .. } => StatusCode::InvalidArguments,
Decompression { .. } | JoinHandle { .. } => StatusCode::Unexpected,
}
}
fn as_any(&self) -> &dyn Any {
self
}
fn location_opt(&self) -> Option<common_error::snafu::Location> {
use Error::*;
match self {
BuildBackend { location, .. } => Some(*location),
ReadObject { location, .. } => Some(*location),
ListObjects { location, .. } => Some(*location),
InferSchema { location, .. } => Some(*location),
ReadParquetSnafu { location, .. } => Some(*location),
ParquetToSchema { location, .. } => Some(*location),
Decompression { location, .. } => Some(*location),
JoinHandle { location, .. } => Some(*location),
UnsupportedBackendProtocol { .. }
| EmptyHostPath { .. }
| InvalidPath { .. }
| InvalidUrl { .. }
| InvalidConnection { .. }
| UnsupportedCompressionType { .. } => None,
}
}
}

View File

@@ -1,30 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod csv;
pub mod json;
pub mod parquet;
pub const DEFAULT_SCHEMA_INFER_MAX_RECORD: usize = 1000;
use arrow::datatypes::SchemaRef;
use async_trait::async_trait;
use object_store::ObjectStore;
use crate::error::Result;
#[async_trait]
pub trait FileFormat: Send + Sync + std::fmt::Debug {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef>;
}
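A hedged sketch of consuming the trait generically; the `describe` helper is hypothetical and relies on the module's existing `ObjectStore` import:

```rust
// Works for any FileFormat implementation (CSV, JSON, Parquet, ...).
async fn describe(format: &dyn FileFormat, store: &ObjectStore, path: String) {
    match format.infer_schema(store, path).await {
        Ok(schema) => println!("inferred {} fields", schema.fields().len()),
        Err(e) => eprintln!("schema inference failed: {e}"),
    }
}
```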

View File

@@ -1,158 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
use arrow_schema::SchemaRef;
use async_trait::async_trait;
use common_runtime;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::io::SyncIoBridge;
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::file_format::{self, FileFormat};
#[derive(Debug)]
pub struct CsvFormat {
pub has_header: bool,
pub delimiter: u8,
pub schema_infer_max_record: Option<usize>,
pub compression_type: CompressionType,
}
impl Default for CsvFormat {
fn default() -> Self {
Self {
has_header: true,
delimiter: b',',
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
compression_type: CompressionType::UNCOMPRESSED,
}
}
}
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
let reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let decoded = self.compression_type.convert_async_read(reader);
let delimiter = self.delimiter;
let schema_infer_max_record = self.schema_infer_max_record;
let has_header = self.has_header;
common_runtime::spawn_blocking_read(move || {
let reader = SyncIoBridge::new(decoded);
let (schema, _records_read) =
infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
.context(error::InferSchemaSnafu { path: &path })?;
Ok(Arc::new(schema))
})
.await
.context(error::JoinHandleSnafu)?
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::FileFormat;
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/csv").display().to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let csv = CsvFormat::default();
let store = test_store(&test_data_root());
let schema = csv
.infer_schema(&store, "simple.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"c1: Utf8: NULL",
"c2: Int64: NULL",
"c3: Int64: NULL",
"c4: Int64: NULL",
"c5: Int64: NULL",
"c6: Int64: NULL",
"c7: Int64: NULL",
"c8: Int64: NULL",
"c9: Int64: NULL",
"c10: Int64: NULL",
"c11: Float64: NULL",
"c12: Float64: NULL",
"c13: Utf8: NULL"
],
formatted,
);
}
#[tokio::test]
async fn infer_schema_with_limit() {
let json = CsvFormat {
schema_infer_max_record: Some(3),
..CsvFormat::default()
};
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Int64: NULL",
"d: Int64: NULL"
],
formatted
);
let json = CsvFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.csv".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Int64: NULL",
"d: Utf8: NULL"
],
formatted
);
}
}

View File

@@ -1,121 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::BufReader;
use std::sync::Arc;
use arrow::datatypes::SchemaRef;
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
use async_trait::async_trait;
use common_runtime;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::io::SyncIoBridge;
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::file_format::{self, FileFormat};
#[derive(Debug)]
pub struct JsonFormat {
pub schema_infer_max_record: Option<usize>,
pub compression_type: CompressionType,
}
impl Default for JsonFormat {
fn default() -> Self {
Self {
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
compression_type: CompressionType::UNCOMPRESSED,
}
}
}
#[async_trait]
impl FileFormat for JsonFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
let reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let decoded = self.compression_type.convert_async_read(reader);
let schema_infer_max_record = self.schema_infer_max_record;
common_runtime::spawn_blocking_read(move || {
let mut reader = BufReader::new(SyncIoBridge::new(decoded));
let iter = ValueIter::new(&mut reader, schema_infer_max_record);
let schema = infer_json_schema_from_iterator(iter)
.context(error::InferSchemaSnafu { path: &path })?;
Ok(Arc::new(schema))
})
.await
.context(error::JoinHandleSnafu)?
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::FileFormat;
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/json").display().to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let json = JsonFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "simple.json".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec![
"a: Int64: NULL",
"b: Float64: NULL",
"c: Boolean: NULL",
"d: Utf8: NULL",
],
formatted
);
}
#[tokio::test]
async fn infer_schema_with_limit() {
let json = JsonFormat {
schema_infer_max_record: Some(3),
..JsonFormat::default()
};
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "schema_infer_limit.json".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(
vec!["a: Int64: NULL", "b: Float64: NULL", "c: Boolean: NULL"],
formatted
);
}
}

View File

@@ -1,78 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow_schema::SchemaRef;
use async_trait::async_trait;
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
use datafusion::parquet::arrow::parquet_to_arrow_schema;
use object_store::ObjectStore;
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::file_format::FileFormat;
#[derive(Debug, Default)]
pub struct ParquetFormat {}
#[async_trait]
impl FileFormat for ParquetFormat {
async fn infer_schema(&self, store: &ObjectStore, path: String) -> Result<SchemaRef> {
let mut reader = store
.reader(&path)
.await
.context(error::ReadObjectSnafu { path: &path })?;
let metadata = reader
.get_metadata()
.await
.context(error::ReadParquetSnafuSnafu)?;
let file_metadata = metadata.file_metadata();
let schema = parquet_to_arrow_schema(
file_metadata.schema_descr(),
file_metadata.key_value_metadata(),
)
.context(error::ParquetToSchemaSnafu)?;
Ok(Arc::new(schema))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::file_format::FileFormat;
use crate::test_util::{self, format_schema, test_store};
fn test_data_root() -> String {
test_util::get_data_dir("tests/parquet")
.display()
.to_string()
}
#[tokio::test]
async fn infer_schema_basic() {
let json = ParquetFormat::default();
let store = test_store(&test_data_root());
let schema = json
.infer_schema(&store, "basic.parquet".to_string())
.await
.unwrap();
let formatted: Vec<_> = format_schema(schema);
assert_eq!(vec!["num: Int64: NULL", "str: Utf8: NULL"], formatted);
}
}

View File

@@ -1,21 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod compression;
pub mod error;
pub mod file_format;
pub mod lister;
pub mod object_store;
pub mod test_util;
pub mod util;

View File

@@ -1,83 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::{future, TryStreamExt};
use object_store::{Entry, ObjectStore};
use regex::Regex;
use snafu::ResultExt;
use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Source {
Filename(String),
Dir,
}
pub struct Lister {
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
}
impl Lister {
pub fn new(
object_store: ObjectStore,
source: Source,
path: String,
regex: Option<Regex>,
) -> Self {
Lister {
object_store,
source,
path,
regex,
}
}
pub async fn list(&self) -> Result<Vec<Entry>> {
match &self.source {
Source::Dir => {
let streamer = self
.object_store
.list(&self.path)
.await
.context(error::ListObjectsSnafu { path: &self.path })?;
streamer
.try_filter(|f| {
let res = self
.regex
.as_ref()
.map(|x| x.is_match(f.name()))
.unwrap_or(true);
future::ready(res)
})
.try_collect::<Vec<_>>()
.await
.context(error::ListObjectsSnafu { path: &self.path })
}
Source::Filename(filename) => {
// make sure this file exists
let file_full_path = format!("{}{}", self.path, filename);
let _ = self.object_store.stat(&file_full_path).await.context(
error::ListObjectsSnafu {
path: &file_full_path,
},
)?;
Ok(vec![Entry::new(&file_full_path)])
}
}
}
}
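A sketch of driving `Lister` to enumerate matching objects, assuming the module's existing `ObjectStore`, `Entry` and `Regex` imports; the "data/" path and ".csv" filter are illustrative:

```rust
// Lists every object under "data/" whose name ends in ".csv".
async fn list_csv(object_store: ObjectStore) -> Result<Vec<Entry>> {
    let regex = Some(Regex::new(r"\.csv$").expect("valid regex"));
    let lister = Lister::new(object_store, Source::Dir, "data/".to_string(), regex);
    lister.list().await
}
```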

View File

@@ -1,60 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod fs;
pub mod s3;
use std::collections::HashMap;
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use url::{ParseError, Url};
use self::fs::build_fs_backend;
use self::s3::build_s3_backend;
use crate::error::{self, Result};
pub const FS_SCHEMA: &str = "FS";
pub const S3_SCHEMA: &str = "S3";
/// Parses a URL and returns (schema, Option<host>, path).
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
let parsed_url = Url::parse(url);
match parsed_url {
Ok(url) => Ok((
url.scheme().to_string(),
url.host_str().map(|s| s.to_string()),
url.path().to_string(),
)),
Err(ParseError::RelativeUrlWithoutBase) => {
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
}
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
}
}
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
let (schema, host, _path) = parse_url(url)?;
match schema.to_uppercase().as_str() {
S3_SCHEMA => {
let host = host.context(error::EmptyHostPathSnafu {
url: url.to_string(),
})?;
Ok(build_s3_backend(&host, "/", connection)?)
}
FS_SCHEMA => Ok(build_fs_backend("/")?),
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
}
}
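The parsing rules above can be exercised like this; the assertions restate behaviour encoded in `parse_url`, while the helper itself is a sketch using the module's existing imports:

```rust
fn backend_examples() -> Result<()> {
    // An s3 URL splits into scheme, bucket host and path.
    let (schema, host, path) = parse_url("s3://bucket/to/path/")?;
    assert_eq!("s3", schema);
    assert_eq!(Some("bucket".to_string()), host);
    assert_eq!("/to/path/", path);

    // A bare relative path has no base URL and falls back to the FS schema.
    let (schema, host, _path) = parse_url("to/path/file")?;
    assert_eq!(FS_SCHEMA, schema);
    assert!(host.is_none());

    // The fs backend ignores connection options entirely.
    let _store = build_backend("to/path/file", HashMap::new())?;
    Ok(())
}
```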

View File

@@ -1,28 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use object_store::services::Fs;
use object_store::ObjectStore;
use snafu::ResultExt;
use crate::error::{BuildBackendSnafu, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let mut builder = Fs::default();
builder.root(root);
let object_store = ObjectStore::new(builder)
.context(BuildBackendSnafu)?
.finish();
Ok(object_store)
}

View File

@@ -1,79 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use object_store::services::S3;
use object_store::ObjectStore;
use snafu::ResultExt;
use crate::error::{self, Result};
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
pub fn build_s3_backend(
host: &str,
path: &str,
connection: HashMap<String, String>,
) -> Result<ObjectStore> {
let mut builder = S3::default();
builder.root(path);
builder.bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
error::InvalidConnectionSnafu {
msg: format!(
"failed to parse the option {}={}, {}",
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
),
}
.build()
})?;
if enable {
builder.enable_virtual_host_style();
}
}
Ok(ObjectStore::new(builder)
.context(error::BuildBackendSnafu)?
.finish())
}
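A sketch of assembling the connection map consumed by `build_s3_backend`; the endpoint and all credential values are placeholders (e.g. a local MinIO-style setup), and the helper relies on the module's existing imports:

```rust
fn demo_s3_store() -> Result<ObjectStore> {
    let mut connection = HashMap::new();
    // Only the keys recognized above take effect; unknown keys are ignored.
    connection.insert(ENDPOINT_URL.to_string(), "http://127.0.0.1:9000".to_string());
    connection.insert(REGION.to_string(), "us-east-1".to_string());
    connection.insert(ACCESS_KEY_ID.to_string(), "test-key".to_string());
    connection.insert(SECRET_ACCESS_KEY.to_string(), "test-secret".to_string());
    // "bucket" would be the host component of an s3:// URL; "/" is the root path.
    build_s3_backend("bucket", "/", connection)
}
```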

View File

@@ -1,48 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
use arrow_schema::SchemaRef;
use object_store::services::Fs;
use object_store::ObjectStore;
pub fn get_data_dir(path: &str) -> PathBuf {
// https://doc.rust-lang.org/cargo/reference/environment-variables.html
let dir = env!("CARGO_MANIFEST_DIR");
PathBuf::from(dir).join(path)
}
pub fn format_schema(schema: SchemaRef) -> Vec<String> {
schema
.fields()
.iter()
.map(|f| {
format!(
"{}: {:?}: {}",
f.name(),
f.data_type(),
if f.is_nullable() { "NULL" } else { "NOT NULL" }
)
})
.collect()
}
pub fn test_store(root: &str) -> ObjectStore {
let mut builder = Fs::default();
builder.root(root);
ObjectStore::new(builder).unwrap().finish()
}

View File

@@ -1,125 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
if path.is_empty() {
("/".to_string(), None)
} else if path.ends_with('/') {
(path.to_string(), None)
} else if let Some(idx) = path.rfind('/') {
(
path[..idx + 1].to_string(),
Some(path[idx + 1..].to_string()),
)
} else {
("/".to_string(), Some(path.to_string()))
}
}
#[cfg(test)]
mod tests {
use url::Url;
use super::*;
#[test]
fn test_parse_uri() {
struct Test<'a> {
uri: &'a str,
expected_path: &'a str,
expected_schema: &'a str,
}
let tests = [
Test {
uri: "s3://bucket/to/path/",
expected_path: "/to/path/",
expected_schema: "s3",
},
Test {
uri: "fs:///to/path/",
expected_path: "/to/path/",
expected_schema: "fs",
},
Test {
uri: "fs:///to/path/file",
expected_path: "/to/path/file",
expected_schema: "fs",
},
];
for test in tests {
let parsed_uri = Url::parse(test.uri).unwrap();
assert_eq!(parsed_uri.path(), test.expected_path);
assert_eq!(parsed_uri.scheme(), test.expected_schema);
}
}
#[test]
fn test_parse_path_and_dir() {
let parsed = Url::from_file_path("/to/path/file").unwrap();
assert_eq!(parsed.path(), "/to/path/file");
let parsed = Url::from_directory_path("/to/path/").unwrap();
assert_eq!(parsed.path(), "/to/path/");
}
#[test]
fn test_find_dir_and_filename() {
struct Test<'a> {
path: &'a str,
expected_dir: &'a str,
expected_filename: Option<String>,
}
let tests = [
Test {
path: "to/path/",
expected_dir: "to/path/",
expected_filename: None,
},
Test {
path: "to/path/filename",
expected_dir: "to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/to/path/filename",
expected_dir: "/to/path/",
expected_filename: Some("filename".into()),
},
Test {
path: "/",
expected_dir: "/",
expected_filename: None,
},
Test {
path: "filename",
expected_dir: "/",
expected_filename: Some("filename".into()),
},
Test {
path: "",
expected_dir: "/",
expected_filename: None,
},
];
for test in tests {
let (path, filename) = find_dir_and_filename(test.path);
assert_eq!(test.expected_dir, path);
assert_eq!(test.expected_filename, filename)
}
}
}

View File

@@ -1,24 +0,0 @@
### Parquet
`parquet/basic.parquet` was converted from `csv/basic.csv` via [bdt](https://github.com/andygrove/bdt).
Contents of `parquet/basic.parquet`:
Data:
```
+-----+-------+
| num | str |
+-----+-------+
| 5 | test |
| 2 | hello |
| 4 | foo |
+-----+-------+
```
Schema:
```
+-------------+-----------+-------------+
| column_name | data_type | is_nullable |
+-------------+-----------+-------------+
| num | Int64 | YES |
| str | Utf8 | YES |
+-------------+-----------+-------------+
```

View File

@@ -1,4 +0,0 @@
num,str
5,test
2,hello
4,foo

View File

@@ -1,5 +0,0 @@
a,b,c,d
1,2,3,4
1,2,3,4
1,2.0,3,4
1,2,4,test

View File

@@ -1,11 +0,0 @@
c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13
c,2,1,18109,2033001162,-6513304855495910254,25,43062,1491205016,5863949479783605708,0.110830784,0.9294097332465232,6WfVFBVGJSQb7FhA7E0lBwdvjfZnSW
d,5,-40,22614,706441268,-7542719935673075327,155,14337,3373581039,11720144131976083864,0.69632107,0.3114712539863804,C2GT5KVyOPZpgKVl110TyZO0NcJ434
b,1,29,-18218,994303988,5983957848665088916,204,9489,3275293996,14857091259186476033,0.53840446,0.17909035118828576,AyYVExXK6AR2qUTxNZ7qRHQOVGMLcz
a,1,-85,-15154,1171968280,1919439543497968449,77,52286,774637006,12101411955859039553,0.12285209,0.6864391962767343,0keZ5G8BffGwgF2RwQD59TFzMStxCB
b,5,-82,22080,1824882165,7373730676428214987,208,34331,3342719438,3330177516592499461,0.82634634,0.40975383525297016,Ig1QcuKsjHXkproePdERo2w0mYzIqd
b,4,-111,-1967,-4229382,1892872227362838079,67,9832,1243785310,8382489916947120498,0.06563997,0.152498292971736,Sfx0vxv1skzZWT1PqVdoRDdO6Sb6xH
e,3,104,-25136,1738331255,300633854973581194,139,20807,3577318119,13079037564113702254,0.40154034,0.7764360990307122,DuJNG8tufSqW0ZstHqWj3aGvFLMg4A
a,3,13,12613,1299719633,2020498574254265315,191,17835,3998790955,14881411008939145569,0.041445434,0.8813167497816289,Amn2K87Db5Es3dFQO9cw9cvpAM6h35
d,1,38,18384,-335410409,-1632237090406591229,26,57510,2712615025,1842662804748246269,0.6064476,0.6404495093354053,4HX6feIvmNXBN7XGqgO4YVBkhu8GDI
a,4,-38,20744,762932956,308913475857409919,7,45465,1787652631,878137512938218976,0.7459874,0.02182578039211991,ydkwycaISlYSlEq3TlkS2m15I2pcp8

View File

@@ -1,4 +0,0 @@
{"a":1}
{"a":-10, "b":-3.5}
{"a":2, "b":0.6, "c":false}
{"a":1, "b":2.0, "c":false, "d":"4"}

View File

@@ -1,12 +0,0 @@
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":-10, "b":-3.5, "c":true, "d":"4"}
{"a":2, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":7, "b":-3.5, "c":true, "d":"4"}
{"a":1, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":5, "b":-3.5, "c":true, "d":"4"}
{"a":1, "b":0.6, "c":false, "d":"text"}
{"a":1, "b":2.0, "c":false, "d":"4"}
{"a":1, "b":-3.5, "c":true, "d":"4"}
{"a":100000000000000, "b":0.6, "c":false, "d":"text"}

View File

@@ -23,12 +23,10 @@ pub trait ErrorExt: std::error::Error {
StatusCode::Unknown
}
// TODO(ruihang): remove this default implementation
/// Get the location of this error. Returns None if the location is unavailable.
/// The `_opt` suffix avoids confusion with the similar method in `std::error::Error`.
fn location_opt(&self) -> Option<crate::snafu::Location> {
None
}
/// Get a reference to the backtrace of this error. Returns None if the backtrace is unavailable.
// The `_opt` suffix avoids confusion with the similar method in `std::error::Error`; once backtrace
// in std is stable, we can deprecate this method.
fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace>;
/// Returns the error as [Any](std::any::Any) so that it can be
/// downcast to a specific implementation.
@@ -73,8 +71,8 @@ impl crate::ext::ErrorExt for BoxedError {
self.inner.status_code()
}
fn location_opt(&self) -> Option<crate::snafu::Location> {
self.inner.location_opt()
fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
self.inner.backtrace_opt()
}
fn as_any(&self) -> &dyn std::any::Any {
@@ -86,7 +84,7 @@ impl crate::ext::ErrorExt for BoxedError {
// via `ErrorCompat::backtrace()`.
impl crate::snafu::ErrorCompat for BoxedError {
fn backtrace(&self) -> Option<&crate::snafu::Backtrace> {
None
self.inner.backtrace_opt()
}
}
@@ -120,7 +118,7 @@ impl crate::ext::ErrorExt for PlainError {
self.status_code
}
fn location_opt(&self) -> Option<crate::snafu::Location> {
fn backtrace_opt(&self) -> Option<&crate::snafu::Backtrace> {
None
}
@@ -128,3 +126,62 @@ impl crate::ext::ErrorExt for PlainError {
self as _
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use snafu::ErrorCompat;
use super::*;
use crate::format::DebugFormat;
use crate::mock::MockError;
#[test]
fn test_opaque_error_without_backtrace() {
let err = BoxedError::new(MockError::new(StatusCode::Internal));
assert!(err.backtrace_opt().is_none());
assert_eq!(StatusCode::Internal, err.status_code());
assert!(err.as_any().downcast_ref::<MockError>().is_some());
assert!(err.source().is_none());
assert!(ErrorCompat::backtrace(&err).is_none());
}
#[test]
fn test_opaque_error_with_backtrace() {
let err = BoxedError::new(MockError::with_backtrace(StatusCode::Internal));
assert!(err.backtrace_opt().is_some());
assert_eq!(StatusCode::Internal, err.status_code());
assert!(err.as_any().downcast_ref::<MockError>().is_some());
assert!(err.source().is_none());
assert!(ErrorCompat::backtrace(&err).is_some());
let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
let fmt_msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!(msg, fmt_msg);
let msg = err.to_string();
assert!(msg.contains("Internal"));
}
#[test]
fn test_opaque_error_with_source() {
let leaf_err = MockError::with_backtrace(StatusCode::Internal);
let internal_err = MockError::with_source(leaf_err);
let err = BoxedError::new(internal_err);
assert!(err.backtrace_opt().is_some());
assert_eq!(StatusCode::Internal, err.status_code());
assert!(err.as_any().downcast_ref::<MockError>().is_some());
assert!(err.source().is_some());
let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
assert!(msg.contains("Caused by"));
assert!(ErrorCompat::backtrace(&err).is_some());
}
}
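
Taken together, the new trait surface asks each error type to expose its snafu backtrace through `backtrace_opt`. A minimal sketch of a conforming implementation, assuming the `ErrorExt` shape shown above (the `crate::ext` path and the `DemoError` type are illustrative, not from the repo; a field named `backtrace` is captured implicitly by snafu's derive):

```rust
use snafu::{Backtrace, ErrorCompat, Snafu};

#[derive(Debug, Snafu)]
#[snafu(display("demo failure"))]
struct DemoError {
    backtrace: Backtrace,
}

impl crate::ext::ErrorExt for DemoError {
    // Delegate to snafu's ErrorCompat so the backtrace captured by the
    // derive is what callers observe through the trait.
    fn backtrace_opt(&self) -> Option<&Backtrace> {
        ErrorCompat::backtrace(self)
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    // status_code() keeps its default (StatusCode::Unknown) in this sketch.
}
```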

View File

@@ -33,9 +33,9 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
// The source error uses debug format for more verbose info.
write!(f, " Caused by: {source:?}")?;
}
if let Some(location) = self.0.location_opt() {
if let Some(backtrace) = self.0.backtrace_opt() {
// Add a newline to separate causes and backtrace.
write!(f, " at: {location}")?;
write!(f, "\nBacktrace:\n{backtrace}")?;
}
Ok(())
@@ -47,7 +47,7 @@ mod tests {
use std::any::Any;
use snafu::prelude::*;
use snafu::{GenerateImplicitData, Location};
use snafu::{Backtrace, GenerateImplicitData};
use super::*;
@@ -56,7 +56,7 @@ mod tests {
struct Leaf;
impl ErrorExt for Leaf {
fn location_opt(&self) -> Option<Location> {
fn backtrace_opt(&self) -> Option<&Backtrace> {
None
}
@@ -66,14 +66,14 @@ mod tests {
}
#[derive(Debug, Snafu)]
#[snafu(display("This is a leaf with location"))]
struct LeafWithLocation {
location: Location,
#[snafu(display("This is a leaf with backtrace"))]
struct LeafWithBacktrace {
backtrace: Backtrace,
}
impl ErrorExt for LeafWithLocation {
fn location_opt(&self) -> Option<Location> {
None
impl ErrorExt for LeafWithBacktrace {
fn backtrace_opt(&self) -> Option<&Backtrace> {
Some(&self.backtrace)
}
fn as_any(&self) -> &dyn Any {
@@ -86,12 +86,12 @@ mod tests {
struct Internal {
#[snafu(source)]
source: Leaf,
location: Location,
backtrace: Backtrace,
}
impl ErrorExt for Internal {
fn location_opt(&self) -> Option<Location> {
None
fn backtrace_opt(&self) -> Option<&Backtrace> {
Some(&self.backtrace)
}
fn as_any(&self) -> &dyn Any {
@@ -106,21 +106,19 @@ mod tests {
let msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!("This is a leaf error.", msg);
let err = LeafWithLocation {
location: Location::generate(),
let err = LeafWithBacktrace {
backtrace: Backtrace::generate(),
};
// TODO(ruihang): display location here
let msg = format!("{:?}", DebugFormat::new(&err));
assert!(msg.starts_with("This is a leaf with location."));
assert!(msg.starts_with("This is a leaf with backtrace.\nBacktrace:\n"));
let err = Internal {
source: Leaf,
location: Location::generate(),
backtrace: Backtrace::generate(),
};
// TODO(ruihang): display location here
let msg = format!("{:?}", DebugFormat::new(&err));
assert!(msg.contains("Internal error. Caused by: Leaf"));
assert!(msg.contains("Internal error. Caused by: Leaf\nBacktrace:\n"));
}
}
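
The net effect of the change: `DebugFormat` now appends the backtrace as its own block instead of an ` at: {location}` suffix. A small usage sketch reusing the test types above:

```rust
let err = LeafWithBacktrace {
    backtrace: Backtrace::generate(),
};
// Debug output starts with the display message, then the backtrace block:
//   This is a leaf with backtrace.
//   Backtrace:
//   ...frames...
println!("{:?}", DebugFormat::new(&err));
```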

View File

@@ -17,7 +17,7 @@
use std::any::Any;
use std::fmt;
use snafu::Location;
use snafu::GenerateImplicitData;
use crate::prelude::*;
@@ -25,19 +25,34 @@ use crate::prelude::*;
#[derive(Debug)]
pub struct MockError {
pub code: StatusCode,
backtrace: Option<Backtrace>,
source: Option<Box<MockError>>,
}
impl MockError {
/// Create a new [MockError] without backtrace.
pub fn new(code: StatusCode) -> MockError {
MockError { code, source: None }
MockError {
code,
backtrace: None,
source: None,
}
}
/// Create a new [MockError] with backtrace.
pub fn with_backtrace(code: StatusCode) -> MockError {
MockError {
code,
backtrace: Some(Backtrace::generate()),
source: None,
}
}
/// Create a new [MockError] with source.
pub fn with_source(source: MockError) -> MockError {
MockError {
code: source.code,
backtrace: None,
source: Some(Box::new(source)),
}
}
@@ -60,11 +75,39 @@ impl ErrorExt for MockError {
self.code
}
fn location_opt(&self) -> Option<Location> {
None
fn backtrace_opt(&self) -> Option<&Backtrace> {
self.backtrace
.as_ref()
.or_else(|| self.source.as_ref().and_then(|err| err.backtrace_opt()))
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl ErrorCompat for MockError {
fn backtrace(&self) -> Option<&Backtrace> {
self.backtrace_opt()
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::*;
#[test]
fn test_mock_error() {
let err = MockError::new(StatusCode::Unknown);
assert!(err.backtrace_opt().is_none());
let err = MockError::with_backtrace(StatusCode::Unknown);
assert!(err.backtrace_opt().is_some());
let root_err = MockError::with_source(err);
assert!(root_err.source().is_some());
assert!(root_err.backtrace_opt().is_some());
}
}
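
The interesting detail is the fallback in `backtrace_opt`: when an error carries no backtrace of its own, it walks the source chain. The same rule in isolation, with a stand-in type instead of `snafu::Backtrace`:

```rust
struct Node {
    trace: Option<String>, // stand-in for snafu::Backtrace
    source: Option<Box<Node>>,
}

impl Node {
    fn trace_opt(&self) -> Option<&String> {
        // Prefer this node's own trace; otherwise recurse into the source,
        // which is why MockError::with_source(err_with_backtrace) still
        // reports a backtrace in the test above.
        self.trace
            .as_ref()
            .or_else(|| self.source.as_ref().and_then(|s| s.trace_opt()))
    }
}
```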

View File

@@ -36,8 +36,6 @@ macro_rules! ok {
}
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
let mut result = TokenStream::new();
// extract arg map
let arg_pairs = parse_macro_input!(args as AttributeArgs);
let arg_span = arg_pairs[0].span();
@@ -61,17 +59,12 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
let arg_types = ok!(extract_input_types(inputs));
// build the struct and its impl block
// only do this when `display_name` is specified
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
display_name,
);
result.extend(struct_code);
}
let struct_code = build_struct(
attrs,
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
ok!(get_ident(&arg_map, "display_name", arg_span)),
);
let calc_fn_code = build_calc_fn(
ok!(get_ident(&arg_map, "name", arg_span)),
arg_types,
@@ -84,6 +77,8 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
}
.into();
let mut result = TokenStream::new();
result.extend(struct_code);
result.extend(calc_fn_code);
result.extend(input_fn_code);
result
@@ -212,7 +207,7 @@ fn build_calc_fn(
fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), #num_params);
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.to_data().into())?; )*
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.data().clone().into())?; )*
// TODO(ruihang): add ensure!()
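
For context, `process_range_fn` reads its configuration from attribute arguments via `get_ident`; the two sides of this hunk differ on whether `display_name` is optional or required. A hypothetical invocation (the attribute name, argument spelling, and function signature below are assumptions inferred from the identifiers above, not taken from the repo):

```rust
#[range_fn(name = "SumOverRange", display_name = "sum_over_range")]
fn calc(values: &[f64]) -> f64 {
    // The macro consumes this body and re-emits it inside the generated
    // struct impl and ColumnarValue-based calc function.
    values.iter().sum()
}
```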

View File

@@ -11,10 +11,11 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod to_unixtime;
use to_unixtime::ToUnixtimeFunction;
use std::sync::Arc;
mod from_unixtime;
use from_unixtime::FromUnixtimeFunction;
use crate::scalars::function_registry::FunctionRegistry;
@@ -22,6 +23,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction::default()));
registry.register(Arc::new(FromUnixtimeFunction::default()));
}
}

View File

@@ -0,0 +1,133 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! from_unixtime function.
// TODO(dennis): this can be removed after we upgrade DataFusion.
use std::fmt;
use std::sync::Arc;
use common_query::error::{
ArrowComputeSnafu, IntoVectorSnafu, Result, TypeCastSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::{DataType as ArrowDatatype, Int64Type};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{TimestampMillisecondVector, VectorRef};
use snafu::ResultExt;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct FromUnixtimeFunction;
const NAME: &str = "from_unixtime";
impl Function for FromUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_millisecond_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::int64_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
match columns[0].data_type() {
ConcreteDataType::Int64(_) => {
let array = columns[0].to_arrow_array();
// Our timestamp vector's time unit is millisecond
let array = compute::multiply_scalar_dyn::<Int64Type>(&array, 1000i64)
.context(ArrowComputeSnafu)?;
let arrow_datatype = &self.return_type(&[]).unwrap().as_arrow_type();
Ok(Arc::new(
TimestampMillisecondVector::try_from_arrow_array(
compute::cast(&array, arrow_datatype).context(TypeCastSnafu {
typ: ArrowDatatype::Int64,
})?,
)
.context(IntoVectorSnafu {
data_type: arrow_datatype.clone(),
})?,
))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for FromUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FROM_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::value::Value;
use datatypes::vectors::Int64Vector;
use super::*;
#[test]
fn test_from_unixtime() {
let f = FromUnixtimeFunction::default();
assert_eq!("from_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::int64_datatype()]
));
let times = vec![Some(1494410783), None, Some(1494410983)];
let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(3, vector.len());
for (i, t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Timestamp(ts) => {
assert_eq!(ts.value(), t.unwrap() * 1000);
}
_ => unreachable!(),
}
}
}
}
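
`eval` does the unit conversion up front: the input is interpreted as seconds while the output vector is millisecond-based, hence the multiply-by-1000 before the cast. The same kernel call in isolation (a standalone sketch assuming the arrow-rs version used by this diff, where `multiply_scalar_dyn` still exists):

```rust
use arrow::array::{Array, ArrayRef};
use arrow::compute::multiply_scalar_dyn;
use arrow::datatypes::Int64Type;

fn seconds_to_millis(secs: &dyn Array) -> Result<ArrayRef, arrow::error::ArrowError> {
    // 1 second == 1000 milliseconds; mirrors the multiply in eval() above.
    multiply_scalar_dyn::<Int64Type>(secs, 1000i64)
}

// e.g. seconds_to_millis(&arrow::array::Int64Array::from(vec![1494410783]))
// yields [1494410783000], matching the assertion in the test above.
```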

View File

@@ -1,148 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
use snafu::ensure;
use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct ToUnixtimeFunction;
const NAME: &str = "to_unixtime";
fn convert_to_seconds(arg: &str) -> Option<i64> {
match Timestamp::from_str(arg) {
Ok(ts) => {
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
Some(ts.value().div_euclid(sec_mul))
}
Err(_err) => None,
}
}
impl Function for ToUnixtimeFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::timestamp_second_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(
vec![ConcreteDataType::String(StringType)],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
error::InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let array = columns[0].to_arrow_array();
let vector = StringVector::try_from_arrow_array(&array).unwrap();
Ok(Arc::new(Int64Vector::from(
(0..vector.len())
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
.collect::<Vec<_>>(),
)))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for ToUnixtimeFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TO_UNIXTIME")
}
}
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::value::Value;
use datatypes::vectors::StringVector;
use super::{ToUnixtimeFunction, *};
use crate::scalars::Function;
#[test]
fn test_to_unixtime() {
let f = ToUnixtimeFunction::default();
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::timestamp_second_datatype(),
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Exact(valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::String(StringType)]
));
let times = vec![
Some("2023-03-01T06:35:02Z"),
None,
Some("2022-06-30T23:59:60Z"),
Some("invalid_time_stamp"),
];
let results = vec![Some(1677652502), None, Some(1656633600), None];
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}
}
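
`convert_to_seconds` normalizes whatever unit `Timestamp::from_str` produced: `sec_mul` is the ratio of the two units' factors, and `div_euclid` keeps the result correct for negative (pre-epoch) values. A worked example, assuming nanosecond-based factors (`Second = 1_000_000_000`, `Millisecond = 1_000_000`):

```rust
// "2023-03-01T06:35:02Z" parsed at millisecond precision:
let value_ms: i64 = 1_677_652_502_000;
let sec_mul: i64 = 1_000_000_000 / 1_000_000; // Second.factor() / Millisecond.factor()
assert_eq!(value_ms.div_euclid(sec_mul), 1_677_652_502); // matches the test above

// div_euclid rounds toward negative infinity, so pre-epoch values still
// floor to the correct second: -1500 ms -> -2 s, not -1.
assert_eq!((-1_500i64).div_euclid(1_000), -2);
```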

View File

@@ -178,7 +178,6 @@ pub fn create_expr_to_request(
primary_key_indices,
create_if_not_exists: expr.create_if_not_exists,
table_options,
engine: expr.engine,
})
}

View File

@@ -17,7 +17,7 @@ use std::any::Any;
use api::DecodeError;
use common_error::ext::ErrorExt;
use common_error::prelude::{Snafu, StatusCode};
use snafu::Location;
use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -32,7 +32,7 @@ pub enum Error {
DecodeInsert { source: DecodeError },
#[snafu(display("Illegal insert data"))]
IllegalInsertData { location: Location },
IllegalInsertData,
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
@@ -48,14 +48,17 @@ pub enum Error {
DuplicatedTimestampColumn {
exists: String,
duplicated: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Missing timestamp column, msg: {}", msg))]
MissingTimestampColumn { msg: String, location: Location },
MissingTimestampColumn { msg: String, backtrace: Backtrace },
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto { err_msg: String, location: Location },
InvalidColumnProto {
err_msg: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to create vector, source: {}", source))]
CreateVector {
#[snafu(backtrace)]
@@ -63,7 +66,7 @@ pub enum Error {
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
MissingField { field: String, backtrace: Backtrace },
#[snafu(display("Invalid column default constraint, source: {}", source))]
ColumnDefaultConstraint {
@@ -110,6 +113,9 @@ impl ErrorExt for Error {
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self

View File

@@ -195,7 +195,6 @@ pub fn build_create_expr_from_insertion(
table_id: Option<TableId>,
table_name: &str,
columns: &[Column],
engine: &str,
) -> Result<CreateTableExpr> {
let mut new_columns: HashSet<String> = HashSet::default();
let mut column_defs = Vec::default();
@@ -257,7 +256,6 @@ pub fn build_create_expr_from_insertion(
table_options: Default::default(),
table_id: table_id.map(|id| api::v1::TableId { id }),
region_ids: vec![0], // TODO(hl): region id should be allocated by frontend
engine: engine.to_string(),
};
Ok(expr)
@@ -457,7 +455,6 @@ mod tests {
use api::v1::column::{self, SemanticType, Values};
use api::v1::{Column, ColumnDataType};
use common_base::BitVec;
use common_catalog::consts::MITO_ENGINE;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use common_time::timestamp::Timestamp;
@@ -496,22 +493,13 @@ mod tests {
let table_id = Some(10);
let table_name = "test_metric";
assert!(
build_create_expr_from_insertion("", "", table_id, table_name, &[], MITO_ENGINE)
.is_err()
);
assert!(build_create_expr_from_insertion("", "", table_id, table_name, &[]).is_err());
let insert_batch = mock_insert_batch();
let create_expr = build_create_expr_from_insertion(
"",
"",
table_id,
table_name,
&insert_batch.0,
MITO_ENGINE,
)
.unwrap();
let create_expr =
build_create_expr_from_insertion("", "", table_id, table_name, &insert_batch.0)
.unwrap();
assert_eq!(table_id, create_expr.table_id.map(|x| x.id));
assert_eq!(table_name, create_expr.table_name);

View File

@@ -26,7 +26,7 @@ tower = "0.4"
[dev-dependencies]
criterion = "0.4"
rand.workspace = true
rand = "0.8"
[[bench]]
name = "bench_main"

View File

@@ -16,7 +16,7 @@ use std::any::Any;
use std::io;
use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Location, Snafu};
use snafu::{Backtrace, ErrorCompat, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
@@ -29,11 +29,11 @@ pub enum Error {
#[snafu(display("Invalid config file path, {}", source))]
InvalidConfigFilePath {
source: io::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },
MissingField { field: String, backtrace: Backtrace },
#[snafu(display(
"Write type mismatch, column name: {}, expected: {}, actual: {}",
@@ -45,13 +45,13 @@ pub enum Error {
column_name: String,
expected: String,
actual: String,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to create gRPC channel, source: {}", source))]
CreateChannel {
source: tonic::transport::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to create RecordBatch, source: {}", source))]
@@ -61,7 +61,7 @@ pub enum Error {
},
#[snafu(display("Failed to convert Arrow type: {}", from))]
Conversion { from: String, location: Location },
Conversion { from: String, backtrace: Backtrace },
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
@@ -72,11 +72,14 @@ pub enum Error {
#[snafu(display("Failed to decode FlightData, source: {}", source))]
DecodeFlightData {
source: api::DecodeError,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Invalid FlightData, reason: {}", reason))]
InvalidFlightData { reason: String, location: Location },
InvalidFlightData {
reason: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to convert Arrow Schema, source: {}", source))]
ConvertArrowSchema {
@@ -104,7 +107,65 @@ impl ErrorExt for Error {
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use snafu::{OptionExt, ResultExt};
use super::*;
type StdResult<E> = std::result::Result<(), E>;
fn throw_none_option() -> Option<String> {
None
}
#[test]
fn test_missing_field_error() {
let e = throw_none_option()
.context(MissingFieldSnafu { field: "test" })
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_type_mismatch_error() {
let e = throw_none_option()
.context(TypeMismatchSnafu {
column_name: "",
expected: "",
actual: "",
})
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::InvalidArguments);
}
#[test]
fn test_create_channel_error() {
fn throw_tonic_error() -> StdResult<tonic::transport::Error> {
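// "http//http" is intentionally malformed so Endpoint::new fails.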
tonic::transport::Endpoint::new("http//http").map(|_| ())
}
let e = throw_tonic_error()
.context(CreateChannelSnafu)
.err()
.unwrap();
assert!(e.backtrace_opt().is_some());
assert_eq!(e.status_code(), StatusCode::Internal);
}
}

View File

@@ -16,7 +16,7 @@ use std::any::Any;
use std::path::PathBuf;
use common_error::prelude::{ErrorExt, StatusCode};
use snafu::{Location, Snafu};
use snafu::{Backtrace, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
@@ -30,7 +30,7 @@ pub enum Error {
ProfilingNotEnabled,
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
BuildTempPath { path: PathBuf, location: Location },
BuildTempPath { path: PathBuf, backtrace: Backtrace },
#[snafu(display("Failed to open temp file: {}", path))]
OpenTempFile {
@@ -56,6 +56,10 @@ impl ErrorExt for Error {
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
snafu::ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}

View File

@@ -6,7 +6,6 @@ license.workspace = true
[dependencies]
async-trait.workspace = true
async-stream.workspace = true
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
common-telemetry = { path = "../telemetry" }
@@ -15,7 +14,6 @@ object-store = { path = "../../object-store" }
serde.workspace = true
serde_json = "1.0"
smallvec = "1"
backon = "0.4.0"
snafu.workspace = true
tokio.workspace = true
uuid.workspace = true

View File

@@ -13,11 +13,9 @@
// limitations under the License.
use std::any::Any;
use std::string::FromUtf8Error;
use std::sync::Arc;
use common_error::prelude::*;
use snafu::Location;
use crate::procedure::ProcedureId;
@@ -35,25 +33,24 @@ pub enum Error {
},
#[snafu(display("Loader {} is already registered", name))]
LoaderConflict { name: String, location: Location },
LoaderConflict { name: String, backtrace: Backtrace },
#[snafu(display("Failed to serialize to json, source: {}", source))]
ToJson {
source: serde_json::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Procedure {} already exists", procedure_id))]
DuplicateProcedure {
procedure_id: ProcedureId,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
#[snafu(display("Failed to put {}, source: {}", key, source))]
PutState {
key: String,
#[snafu(backtrace)]
source: BoxedError,
source: object_store::Error,
},
#[snafu(display("Failed to delete {}, source: {}", key, source))]
@@ -62,18 +59,10 @@ pub enum Error {
source: object_store::Error,
},
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
DeleteStates {
keys: String,
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
#[snafu(display("Failed to list {}, source: {}", path, source))]
ListState {
path: String,
#[snafu(backtrace)]
source: BoxedError,
source: object_store::Error,
},
#[snafu(display("Failed to read {}, source: {}", key, source))]
@@ -85,7 +74,7 @@ pub enum Error {
#[snafu(display("Failed to deserialize from json, source: {}", source))]
FromJson {
source: serde_json::Error,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Procedure exec failed, source: {}", source))]
@@ -100,27 +89,14 @@ pub enum Error {
#[snafu(display("Failed to wait watcher, source: {}", source))]
WaitWatcher {
source: tokio::sync::watch::error::RecvError,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("Failed to execute procedure, source: {}", source))]
ProcedureExec {
source: Arc<Error>,
location: Location,
backtrace: Backtrace,
},
#[snafu(display(
"Procedure retry exceeded max times, procedure_id: {}, source:{}",
procedure_id,
source
))]
RetryTimesExceeded {
source: Arc<Error>,
procedure_id: ProcedureId,
},
#[snafu(display("Corrupted data, error: {source}"))]
CorruptedData { source: FromUtf8Error },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -128,26 +104,27 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::External { source }
| Error::PutState { source, .. }
| Error::DeleteStates { source, .. }
| Error::ListState { source, .. } => source.status_code(),
Error::External { source } => source.status_code(),
Error::ToJson { .. }
| Error::PutState { .. }
| Error::DeleteState { .. }
| Error::ListState { .. }
| Error::ReadState { .. }
| Error::FromJson { .. }
| Error::RetryTimesExceeded { .. }
| Error::RetryLater { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
StatusCode::InvalidArguments
}
Error::ProcedurePanic { .. } | Error::CorruptedData { .. } => StatusCode::Unexpected,
Error::ProcedurePanic { .. } => StatusCode::Unexpected,
Error::ProcedureExec { source, .. } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}

View File

@@ -17,7 +17,7 @@
pub mod error;
pub mod local;
mod procedure;
pub mod store;
mod store;
pub mod watcher;
pub use crate::error::{Error, Result};

View File

@@ -17,11 +17,10 @@ mod runner;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_telemetry::logging;
use object_store::ObjectStore;
use snafu::ensure;
use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::Notify;
@@ -30,7 +29,7 @@ use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
use crate::local::lock::LockMap;
use crate::local::runner::Runner;
use crate::procedure::BoxedProcedureLoader;
use crate::store::{ProcedureMessage, ProcedureStore, StateStoreRef};
use crate::store::{ObjectStateStore, ProcedureMessage, ProcedureStore, StateStoreRef};
use crate::{
BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
ProcedureWithId, Watcher,
@@ -290,35 +289,22 @@ impl ManagerContext {
/// Config for [LocalManager].
#[derive(Debug)]
pub struct ManagerConfig {
pub max_retry_times: usize,
pub retry_delay: Duration,
}
impl Default for ManagerConfig {
fn default() -> Self {
Self {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
}
}
/// Object store
pub object_store: ObjectStore,
}
/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
manager_ctx: Arc<ManagerContext>,
state_store: StateStoreRef,
max_retry_times: usize,
retry_delay: Duration,
}
impl LocalManager {
/// Create a new [LocalManager] with specific `config`.
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
pub fn new(config: ManagerConfig) -> LocalManager {
LocalManager {
manager_ctx: Arc::new(ManagerContext::new()),
state_store,
max_retry_times: config.max_retry_times,
retry_delay: config.retry_delay,
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
}
}
@@ -335,11 +321,7 @@ impl LocalManager {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: ExponentialBuilder::default()
.with_min_delay(self.retry_delay)
.with_max_times(self.max_retry_times),
store: ProcedureStore::new(self.state_store.clone()),
rolling_back: false,
};
let watcher = meta.state_receiver.clone();
@@ -429,7 +411,7 @@ impl ProcedureManager for LocalManager {
mod test_util {
use common_test_util::temp_dir::TempDir;
use object_store::services::Fs as Builder;
use object_store::ObjectStore;
use object_store::ObjectStoreBuilder;
use super::*;
@@ -439,9 +421,8 @@ mod test_util {
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
builder.root(store_dir);
ObjectStore::new(builder).unwrap().finish()
let accessor = Builder::default().root(store_dir).build().unwrap();
ObjectStore::new(accessor).finish()
}
}
@@ -453,7 +434,6 @@ mod tests {
use super::*;
use crate::error::Error;
use crate::store::ObjectStateStore;
use crate::{Context, Procedure, Status};
#[test]
@@ -562,11 +542,9 @@ mod tests {
fn test_register_loader() {
let dir = create_temp_dir("register");
let config = ManagerConfig {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
object_store: test_util::new_object_store(&dir),
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let manager = LocalManager::new(config);
manager
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
@@ -583,11 +561,9 @@ mod tests {
let dir = create_temp_dir("recover");
let object_store = test_util::new_object_store(&dir);
let config = ManagerConfig {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
object_store: object_store.clone(),
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
let manager = LocalManager::new(config);
manager
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
@@ -629,11 +605,9 @@ mod tests {
async fn test_submit_procedure() {
let dir = create_temp_dir("submit");
let config = ManagerConfig {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
object_store: test_util::new_object_store(&dir),
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let manager = LocalManager::new(config);
let procedure_id = ProcedureId::random();
assert!(manager
@@ -677,11 +651,9 @@ mod tests {
async fn test_state_changed_on_err() {
let dir = create_temp_dir("on_err");
let config = ManagerConfig {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
object_store: test_util::new_object_store(&dir),
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let manager = LocalManager::new(config);
#[derive(Debug)]
struct MockProcedure {

View File

@@ -15,15 +15,15 @@
use std::sync::Arc;
use std::time::Duration;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;
use crate::error::{ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};
const ERR_WAIT_DURATION: Duration = Duration::from_secs(30);
#[derive(Debug)]
enum ExecResult {
@@ -108,9 +108,7 @@ pub(crate) struct Runner {
pub(crate) procedure: BoxedProcedure,
pub(crate) manager_ctx: Arc<ManagerContext>,
pub(crate) step: u32,
pub(crate) exponential_builder: ExponentialBuilder,
pub(crate) store: ProcedureStore,
pub(crate) rolling_back: bool,
}
impl Runner {
@@ -166,56 +164,18 @@ impl Runner {
provider: self.manager_ctx.clone(),
};
self.rolling_back = false;
self.execute_once_with_retry(&ctx).await;
}
async fn execute_once_with_retry(&mut self, ctx: &Context) {
let mut retry = self.exponential_builder.build();
let mut retry_times = 0;
loop {
match self.execute_once(ctx).await {
ExecResult::Done | ExecResult::Failed => return,
match self.execute_once(&ctx).await {
ExecResult::Continue => (),
ExecResult::Done | ExecResult::Failed => return,
ExecResult::RetryLater => {
retry_times += 1;
if let Some(d) = retry.next() {
self.wait_on_err(d, retry_times).await;
} else {
assert!(self.meta.state().is_retrying());
if let Retrying { error } = self.meta.state() {
self.meta.set_state(ProcedureState::failed(Arc::new(
Error::RetryTimesExceeded {
source: error,
procedure_id: self.meta.id,
},
)))
}
return;
}
self.wait_on_err().await;
}
}
}
}
async fn rollback(&mut self, error: Arc<Error>) -> ExecResult {
if let Err(e) = self.rollback_procedure().await {
self.rolling_back = true;
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(error));
ExecResult::Failed
}
async fn execute_once(&mut self, ctx: &Context) -> ExecResult {
// if rolling_back, there is no need to execute again.
if self.rolling_back {
// We can definitely get the previous error here.
let state = self.meta.state();
let err = state.error().unwrap();
return self.rollback(err.clone()).await;
}
match self.procedure.execute(ctx).await {
Ok(status) => {
logging::debug!(
@@ -226,11 +186,8 @@ impl Runner {
status.need_persist(),
);
if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
return ExecResult::RetryLater;
}
if status.need_persist() && self.persist_procedure().await.is_err() {
return ExecResult::RetryLater;
}
match status {
@@ -239,8 +196,7 @@ impl Runner {
self.on_suspended(subprocedures).await;
}
Status::Done => {
if let Err(e) = self.commit_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
if self.commit_procedure().await.is_err() {
return ExecResult::RetryLater;
}
@@ -261,12 +217,17 @@ impl Runner {
);
if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
self.meta.set_state(ProcedureState::failed(Arc::new(e)));
// Write rollback key so we can skip this procedure while recovering procedures.
self.rollback(Arc::new(e)).await
if self.rollback_procedure().await.is_err() {
return ExecResult::RetryLater;
}
ExecResult::Failed
}
}
}
@@ -300,9 +261,7 @@ impl Runner {
procedure,
manager_ctx: self.manager_ctx.clone(),
step,
exponential_builder: self.exponential_builder.clone(),
store: self.store.clone(),
rolling_back: false,
};
// Insert the procedure. We already check the procedure existence before inserting
@@ -326,16 +285,8 @@ impl Runner {
});
}
/// Extend the retry time to wait for the next retry.
async fn wait_on_err(&self, d: Duration, i: u64) {
logging::info!(
"Procedure {}-{} retry for the {} times after {} millis",
self.procedure.type_name(),
self.meta.id,
i,
d.as_millis(),
);
time::sleep(d).await;
async fn wait_on_err(&self) {
time::sleep(ERR_WAIT_DURATION).await;
}
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
@@ -465,15 +416,14 @@ mod tests {
procedure,
manager_ctx: Arc::new(ManagerContext::new()),
step: 0,
exponential_builder: ExponentialBuilder::default(),
store,
rolling_back: false,
}
}
async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
let dir = format!("{procedure_id}/");
let lister = object_store.list(&dir).await.unwrap();
let object = object_store.object(&dir);
let lister = object.list().await.unwrap();
let mut files_in_dir: Vec<_> = lister
.map_ok(|de| de.name().to_string())
.try_collect()
@@ -794,7 +744,7 @@ mod tests {
let res = runner.execute_once(&ctx).await;
assert!(res.is_retry_later(), "{res:?}");
assert!(meta.state().is_retrying());
assert!(meta.state().is_running());
let res = runner.execute_once(&ctx).await;
assert!(res.is_done(), "{res:?}");
@@ -802,36 +752,6 @@ mod tests {
check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
}
#[tokio::test]
async fn test_execute_exceed_max_retry_later() {
let exec_fn =
|_| async { Err(Error::retry_later(MockError::new(StatusCode::Unexpected))) }.boxed();
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};
let dir = create_temp_dir("exceed_max_retry_later");
let meta = exceed_max_retry_later.new_meta(ROOT_ID);
let object_store = test_util::new_object_store(&dir);
let procedure_store = ProcedureStore::from(object_store.clone());
let mut runner = new_runner(
meta.clone(),
Box::new(exceed_max_retry_later),
procedure_store,
);
runner.exponential_builder = ExponentialBuilder::default()
.with_min_delay(Duration::from_millis(1))
.with_max_times(3);
// Run the runner and execute the procedure.
runner.execute_procedure_in_loop().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("Procedure retry exceeded max times"));
}
#[tokio::test]
async fn test_child_error() {
let mut times = 0;
@@ -899,7 +819,7 @@ mod tests {
// Replace the manager ctx.
runner.manager_ctx = manager_ctx;
// Run the runner and execute the procedure.
// Run the runer and execute the procedure.
runner.run().await;
let err = meta.state().error().unwrap().to_string();
assert!(err.contains("subprocedure failed"), "{err}");
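
One side of this diff drives retries through backon's `ExponentialBuilder` (bounded, growing delays) while the other sleeps a flat `ERR_WAIT_DURATION`. How the builder behaves in isolation, a sketch assuming backon 0.4 as pinned in the Cargo.toml hunk above:

```rust
use std::time::Duration;

use backon::{BackoffBuilder, ExponentialBuilder};

fn main() {
    // Matches the settings used elsewhere in this diff: a 500ms minimum
    // delay and at most 3 retries.
    let mut backoff = ExponentialBuilder::default()
        .with_min_delay(Duration::from_millis(500))
        .with_max_times(3)
        .build();
    // The built backoff is an Iterator<Item = Duration>; None means give up,
    // which is where the runner surfaces RetryTimesExceeded.
    while let Some(delay) = backoff.next() {
        println!("sleep {delay:?} before next attempt");
    }
}
```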

View File

@@ -206,8 +206,6 @@ pub enum ProcedureState {
Running,
/// The procedure is finished.
Done,
/// The procedure is failed and can be retried.
Retrying { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
Failed { error: Arc<Error> },
}
@@ -218,11 +216,6 @@ impl ProcedureState {
ProcedureState::Failed { error }
}
/// Returns a [ProcedureState] with retrying state.
pub fn retrying(error: Arc<Error>) -> ProcedureState {
ProcedureState::Retrying { error }
}
/// Returns true if the procedure state is running.
pub fn is_running(&self) -> bool {
matches!(self, ProcedureState::Running)
@@ -238,16 +231,10 @@ impl ProcedureState {
matches!(self, ProcedureState::Failed { .. })
}
/// Returns true if the procedure state is retrying.
pub fn is_retrying(&self) -> bool {
matches!(self, ProcedureState::Retrying { .. })
}
/// Returns the error.
pub fn error(&self) -> Option<&Arc<Error>> {
match self {
ProcedureState::Failed { error } => Some(error),
ProcedureState::Retrying { error } => Some(error),
_ => None,
}
}
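
One side of this diff removes the `Retrying` variant; with it gone, `error()` only ever reports a terminal failure. A hypothetical caller-side match over the richer variant set (names as shown above; `meta.state()` is an assumed accessor):

```rust
match meta.state() {
    ProcedureState::Running => { /* still executing */ }
    ProcedureState::Done => { /* finished successfully */ }
    ProcedureState::Retrying { error } => println!("will retry: {error}"),
    ProcedureState::Failed { error } => println!("gave up: {error}"),
}
```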

View File

@@ -26,7 +26,7 @@ use crate::error::{Result, ToJsonSnafu};
pub(crate) use crate::store::state_store::{ObjectStateStore, StateStoreRef};
use crate::{BoxedProcedure, ProcedureId};
pub mod state_store;
mod state_store;
/// Serialized data of a procedure.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -248,15 +248,15 @@ mod tests {
use async_trait::async_trait;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use object_store::services::Fs as Builder;
use object_store::ObjectStoreBuilder;
use super::*;
use crate::{Context, LockKey, Procedure, Status};
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let accessor = Builder::default().root(store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor).finish();
ProcedureStore::from(object_store)
}

View File

@@ -15,25 +15,22 @@
use std::pin::Pin;
use std::sync::Arc;
use async_stream::try_stream;
use async_trait::async_trait;
use common_error::ext::PlainError;
use common_error::prelude::{BoxedError, StatusCode};
use futures::{Stream, StreamExt};
use object_store::{EntryMode, Metakey, ObjectStore};
use futures::{Stream, TryStreamExt};
use object_store::{ObjectMode, ObjectStore};
use snafu::ResultExt;
use crate::error::{DeleteStateSnafu, ListStateSnafu, PutStateSnafu, Result};
use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};
/// Key value from state store.
pub type KeyValue = (String, Vec<u8>);
type KeyValue = (String, Vec<u8>);
/// Stream that yields [KeyValue].
pub type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
/// Storage layer for persisting procedure's state.
#[async_trait]
pub trait StateStore: Send + Sync {
pub(crate) trait StateStore: Send + Sync {
/// Puts `key` and `value` into the store.
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()>;
@@ -53,13 +50,13 @@ pub(crate) type StateStoreRef = Arc<dyn StateStore>;
/// [StateStore] based on [ObjectStore].
#[derive(Debug)]
pub struct ObjectStateStore {
pub(crate) struct ObjectStateStore {
store: ObjectStore,
}
impl ObjectStateStore {
/// Returns a new [ObjectStateStore] with specific `store`.
pub fn new(store: ObjectStore) -> ObjectStateStore {
pub(crate) fn new(store: ObjectStore) -> ObjectStateStore {
ObjectStateStore { store }
}
}
@@ -67,83 +64,49 @@ impl ObjectStateStore {
#[async_trait]
impl StateStore for ObjectStateStore {
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()> {
self.store
.write(key, value)
.await
.map_err(|e| {
BoxedError::new(PlainError::new(
e.to_string(),
StatusCode::StorageUnavailable,
))
})
.context(PutStateSnafu { key })
let object = self.store.object(key);
object.write(value).await.context(PutStateSnafu { key })
}
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
let path_string = path.to_string();
let mut lister = self
let lister = self
.store
.scan(path)
.object(path)
.scan()
.await
.map_err(|e| {
BoxedError::new(PlainError::new(
e.to_string(),
StatusCode::StorageUnavailable,
))
})
.with_context(|_| ListStateSnafu {
.map_err(|e| Error::ListState {
path: path_string.clone(),
source: e,
})?;
let store = self.store.clone();
let stream = try_stream!({
while let Some(res) = lister.next().await {
let entry = res
.map_err(|e| {
BoxedError::new(PlainError::new(
e.to_string(),
StatusCode::StorageUnavailable,
))
})
.context(ListStateSnafu { path: &path_string })?;
let stream = lister
.try_filter_map(|entry| async move {
let key = entry.path();
let metadata = store
.metadata(&entry, Metakey::Mode)
.await
.map_err(|e| {
BoxedError::new(PlainError::new(
e.to_string(),
StatusCode::StorageUnavailable,
))
})
.context(ListStateSnafu { path: key })?;
if let EntryMode::FILE = metadata.mode() {
let value = store
.read(key)
.await
.map_err(|e| {
BoxedError::new(PlainError::new(
e.to_string(),
StatusCode::StorageUnavailable,
))
})
.context(ListStateSnafu { path: key })?;
yield (key.to_string(), value);
}
}
});
let key_value = match entry.mode().await? {
ObjectMode::FILE => {
let value = entry.read().await?;
Some((key.to_string(), value))
}
ObjectMode::DIR | ObjectMode::Unknown => None,
};
Ok(key_value)
})
.map_err(move |e| Error::ListState {
path: path_string.clone(),
source: e,
});
Ok(Box::pin(stream))
}
async fn delete(&self, keys: &[String]) -> Result<()> {
for key in keys {
self.store
.delete(key)
.await
.context(DeleteStateSnafu { key })?;
let object = self.store.object(key);
object.delete().await.context(DeleteStateSnafu { key })?;
}
Ok(())
@@ -153,8 +116,8 @@ impl StateStore for ObjectStateStore {
#[cfg(test)]
mod tests {
use common_test_util::temp_dir::create_temp_dir;
use futures_util::TryStreamExt;
use object_store::services::Fs as Builder;
use object_store::ObjectStoreBuilder;
use super::*;
@@ -162,10 +125,8 @@ mod tests {
async fn test_object_state_store() {
let dir = create_temp_dir("state_store");
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let accessor = Builder::default().root(store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor).finish();
let state_store = ObjectStateStore::new(object_store);
let data: Vec<_> = state_store

Some files were not shown because too many files have changed in this diff Show More