Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-04 20:32:56 +00:00)

Compare commits: flow_p3_re ... tests/cuck (9 commits)

Commits in this comparison:

- ee67ce10c9
- 2ba721cc82
- de468ee595
- bb9bdf74ec
- be5574fdb3
- f9afc5dbbf
- c7400a4182
- bf07dd275a
- 7e1eed4b18
@@ -22,15 +22,15 @@ inputs:
 build-dev-builder-ubuntu:
 description: Build dev-builder-ubuntu image
 required: false
-default: "true"
+default: 'true'
 build-dev-builder-centos:
 description: Build dev-builder-centos image
 required: false
-default: "true"
+default: 'true'
 build-dev-builder-android:
 description: Build dev-builder-android image
 required: false
-default: "true"
+default: 'true'
 runs:
 using: composite
 steps:
@@ -47,7 +47,7 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=ubuntu \
-BUILDX_MULTI_PLATFORM_BUILD=all \
+BUILDX_MULTI_PLATFORM_BUILD=true \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 IMAGE_TAG=${{ inputs.version }}
@@ -58,7 +58,7 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=centos \
-BUILDX_MULTI_PLATFORM_BUILD=amd64 \
+BUILDX_MULTI_PLATFORM_BUILD=true \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 IMAGE_TAG=${{ inputs.version }}
@@ -72,5 +72,5 @@ runs:
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 IMAGE_TAG=${{ inputs.version }} && \

 docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
@@ -16,7 +16,7 @@ inputs:
 dev-mode:
 description: Enable dev mode, only build standard greptime
 required: false
-default: "false"
+default: 'false'
 working-dir:
 description: Working directory to build the artifacts
 required: false
@@ -68,7 +68,7 @@ runs:

 - name: Build greptime on centos base image
 uses: ./.github/actions/build-greptime-binary
-if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
+if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
 with:
 base-image: centos
 features: servers/dashboard
@@ -79,7 +79,7 @@ runs:

 - name: Build greptime on android base image
 uses: ./.github/actions/build-greptime-binary
-if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
+if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
 with:
 base-image: android
 artifacts-dir: greptime-android-arm64-${{ inputs.version }}
@@ -26,6 +26,8 @@ runs:
 using: composite
 steps:
 - uses: arduino/setup-protoc@v3
+with:
+repo-token: ${{ secrets.GITHUB_TOKEN }}

 - name: Install rust toolchain
 uses: dtolnay/rust-toolchain@master
.github/workflows/apidoc.yml:
@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-RUST_TOOLCHAIN: nightly-2024-04-18
+RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
 apidoc:
.github/workflows/develop.yml:
@@ -30,7 +30,7 @@ concurrency:
 cancel-in-progress: true

 env:
-RUST_TOOLCHAIN: nightly-2024-04-18
+RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
 check-typos-and-docs:
@@ -147,9 +147,8 @@ jobs:
 - name: Set Rust Fuzz
 shell: bash
 run: |
-sudo apt-get install -y libfuzzer-14-dev
-rustup install nightly
-cargo +nightly install cargo-fuzz
+sudo apt update && sudo apt install -y libfuzzer-14-dev
+cargo install cargo-fuzz
 - name: Download pre-built binaries
 uses: actions/download-artifact@v4
 with:
@@ -185,13 +184,13 @@ jobs:
 - name: Unzip binaries
 run: tar -xvf ./bins.tar.gz
 - name: Run sqlness
-run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
+run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
 - name: Upload sqlness logs
 if: always()
 uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: /tmp/sqlness-*
+path: /tmp/greptime-*.log
 retention-days: 3

 sqlness-kafka-wal:
@@ -215,13 +214,13 @@ jobs:
 working-directory: tests-integration/fixtures/kafka
 run: docker compose -f docker-compose-standalone.yml up -d --wait
 - name: Run sqlness
-run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
+run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
 - name: Upload sqlness logs
 if: always()
 uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs-with-kafka-wal
-path: /tmp/sqlness-*
+path: /tmp/greptime-*.log
 retention-days: 3

 fmt:
@@ -315,10 +314,10 @@ jobs:
 CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
 RUST_BACKTRACE: 1
 CARGO_INCREMENTAL: 0
-GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+GT_S3_REGION: ${{ secrets.S3_REGION }}
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 UNITTEST_LOG_DIR: "__unittest_logs"
@@ -331,20 +330,20 @@ jobs:
 fail_ci_if_error: false
 verbose: true

-# compat:
-# name: Compatibility Test
-# needs: build
-# runs-on: ubuntu-20.04
-# timeout-minutes: 60
-# steps:
-# - uses: actions/checkout@v4
-# - name: Download pre-built binaries
-# uses: actions/download-artifact@v4
-# with:
-# name: bins
-# path: .
-# - name: Unzip binaries
-# run: |
-# mkdir -p ./bins/current
-# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
-# - run: ./tests/compat/test-compat.sh 0.6.0
+compat:
+name: Compatibility Test
+needs: build
+runs-on: ubuntu-20.04
+timeout-minutes: 60
+steps:
+- uses: actions/checkout@v4
+- name: Download pre-built binaries
+uses: actions/download-artifact@v4
+with:
+name: bins
+path: .
+- name: Unzip binaries
+run: |
+mkdir -p ./bins/current
+tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
+- run: ./tests/compat/test-compat.sh 0.6.0
.github/workflows/nightly-ci.yml:
@@ -12,7 +12,7 @@ concurrency:
 cancel-in-progress: true

 env:
-RUST_TOOLCHAIN: nightly-2024-04-18
+RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
 sqlness:
@@ -85,10 +85,10 @@ jobs:
 env:
 RUST_BACKTRACE: 1
 CARGO_INCREMENTAL: 0
-GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+GT_S3_REGION: ${{ secrets.S3_REGION }}
 UNITTEST_LOG_DIR: "__unittest_logs"
 - name: Notify slack if failed
 if: failure()
.github/workflows/release.yml:
@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
 # The arguments of building greptime.
-RUST_TOOLCHAIN: nightly-2024-04-18
+RUST_TOOLCHAIN: nightly-2023-12-19
 CARGO_PROFILE: nightly

 # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
-- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
+- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

Cargo.lock (generated, 2637): file diff suppressed because it is too large.
Cargo.toml:
@@ -55,6 +55,7 @@ members = [
 "src/store-api",
 "src/table",
 "src/index",
+"tests-chaos",
 "tests-fuzz",
 "tests-integration",
 "tests/runner",
@@ -70,24 +71,16 @@ license = "Apache-2.0"
 clippy.print_stdout = "warn"
 clippy.print_stderr = "warn"
 clippy.implicit_clone = "warn"
-clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
-# Remove this after https://github.com/PyO3/pyo3/issues/4094
-rust.non_local_definitions = "allow"

 [workspace.dependencies]
-# We turn off default-features for some dependencies here so the workspaces which inherit them can
-# selectively turn them on if needed, since we can override default-features = true (from false)
-# for the inherited dependency but cannot do the reverse (override from true to false).
-#
-# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "51.0.0", features = ["prettyprint"] }
-arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "51.0"
-arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
-arrow-schema = { version = "51.0", features = ["serde"] }
+arrow = { version = "47.0" }
+arrow-array = "47.0"
+arrow-flight = "47.0"
+arrow-ipc = { version = "47.0", features = ["lz4"] }
+arrow-schema = { version = "47.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
 axum = { version = "0.6", features = ["headers"] }
@@ -98,24 +91,21 @@ bytemuck = "1.12"
 bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
 clap = { version = "4.4", features = ["derive"] }
-crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 derive_builder = "0.12"
 dotenv = "0.15"
-# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
-etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
+etcd-client = "0.12"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "04d78b6e025ceb518040fdd10858c2a9d9345820" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -126,12 +116,12 @@ moka = "0.12"
 notify = "6.1"
 num_cpus = "1.16"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.5", features = [
+opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
 "gen-tonic",
 "metrics",
 "trace",
 ] }
-parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = "47.0"
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
@@ -155,18 +145,18 @@ serde_with = "3"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.7"
 sysinfo = "0.30"
-# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
+# on branch v0.38.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
 "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
-tokio = { version = "1.36", features = ["full"] }
+tokio = { version = "1.28", features = ["full"] }
 tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls"] }
+tonic = { version = "0.10", features = ["tls"] }
-uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
+uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"

 ## workspaces members
@@ -223,6 +213,7 @@ sql = { path = "src/sql" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
+tests-fuzz = { path = "tests-fuzz" }

 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
Makefile:
@@ -54,10 +54,8 @@ ifneq ($(strip $(RELEASE)),)
 CARGO_BUILD_OPTS += --release
 endif

-ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
+ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
-else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
-BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
 else
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
 endif
@@ -215,7 +215,37 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
 ColumnDataType::String,
 )
 }
-_ => unimplemented!(),
+DataType::Null
+| DataType::Boolean
+| DataType::Int8
+| DataType::Int16
+| DataType::Int32
+| DataType::UInt8
+| DataType::UInt16
+| DataType::UInt32
+| DataType::UInt64
+| DataType::Float16
+| DataType::Float32
+| DataType::Date32
+| DataType::Date64
+| DataType::Time32(_)
+| DataType::Time64(_)
+| DataType::Duration(_)
+| DataType::Interval(_)
+| DataType::Binary
+| DataType::FixedSizeBinary(_)
+| DataType::LargeBinary
+| DataType::LargeUtf8
+| DataType::List(_)
+| DataType::FixedSizeList(_, _)
+| DataType::LargeList(_)
+| DataType::Struct(_)
+| DataType::Union(_, _)
+| DataType::Dictionary(_, _)
+| DataType::Decimal128(_, _)
+| DataType::Decimal256(_, _)
+| DataType::RunEndEncoded(_, _)
+| DataType::Map(_, _) => todo!(),
 }
 }

@@ -414,7 +444,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
 fn query_set(table_name: &str) -> HashMap<String, String> {
 HashMap::from([
 (
 "count_all".to_string(),
 format!("SELECT COUNT(*) FROM {table_name};"),
 ),
 (
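The hunk above replaces a catch-all `_ => unimplemented!()` arm with an exhaustive list of `DataType` variants on the compared side. A minimal, self-contained sketch of why exhaustive matching is often preferred is shown below; it uses a hypothetical `ColumnKind` enum for illustration only, not GreptimeDB's actual types.

```rust
// Illustrative only -- hypothetical enum, not GreptimeDB code.
enum ColumnKind {
    Int,
    Float,
    Text,
}

fn type_name(kind: &ColumnKind) -> &'static str {
    // Exhaustive match: if a new variant (say `Bool`) is added later, this
    // function stops compiling until the new case is handled explicitly,
    // instead of silently falling through a `_ => ...` arm.
    match kind {
        ColumnKind::Int => "int",
        ColumnKind::Float => "float",
        ColumnKind::Text => "text",
    }
}

fn main() {
    assert_eq!(type_name(&ColumnKind::Int), "int");
    assert_eq!(type_name(&ColumnKind::Float), "float");
    assert_eq!(type_name(&ColumnKind::Text), "text");
}
```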
@@ -1,136 +0,0 @@
(This file exists only on the base side of the comparison; the compared branch does not contain it. Its full content follows.)

# How to write fuzz tests

This document introduces how to write fuzz tests in GreptimeDB.

## What is a fuzz test
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs, generated by the fuzzer, that cause system panics, crashes, or other unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.

## Why we need them
- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)

## Resources
All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.

### Fundamental components
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.

### Test targets
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target uses the fundamental components to generate SQLs, sends the generated SQLs via the specified protocol, and validates the results of SQL execution.

Figure 1 illustrates how the fundamental components of the fuzz test provide the ability to generate random SQLs. A Random Number Generator (Rng) drives the generation of the Intermediate Representation (IR), which a DialectTranslator then turns into the dialect of a specific protocol. Finally, the fuzz tests send the generated SQL via that protocol and verify that the execution results meet expectations.

```
                     Rng
                      |
                      v
               ExprGenerator
                      |
                      v
     Intermediate representation (IR)
                      |
      +---------------+----------------+
      |               |                |
      v               v                v
MySQLTranslator  PostgreSQLTranslator  OtherDialectTranslator
      |               |                |
      v               v                v
SQL(MySQL Dialect)  .....            .....
                      |
                      v
                  Fuzz Test
```
(Figure 1: Overview of fuzz tests)

For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).

## How to add a fuzz test target

1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.

2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.

```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```

3. Define the `FuzzInput` in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
#![no_main]
use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured};

#[derive(Clone, Debug)]
struct FuzzInput {
    seed: u64,
}

impl Arbitrary<'_> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
        let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
        Ok(FuzzInput { seed })
    }
}
```

4. Write your first fuzz test target in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
    merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
    MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
        let Connections { mysql } = init_greptime_connections().await;
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);
        // Randomly toggle IF NOT EXISTS for the generated CREATE TABLE.
        let if_not_exists = rng.gen_bool(0.5);
        let create_table_generator = CreateTableExprGeneratorBuilder::default()
            .name_generator(Box::new(MappedGenerator::new(
                WordGenerator,
                merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
            )))
            .columns(columns)
            .engine("mito")
            .if_not_exists(if_not_exists)
            .build()
            .unwrap();
        let ir = create_table_generator.generate(&mut rng);
        let translator = CreateTableExprTranslator;
        let sql = translator.translate(&ir).unwrap();
        mysql.execute(&sql).await
    })
});
```

5. Run your fuzz test target:

```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```

For more details, please refer to this [document](/tests-fuzz/README.md).
@@ -73,7 +73,7 @@ CREATE TABLE cpu (
 usage_system DOUBLE,
 datacenter STRING,
 TIME INDEX (ts),
-PRIMARY KEY(datacenter, host)) ENGINE=mito;
+PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
 ```

 Then the table's `TableMeta` may look like this:
@@ -249,7 +249,7 @@ CREATE TABLE cpu (
 usage_system DOUBLE,
 datacenter STRING,
 TIME INDEX (ts),
-PRIMARY KEY(datacenter, host)) ENGINE=mito;
+PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);

 select ts, usage_system from cpu;
 ```
@@ -1,46 +0,0 @@
(This file exists only on the base side of the comparison; the compared branch does not contain it. Its full content follows.)

# GreptimeDB Style Guide

This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.

It's mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).

## Table of Contents

- Formatting
- Modules
- Comments

## Formatting

- Place all `mod` declarations before any `use`.
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
- Add an empty line before and after declaration blocks.
- Place comments before attributes (`#[]`) and derives (`#[derive]`).

## Modules

- Use a file with the same name instead of `mod.rs` to define a module. E.g.:

```
.
├── cache
│   ├── cache_size.rs
│   └── write_cache.rs
└── cache.rs
```

## Comments

- Add comments for public functions and structs.
- Prefer document comments (`///`) over normal comments (`//`) for structs, fields, functions, etc.
- Add links (`[]`) to structs, methods, or any other referenced items, and make sure the links work.

## Error handling

- Define a custom error type for the module if needed.
- Prefer `with_context()` over `context()` when allocation is needed to construct an error.
- Use the `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:

```rust
error!(e; "Failed to do something");
```
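The guide's error-handling section prefers `with_context()` over `context()` when building the error value requires an allocation. The same eager-versus-lazy trade-off can be sketched with the standard library alone; the snippet below is illustrative only and uses `Option::ok_or_else` rather than the snafu API itself — the closure defers the allocation to the error path.

```rust
use std::collections::HashMap;

// Illustrative only -- std-library analogy for the context()/with_context() guidance.
fn lookup(map: &HashMap<String, i32>, key: &str) -> Result<i32, String> {
    map.get(key)
        .copied()
        // The message String is built only if `key` is actually missing,
        // mirroring how with_context() defers constructing the error context.
        .ok_or_else(|| format!("key not found: {key}"))
}

fn main() {
    let mut map = HashMap::new();
    map.insert("a".to_string(), 1);
    assert_eq!(lookup(&map, "a"), Ok(1));
    assert!(lookup(&map, "b").is_err());
}
```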
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2024-04-18"
+channel = "nightly-2023-12-19"
@@ -21,7 +21,6 @@ pub mod prom_store {
 }
 }

-pub mod region;
 pub mod v1;

 pub use greptime_proto;
@@ -1,42 +0,0 @@
(This file exists only on the base side of the comparison; the compared branch does not contain it. Its full content follows.)

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use common_base::AffectedRows;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;

/// This result struct is derived from [RegionResponseV1]
#[derive(Debug)]
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extension: HashMap<String, Vec<u8>>,
}

impl RegionResponse {
    pub fn from_region_response(region_response: RegionResponseV1) -> Self {
        Self {
            affected_rows: region_response.affected_rows as _,
            extension: region_response.extension,
        }
    }

    /// Creates one response without extension
    pub fn new(affected_rows: AffectedRows) -> Self {
        Self {
            affected_rows,
            extension: Default::default(),
        }
    }
}
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {

 impl MockUserProvider {
 pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
-info.catalog.clone_into(&mut self.catalog);
-info.schema.clone_into(&mut self.schema);
-info.username.clone_into(&mut self.username);
+self.catalog = info.catalog.to_owned();
+self.schema = info.schema.to_owned();
+self.username = info.username.to_owned();
 }
 }

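The hunk above trades `ToOwned::clone_into` for plain `to_owned()` assignments. A minimal standalone sketch of the difference is shown below; it is illustrative only and uses plain `String`s rather than the `MockUserProvider` fields: `clone_into` writes into an existing destination and may reuse its buffer, while `to_owned()` always builds a fresh value.

```rust
// Illustrative only -- not GreptimeDB code.
fn main() {
    let src: &str = "catalog_name";

    // `to_owned` always allocates a brand-new String.
    let fresh: String = src.to_owned();

    // `clone_into` copies into an existing String and may reuse its capacity
    // instead of reallocating (the same idea applies to `Clone::clone_from`).
    let mut reused = String::with_capacity(64);
    src.clone_into(&mut reused);

    assert_eq!(fresh, reused);
}
```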
@@ -109,7 +109,11 @@ impl Predicate {
 };
 }
 Predicate::Not(p) => {
-return Some(!p.eval(row)?);
+let Some(b) = p.eval(row) else {
+return None;
+};
+
+return Some(!b);
 }
 }

@@ -121,7 +125,13 @@ impl Predicate {
 fn from_expr(expr: DfExpr) -> Option<Predicate> {
 match expr {
 // NOT expr
-DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
+DfExpr::Not(expr) => {
+let Some(p) = Self::from_expr(*expr) else {
+return None;
+};
+
+Some(Predicate::Not(Box::new(p)))
+}
 // expr LIKE pattern
 DfExpr::Like(Like {
 negated,
@@ -168,15 +178,25 @@ impl Predicate {
 }
 // left AND right
 (left, Operator::And, right) => {
-let left = Self::from_expr(left)?;
-let right = Self::from_expr(right)?;
+let Some(left) = Self::from_expr(left) else {
+return None;
+};
+
+let Some(right) = Self::from_expr(right) else {
+return None;
+};

 Some(Predicate::And(Box::new(left), Box::new(right)))
 }
 // left OR right
 (left, Operator::Or, right) => {
-let left = Self::from_expr(left)?;
-let right = Self::from_expr(right)?;
+let Some(left) = Self::from_expr(left) else {
+return None;
+};
+
+let Some(right) = Self::from_expr(right) else {
+return None;
+};

 Some(Predicate::Or(Box::new(left), Box::new(right)))
 }
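The three hunks above rewrite early returns on `Option` from the `?` operator into explicit `let ... else` blocks; the two forms behave the same. A minimal standalone sketch of the equivalence, using simple integer parsing instead of the real `Predicate`/`DfExpr` types (illustrative only):

```rust
// Illustrative only -- a standalone sketch of the two equivalent styles.
fn parse_both_compact(a: &str, b: &str) -> Option<(i32, i32)> {
    // `?` on an Option returns None early, like the base side's
    // `Self::from_expr(left)?`.
    let left = a.parse::<i32>().ok()?;
    let right = b.parse::<i32>().ok()?;
    Some((left, right))
}

fn parse_both_let_else(a: &str, b: &str) -> Option<(i32, i32)> {
    // The let-else form spells out the same early return, like the compared side.
    let Some(left) = a.parse::<i32>().ok() else {
        return None;
    };
    let Some(right) = b.parse::<i32>().ok() else {
        return None;
    };
    Some((left, right))
}

fn main() {
    assert_eq!(parse_both_compact("1", "2"), parse_both_let_else("1", "2"));
    assert_eq!(parse_both_let_else("x", "2"), None);
}
```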
@@ -17,6 +17,7 @@ use std::fmt::Debug;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
+use std::usize;

 use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::KvCacheInvalidator;
@@ -505,32 +506,32 @@ mod tests {
 }

 async fn range(&self, _req: RangeRequest) -> Result<RangeResponse, Self::Error> {
-unimplemented!()
+todo!()
 }

 async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
-unimplemented!()
+todo!()
 }

 async fn compare_and_put(
 &self,
 _req: CompareAndPutRequest,
 ) -> Result<CompareAndPutResponse, Self::Error> {
-unimplemented!()
+todo!()
 }

 async fn delete_range(
 &self,
 _req: DeleteRangeRequest,
 ) -> Result<DeleteRangeResponse, Self::Error> {
-unimplemented!()
+todo!()
 }

 async fn batch_delete(
 &self,
 _req: BatchDeleteRequest,
 ) -> Result<BatchDeleteResponse, Self::Error> {
-unimplemented!()
+todo!()
 }
 }

@@ -49,7 +49,10 @@ impl DfTableSourceProvider {
 }
 }

-pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
+pub fn resolve_table_ref<'a>(
+&'a self,
+table_ref: TableReference<'a>,
+) -> Result<ResolvedTableReference<'a>> {
 if self.disallow_cross_catalog_query {
 match &table_ref {
 TableReference::Bare { .. } => (),
@@ -73,7 +76,7 @@ impl DfTableSourceProvider {

 pub async fn resolve_table(
 &mut self,
-table_ref: TableReference,
+table_ref: TableReference<'_>,
 ) -> Result<Arc<dyn TableSource>> {
 let table_ref = self.resolve_table_ref(table_ref)?;

@@ -103,6 +106,8 @@ impl DfTableSourceProvider {

 #[cfg(test)]
 mod tests {
+use std::borrow::Cow;
+
 use session::context::QueryContext;

 use super::*;
@@ -115,37 +120,68 @@ mod tests {
 let table_provider =
 DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);

-let table_ref = TableReference::bare("table_name");
+let table_ref = TableReference::Bare {
+table: Cow::Borrowed("table_name"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_ok());

-let table_ref = TableReference::partial("public", "table_name");
+let table_ref = TableReference::Partial {
+schema: Cow::Borrowed("public"),
+table: Cow::Borrowed("table_name"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_ok());

-let table_ref = TableReference::partial("wrong_schema", "table_name");
+let table_ref = TableReference::Partial {
+schema: Cow::Borrowed("wrong_schema"),
+table: Cow::Borrowed("table_name"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_ok());

-let table_ref = TableReference::full("greptime", "public", "table_name");
+let table_ref = TableReference::Full {
+catalog: Cow::Borrowed("greptime"),
+schema: Cow::Borrowed("public"),
+table: Cow::Borrowed("table_name"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_ok());

-let table_ref = TableReference::full("wrong_catalog", "public", "table_name");
+let table_ref = TableReference::Full {
+catalog: Cow::Borrowed("wrong_catalog"),
+schema: Cow::Borrowed("public"),
+table: Cow::Borrowed("table_name"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_err());

-let table_ref = TableReference::partial("information_schema", "columns");
+let table_ref = TableReference::Partial {
+schema: Cow::Borrowed("information_schema"),
+table: Cow::Borrowed("columns"),
+};
 let result = table_provider.resolve_table_ref(table_ref);
 assert!(result.is_ok());

-let table_ref = TableReference::full("greptime", "information_schema", "columns");
+let table_ref = TableReference::Full {
+catalog: Cow::Borrowed("greptime"),
+schema: Cow::Borrowed("information_schema"),
+table: Cow::Borrowed("columns"),
+};
 assert!(table_provider.resolve_table_ref(table_ref).is_ok());

-let table_ref = TableReference::full("dummy", "information_schema", "columns");
+let table_ref = TableReference::Full {
+catalog: Cow::Borrowed("dummy"),
+schema: Cow::Borrowed("information_schema"),
+table: Cow::Borrowed("columns"),
+};
 assert!(table_provider.resolve_table_ref(table_ref).is_err());

-let table_ref = TableReference::full("greptime", "greptime_private", "columns");
+let table_ref = TableReference::Full {
+catalog: Cow::Borrowed("greptime"),
+schema: Cow::Borrowed("greptime_private"),
+table: Cow::Borrowed("columns"),
+};
 assert!(table_provider.resolve_table_ref(table_ref).is_ok());
 }
 }
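The hunks above move between an owned `TableReference` and a lifetime-parameterized one whose fields are `Cow<'a, str>`. Below is a minimal standalone sketch of that borrowed-versus-owned pattern; the `TableRef` type is hypothetical and is not DataFusion's actual API.

```rust
use std::borrow::Cow;

// Illustrative only -- hypothetical type, not DataFusion's TableReference.
#[derive(Debug, PartialEq)]
struct TableRef<'a> {
    schema: Cow<'a, str>,
    table: Cow<'a, str>,
}

impl<'a> TableRef<'a> {
    // Borrow from the caller: no allocation, the value is tied to lifetime 'a.
    fn partial(schema: &'a str, table: &'a str) -> Self {
        Self {
            schema: Cow::Borrowed(schema),
            table: Cow::Borrowed(table),
        }
    }

    // Detach from the borrow by cloning the fields into owned strings.
    fn into_owned(self) -> TableRef<'static> {
        TableRef {
            schema: Cow::Owned(self.schema.into_owned()),
            table: Cow::Owned(self.table.into_owned()),
        }
    }
}

fn main() {
    let borrowed = TableRef::partial("public", "table_name");
    let owned: TableRef<'static> = borrowed.into_owned();
    assert_eq!(owned.schema, "public");
    assert_eq!(owned.table, "table_name");
}
```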
@@ -14,7 +14,6 @@

 use std::sync::Arc;

-use api::region::RegionResponse;
 use api::v1::region::{QueryRequest, RegionRequest};
 use api::v1::ResponseHeader;
 use arc_swap::ArcSwapOption;
@@ -24,7 +23,7 @@ use async_trait::async_trait;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_grpc::flight::{FlightDecoder, FlightMessage};
-use common_meta::datanode_manager::Datanode;
+use common_meta::datanode_manager::{Datanode, HandleResponse};
 use common_meta::error::{self as meta_error, Result as MetaResult};
 use common_recordbatch::error::ExternalSnafu;
 use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
@@ -47,7 +46,7 @@ pub struct RegionRequester {

 #[async_trait]
 impl Datanode for RegionRequester {
-async fn handle(&self, request: RegionRequest) -> MetaResult<RegionResponse> {
+async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
 self.handle_inner(request).await.map_err(|err| {
 if err.should_retry() {
 meta_error::Error::RetryLater {
@@ -166,7 +165,7 @@ impl RegionRequester {
 Ok(Box::pin(record_batch_stream))
 }

-async fn handle_inner(&self, request: RegionRequest) -> Result<RegionResponse> {
+async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
 let request_type = request
 .body
 .as_ref()
@@ -195,10 +194,10 @@ impl RegionRequester {

 check_response_header(&response.header)?;

-Ok(RegionResponse::from_region_response(response))
+Ok(HandleResponse::from_region_response(response))
 }

-pub async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
+pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
 self.handle_inner(request).await
 }
 }
|||||||
@@ -36,7 +36,6 @@ common-telemetry = { workspace = true, features = [
|
|||||||
"deadlock_detection",
|
"deadlock_detection",
|
||||||
] }
|
] }
|
||||||
common-time.workspace = true
|
common-time.workspace = true
|
||||||
common-version.workspace = true
|
|
||||||
common-wal.workspace = true
|
common-wal.workspace = true
|
||||||
config = "0.13"
|
config = "0.13"
|
||||||
datanode.workspace = true
|
datanode.workspace = true
|
||||||
|
|||||||
@@ -22,7 +22,6 @@ use cmd::options::{CliOptions, Options};
 use cmd::{
 cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
 };
-use common_version::{short_version, version};

 #[derive(Parser)]
 enum SubCommand {
@@ -106,8 +105,7 @@ async fn main() -> Result<()> {

 common_telemetry::set_panic_hook();

-let version = version!();
-let cli = greptimedb_cli().version(version);
+let cli = greptimedb_cli();

 let cli = SubCommand::augment_subcommands(cli);

@@ -131,7 +129,7 @@ async fn main() -> Result<()> {
 opts.node_id(),
 );

-log_versions(version, short_version!());
+log_versions();

 let app = subcmd.build(opts).await?;

@@ -84,10 +84,10 @@ impl Command {
 let mut logging_opts = LoggingOptions::default();

 if let Some(dir) = &cli_options.log_dir {
-logging_opts.dir.clone_from(dir);
+logging_opts.dir = dir.clone();
 }

-logging_opts.level.clone_from(&cli_options.log_level);
+logging_opts.level = cli_options.log_level.clone();

 Ok(Options::Cli(Box::new(logging_opts)))
 }
@@ -492,7 +492,9 @@ mod tests {
 )

 ENGINE=mito
-;
+WITH(
+regions = 1
+);
 "#;
 assert_eq!(res.trim(), expect.trim());

@@ -192,10 +192,10 @@ impl MigrateTableMetadata {
 let key = v1SchemaKey::parse(key_str)
 .unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));

-Ok(key)
+Ok((key, ()))
 }),
 );
-while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
+while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
 let _ = self.migrate_schema_key(&key).await;
 keys.push(key.to_string().as_bytes().to_vec());
 }
@@ -244,10 +244,10 @@ impl MigrateTableMetadata {
 let key = v1CatalogKey::parse(key_str)
 .unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));

-Ok(key)
+Ok((key, ()))
 }),
 );
-while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
+while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
 let _ = self.migrate_catalog_key(&key).await;
 keys.push(key.to_string().as_bytes().to_vec());
 }
@@ -139,19 +139,19 @@ impl StartCommand {
 )?;

 if let Some(dir) = &cli_options.log_dir {
-opts.logging.dir.clone_from(dir);
+opts.logging.dir = dir.clone();
 }

 if cli_options.log_level.is_some() {
-opts.logging.level.clone_from(&cli_options.log_level);
+opts.logging.level = cli_options.log_level.clone();
 }

 if let Some(addr) = &self.rpc_addr {
-opts.rpc_addr.clone_from(addr);
+opts.rpc_addr = addr.clone();
 }

 if self.rpc_hostname.is_some() {
-opts.rpc_hostname.clone_from(&self.rpc_hostname);
+opts.rpc_hostname = self.rpc_hostname.clone();
 }

 if let Some(node_id) = self.node_id {
@@ -161,8 +161,7 @@ impl StartCommand {
 if let Some(metasrv_addrs) = &self.metasrv_addr {
 opts.meta_client
 .get_or_insert_with(MetaClientOptions::default)
-.metasrv_addrs
-.clone_from(metasrv_addrs);
+.metasrv_addrs = metasrv_addrs.clone();
 opts.mode = Mode::Distributed;
 }

@@ -174,7 +173,7 @@ impl StartCommand {
 }

 if let Some(data_home) = &self.data_home {
-opts.storage.data_home.clone_from(data_home);
+opts.storage.data_home = data_home.clone();
 }

 // `wal_dir` only affects raft-engine config.
@@ -192,7 +191,7 @@ impl StartCommand {
 }

 if let Some(http_addr) = &self.http_addr {
-opts.http.addr.clone_from(http_addr);
+opts.http.addr = http_addr.clone();
 }

 if let Some(http_timeout) = self.http_timeout {
@@ -157,11 +157,11 @@ impl StartCommand {
 )?;

 if let Some(dir) = &cli_options.log_dir {
-opts.logging.dir.clone_from(dir);
+opts.logging.dir = dir.clone();
 }

 if cli_options.log_level.is_some() {
-opts.logging.level.clone_from(&cli_options.log_level);
+opts.logging.level = cli_options.log_level.clone();
 }

 let tls_opts = TlsOption::new(
@@ -171,7 +171,7 @@ impl StartCommand {
 );

 if let Some(addr) = &self.http_addr {
-opts.http.addr.clone_from(addr);
+opts.http.addr = addr.clone()
 }

 if let Some(http_timeout) = self.http_timeout {
@@ -183,24 +183,24 @@ impl StartCommand {
 }

 if let Some(addr) = &self.rpc_addr {
-opts.grpc.addr.clone_from(addr);
+opts.grpc.addr = addr.clone()
 }

 if let Some(addr) = &self.mysql_addr {
 opts.mysql.enable = true;
-opts.mysql.addr.clone_from(addr);
+opts.mysql.addr = addr.clone();
 opts.mysql.tls = tls_opts.clone();
 }

 if let Some(addr) = &self.postgres_addr {
 opts.postgres.enable = true;
-opts.postgres.addr.clone_from(addr);
+opts.postgres.addr = addr.clone();
 opts.postgres.tls = tls_opts;
 }

 if let Some(addr) = &self.opentsdb_addr {
 opts.opentsdb.enable = true;
-opts.opentsdb.addr.clone_from(addr);
+opts.opentsdb.addr = addr.clone();
 }

 if let Some(enable) = self.influxdb_enable {
@@ -210,12 +210,11 @@ impl StartCommand {
 if let Some(metasrv_addrs) = &self.metasrv_addr {
 opts.meta_client
 .get_or_insert_with(MetaClientOptions::default)
-.metasrv_addrs
-.clone_from(metasrv_addrs);
+.metasrv_addrs = metasrv_addrs.clone();
 opts.mode = Mode::Distributed;
 }

-opts.user_provider.clone_from(&self.user_provider);
+opts.user_provider = self.user_provider.clone();

 Ok(Options::Frontend(Box::new(opts)))
 }
@@ -64,23 +64,26 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
    Ok(())
}

-/// Log the versions of the application, and the arguments passed to the cli.
-/// `version_string` should be the same as the output of cli "--version";
-/// and the `app_version` is the short version of the codes, often consist of git branch and commit.
-pub fn log_versions(version_string: &str, app_version: &str) {
+pub fn log_versions() {
    // Report app version as gauge.
    APP_VERSION
-       .with_label_values(&[env!("CARGO_PKG_VERSION"), app_version])
+       .with_label_values(&[short_version(), full_version()])
        .inc();

    // Log version and argument flags.
-   info!("GreptimeDB version: {}", version_string);
+   info!(
+       "short_version: {}, full_version: {}",
+       short_version(),
+       full_version()
+   );

    log_env_flags();
}

pub fn greptimedb_cli() -> clap::Command {
-   let cmd = clap::Command::new("greptimedb").subcommand_required(true);
+   let cmd = clap::Command::new("greptimedb")
+       .version(print_version())
+       .subcommand_required(true);

    #[cfg(feature = "tokio-console")]
    let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));
@@ -88,6 +91,35 @@ pub fn greptimedb_cli() -> clap::Command {
    cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
}

+fn print_version() -> &'static str {
+    concat!(
+        "\nbranch: ",
+        env!("GIT_BRANCH"),
+        "\ncommit: ",
+        env!("GIT_COMMIT"),
+        "\ndirty: ",
+        env!("GIT_DIRTY"),
+        "\nversion: ",
+        env!("CARGO_PKG_VERSION")
+    )
+}
+
+fn short_version() -> &'static str {
+    env!("CARGO_PKG_VERSION")
+}
+
+// {app_name}-{branch_name}-{commit_short}
+// The branch name (tag) of a release build should already contain the short
+// version so the full version doesn't concat the short version explicitly.
+fn full_version() -> &'static str {
+    concat!(
+        "greptimedb-",
+        env!("GIT_BRANCH"),
+        "-",
+        env!("GIT_COMMIT_SHORT")
+    )
+}
+
fn log_env_flags() {
    info!("command line arguments");
    for argument in std::env::args() {
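The added `print_version()` and `full_version()` read `GIT_BRANCH`, `GIT_COMMIT`, `GIT_COMMIT_SHORT` and `GIT_DIRTY` through `env!`, so those variables must exist at compile time. The build script that exports them is not part of this diff; a hypothetical `build.rs` sketch of how such values could be emitted:

// build.rs (hypothetical sketch, not from this changeset): export git metadata
// as compile-time env vars so env!("GIT_BRANCH") etc. resolve in the binary.
use std::process::Command;

fn git(args: &[&str]) -> String {
    Command::new("git")
        .args(args)
        .output()
        .ok()
        .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
        .unwrap_or_default()
}

fn main() {
    println!("cargo:rustc-env=GIT_BRANCH={}", git(&["rev-parse", "--abbrev-ref", "HEAD"]));
    println!("cargo:rustc-env=GIT_COMMIT={}", git(&["rev-parse", "HEAD"]));
    println!("cargo:rustc-env=GIT_COMMIT_SHORT={}", git(&["rev-parse", "--short", "HEAD"]));
    println!("cargo:rustc-env=GIT_DIRTY={}", !git(&["status", "--porcelain"]).is_empty());
}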
@@ -134,23 +134,23 @@ impl StartCommand {
    )?;

    if let Some(dir) = &cli_options.log_dir {
-       opts.logging.dir.clone_from(dir);
+       opts.logging.dir = dir.clone();
    }

    if cli_options.log_level.is_some() {
-       opts.logging.level.clone_from(&cli_options.log_level);
+       opts.logging.level = cli_options.log_level.clone();
    }

    if let Some(addr) = &self.bind_addr {
-       opts.bind_addr.clone_from(addr);
+       opts.bind_addr = addr.clone();
    }

    if let Some(addr) = &self.server_addr {
-       opts.server_addr.clone_from(addr);
+       opts.server_addr = addr.clone();
    }

    if let Some(addr) = &self.store_addr {
-       opts.store_addr.clone_from(addr);
+       opts.store_addr = addr.clone();
    }

    if let Some(selector_type) = &self.selector {
@@ -168,7 +168,7 @@ impl StartCommand {
    }

    if let Some(http_addr) = &self.http_addr {
-       opts.http.addr.clone_from(http_addr);
+       opts.http.addr = http_addr.clone();
    }

    if let Some(http_timeout) = self.http_timeout {
@@ -176,11 +176,11 @@ impl StartCommand {
    }

    if let Some(data_home) = &self.data_home {
-       opts.data_home.clone_from(data_home);
+       opts.data_home = data_home.clone();
    }

    if !self.store_key_prefix.is_empty() {
-       opts.store_key_prefix.clone_from(&self.store_key_prefix)
+       opts.store_key_prefix = self.store_key_prefix.clone()
    }

    if let Some(max_txn_ops) = self.max_txn_ops {
@@ -293,11 +293,11 @@ impl StartCommand {
    opts.mode = Mode::Standalone;

    if let Some(dir) = &cli_options.log_dir {
-       opts.logging.dir.clone_from(dir);
+       opts.logging.dir = dir.clone();
    }

    if cli_options.log_level.is_some() {
-       opts.logging.level.clone_from(&cli_options.log_level);
+       opts.logging.level = cli_options.log_level.clone();
    }

    let tls_opts = TlsOption::new(
@@ -307,11 +307,11 @@ impl StartCommand {
    );

    if let Some(addr) = &self.http_addr {
-       opts.http.addr.clone_from(addr);
+       opts.http.addr = addr.clone()
    }

    if let Some(data_home) = &self.data_home {
-       opts.storage.data_home.clone_from(data_home);
+       opts.storage.data_home = data_home.clone();
    }

    if let Some(addr) = &self.rpc_addr {
@@ -325,31 +325,31 @@ impl StartCommand {
    }
    .fail();
    }
-   opts.grpc.addr.clone_from(addr)
+   opts.grpc.addr = addr.clone()
    }

    if let Some(addr) = &self.mysql_addr {
        opts.mysql.enable = true;
-       opts.mysql.addr.clone_from(addr);
+       opts.mysql.addr = addr.clone();
        opts.mysql.tls = tls_opts.clone();
    }

    if let Some(addr) = &self.postgres_addr {
        opts.postgres.enable = true;
-       opts.postgres.addr.clone_from(addr);
+       opts.postgres.addr = addr.clone();
        opts.postgres.tls = tls_opts;
    }

    if let Some(addr) = &self.opentsdb_addr {
        opts.opentsdb.enable = true;
-       opts.opentsdb.addr.clone_from(addr);
+       opts.opentsdb.addr = addr.clone();
    }

    if self.influxdb_enable {
        opts.influxdb.enable = self.influxdb_enable;
    }

-   opts.user_provider.clone_from(&self.user_provider);
+   opts.user_provider = self.user_provider.clone();

    let metadata_store = opts.metadata_store.clone();
    let procedure = opts.procedure.clone();
@@ -30,7 +30,7 @@ derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
-orc-rust = { git = "https://github.com/MichaelScofield/orc-rs.git", rev = "17347f5f084ac937863317df882218055c4ea8c1" }
+orc-rust = "0.2"
parquet.workspace = true
paste = "1.0"
regex = "1.7"
@@ -117,7 +117,7 @@ impl CsvConfig {
    let mut builder = csv::ReaderBuilder::new(self.file_schema.clone())
        .with_delimiter(self.delimiter)
        .with_batch_size(self.batch_size)
-       .with_header(self.has_header);
+       .has_header(self.has_header);

    if let Some(proj) = &self.file_projection {
        builder = builder.with_projection(proj.clone());
@@ -19,7 +19,6 @@ use std::vec;

use common_test_util::find_workspace_path;
use datafusion::assert_batches_eq;
-use datafusion::config::TableParquetOptions;
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
@@ -167,7 +166,7 @@ async fn test_parquet_exec() {
    .to_string();
    let base_config = scan_config(schema.clone(), None, path);

-   let exec = ParquetExec::new(base_config, None, None, TableParquetOptions::default())
+   let exec = ParquetExec::new(base_config, None, None)
        .with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));

    let ctx = SessionContext::new();
@@ -16,7 +16,6 @@ use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema, SchemaRef};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
-use datafusion::common::Statistics;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{FileScanConfig, FileStream};
@@ -73,16 +72,17 @@ pub fn test_basic_schema() -> SchemaRef {
pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
    // object_store only recognize the Unix style path, so make it happy.
    let filename = &filename.replace('\\', "/");
-   let statistics = Statistics::new_unknown(file_schema.as_ref());
    FileScanConfig {
        object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
        file_schema,
        file_groups: vec![vec![PartitionedFile::new(filename.to_string(), 10)]],
-       statistics,
+       statistics: Default::default(),
        projection: None,
        limit,
        table_partition_cols: vec![],
        output_ordering: vec![],
+       infinite_source: false,
    }
}

@@ -59,7 +59,6 @@ pub enum StatusCode {
    RegionNotFound = 4005,
    RegionAlreadyExists = 4006,
    RegionReadonly = 4007,
-   /// Region is not in a proper state to handle specific request.
    RegionNotReady = 4008,
    // If mutually exclusive operations are reached at the same time,
    // only one can be executed, another one will get region busy.
@@ -56,7 +56,7 @@ where
        .map(|&n| n.into())
        .collect::<Vec<Value>>();
    Ok(vec![Value::List(ListValue::new(
-       nums,
+       Some(Box::new(nums)),
        I::LogicalType::build_data_type(),
    ))])
}
@@ -120,7 +120,10 @@ where
        O::from_native(native).into()
    })
    .collect::<Vec<Value>>();
-   let diff = Value::List(ListValue::new(diff, O::LogicalType::build_data_type()));
+   let diff = Value::List(ListValue::new(
+       Some(Box::new(diff)),
+       O::LogicalType::build_data_type(),
+   ));
    Ok(diff)
    }
}
@@ -215,7 +218,10 @@ mod test {
    let values = vec![Value::from(2_i64), Value::from(1_i64)];
    diff.update_batch(&v).unwrap();
    assert_eq!(
-       Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
+       Value::List(ListValue::new(
+           Some(Box::new(values)),
+           ConcreteDataType::int64_datatype()
+       )),
        diff.evaluate().unwrap()
    );

@@ -230,7 +236,10 @@ mod test {
    let values = vec![Value::from(5_i64), Value::from(1_i64)];
    diff.update_batch(&v).unwrap();
    assert_eq!(
-       Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
+       Value::List(ListValue::new(
+           Some(Box::new(values)),
+           ConcreteDataType::int64_datatype()
+       )),
        diff.evaluate().unwrap()
    );

@@ -243,7 +252,10 @@ mod test {
    let values = vec![Value::from(0_i64), Value::from(0_i64), Value::from(0_i64)];
    diff.update_batch(&v).unwrap();
    assert_eq!(
-       Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
+       Value::List(ListValue::new(
+           Some(Box::new(values)),
+           ConcreteDataType::int64_datatype()
+       )),
        diff.evaluate().unwrap()
    );
    }
@@ -104,7 +104,10 @@ where
        .map(|&n| n.into())
        .collect::<Vec<Value>>();
    Ok(vec![
-       Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
+       Value::List(ListValue::new(
+           Some(Box::new(nums)),
+           T::LogicalType::build_data_type(),
+       )),
        self.p.into(),
    ])
}
@@ -72,7 +72,10 @@ where
        .map(|&n| n.into())
        .collect::<Vec<Value>>();
    Ok(vec![
-       Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
+       Value::List(ListValue::new(
+           Some(Box::new(nums)),
+           T::LogicalType::build_data_type(),
+       )),
        self.x.into(),
    ])
}
@@ -56,7 +56,10 @@ where
        .map(|&x| x.into())
        .collect::<Vec<Value>>();
    Ok(vec![
-       Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
+       Value::List(ListValue::new(
+           Some(Box::new(nums)),
+           T::LogicalType::build_data_type(),
+       )),
        self.x.into(),
    ])
}
@@ -56,7 +56,10 @@ where
        .map(|&x| x.into())
        .collect::<Vec<Value>>();
    Ok(vec![
-       Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
+       Value::List(ListValue::new(
+           Some(Box::new(nums)),
+           T::LogicalType::build_data_type(),
+       )),
        self.x.into(),
    ])
}
@@ -77,7 +77,7 @@ impl Function for RangeFunction {
    /// `range_fn` will never been used. As long as a legal signature is returned, the specific content of the signature does not matter.
    /// In fact, the arguments loaded by `range_fn` are very complicated, and it is difficult to use `Signature` to describe
    fn signature(&self) -> Signature {
-       Signature::variadic_any(Volatility::Immutable)
+       Signature::any(0, Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
@@ -15,7 +15,7 @@
pub mod channel_manager;
pub mod error;
pub mod flight;
-pub mod precision;
pub mod select;
+pub mod writer;

pub use error::Error;
@@ -1,141 +0,0 @@ (file deleted)
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;

use common_time::timestamp::TimeUnit;

use crate::Error;

/// Precision represents the precision of a timestamp.
/// It is used to convert timestamps between different precisions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Precision {
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
    Minute,
    Hour,
}

impl Precision {
    pub fn to_nanos(&self, amount: i64) -> Option<i64> {
        match self {
            Precision::Nanosecond => Some(amount),
            Precision::Microsecond => amount.checked_mul(1_000),
            Precision::Millisecond => amount.checked_mul(1_000_000),
            Precision::Second => amount.checked_mul(1_000_000_000),
            Precision::Minute => amount
                .checked_mul(60)
                .and_then(|a| a.checked_mul(1_000_000_000)),
            Precision::Hour => amount
                .checked_mul(3600)
                .and_then(|a| a.checked_mul(1_000_000_000)),
        }
    }

    pub fn to_millis(&self, amount: i64) -> Option<i64> {
        match self {
            Precision::Nanosecond => amount.checked_div(1_000_000),
            Precision::Microsecond => amount.checked_div(1_000),
            Precision::Millisecond => Some(amount),
            Precision::Second => amount.checked_mul(1_000),
            Precision::Minute => amount.checked_mul(60_000),
            Precision::Hour => amount.checked_mul(3_600_000),
        }
    }
}

impl Display for Precision {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Precision::Nanosecond => write!(f, "Precision::Nanosecond"),
            Precision::Microsecond => write!(f, "Precision::Microsecond"),
            Precision::Millisecond => write!(f, "Precision::Millisecond"),
            Precision::Second => write!(f, "Precision::Second"),
            Precision::Minute => write!(f, "Precision::Minute"),
            Precision::Hour => write!(f, "Precision::Hour"),
        }
    }
}

impl TryFrom<Precision> for TimeUnit {
    type Error = Error;

    fn try_from(precision: Precision) -> Result<Self, Self::Error> {
        Ok(match precision {
            Precision::Second => TimeUnit::Second,
            Precision::Millisecond => TimeUnit::Millisecond,
            Precision::Microsecond => TimeUnit::Microsecond,
            Precision::Nanosecond => TimeUnit::Nanosecond,
            _ => {
                return Err(Error::NotSupported {
                    feat: format!("convert {precision} into TimeUnit"),
                })
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use crate::precision::Precision;

    #[test]
    fn test_to_nanos() {
        assert_eq!(Precision::Nanosecond.to_nanos(1).unwrap(), 1);
        assert_eq!(Precision::Microsecond.to_nanos(1).unwrap(), 1_000);
        assert_eq!(Precision::Millisecond.to_nanos(1).unwrap(), 1_000_000);
        assert_eq!(Precision::Second.to_nanos(1).unwrap(), 1_000_000_000);
        assert_eq!(Precision::Minute.to_nanos(1).unwrap(), 60 * 1_000_000_000);
        assert_eq!(
            Precision::Hour.to_nanos(1).unwrap(),
            60 * 60 * 1_000_000_000
        );
    }

    #[test]
    fn test_to_millis() {
        assert_eq!(Precision::Nanosecond.to_millis(1_000_000).unwrap(), 1);
        assert_eq!(Precision::Microsecond.to_millis(1_000).unwrap(), 1);
        assert_eq!(Precision::Millisecond.to_millis(1).unwrap(), 1);
        assert_eq!(Precision::Second.to_millis(1).unwrap(), 1_000);
        assert_eq!(Precision::Minute.to_millis(1).unwrap(), 60 * 1_000);
        assert_eq!(Precision::Hour.to_millis(1).unwrap(), 60 * 60 * 1_000);
    }

    #[test]
    fn test_to_nanos_basic() {
        assert_eq!(Precision::Second.to_nanos(1), Some(1_000_000_000));
        assert_eq!(Precision::Minute.to_nanos(1), Some(60 * 1_000_000_000));
    }

    #[test]
    fn test_to_millis_basic() {
        assert_eq!(Precision::Second.to_millis(1), Some(1_000));
        assert_eq!(Precision::Minute.to_millis(1), Some(60_000));
    }

    #[test]
    fn test_to_nanos_overflow() {
        assert_eq!(Precision::Hour.to_nanos(i64::MAX / 100), None);
    }

    #[test]
    fn test_zero_input() {
        assert_eq!(Precision::Second.to_nanos(0), Some(0));
        assert_eq!(Precision::Minute.to_millis(0), Some(0));
    }
}
src/common/grpc/src/writer.rs (new file, 441 lines)
@@ -0,0 +1,441 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::Display;

use api::helper::values_with_capacity;
use api::v1::{Column, ColumnDataType, ColumnDataTypeExtension, SemanticType};
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use snafu::ensure;

use crate::error::{Result, TypeMismatchSnafu};
use crate::Error;

type ColumnName = String;

type RowCount = u32;

// TODO(fys): will remove in the future.
#[derive(Default)]
pub struct LinesWriter {
    column_name_index: HashMap<ColumnName, usize>,
    null_masks: Vec<BitVec>,
    batch: (Vec<Column>, RowCount),
    lines: usize,
}

impl LinesWriter {
    pub fn with_lines(lines: usize) -> Self {
        Self {
            lines,
            ..Default::default()
        }
    }

    pub fn write_ts(&mut self, column_name: &str, value: (i64, Precision)) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::TimestampMillisecond,
            SemanticType::Timestamp,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::TimestampMillisecond as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "timestamp",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values
            .timestamp_millisecond_values
            .push(to_ms_ts(value.1, value.0));
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag, None);
        ensure!(
            column.datatype == ColumnDataType::String as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "string",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.string_values.push(value.to_string());
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Uint64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Uint64 as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "u64",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.u64_values.push(value);
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Int64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Int64 as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "i64",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.i64_values.push(value);
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Float64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Float64 as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "f64",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.f64_values.push(value);
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::String,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::String as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "string",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.string_values.push(value.to_string());
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Boolean,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Boolean as i32,
            TypeMismatchSnafu {
                column_name,
                expected: "boolean",
                actual: format!("{:?}", column.datatype)
            }
        );
        // It is safe to use unwrap here, because values has been initialized in mut_column()
        let values = column.values.as_mut().unwrap();
        values.bool_values.push(value);
        self.null_masks[idx].push(false);
        Ok(())
    }

    pub fn commit(&mut self) {
        let batch = &mut self.batch;
        batch.1 += 1;

        for i in 0..batch.0.len() {
            let null_mask = &mut self.null_masks[i];
            if batch.1 as usize > null_mask.len() {
                null_mask.push(true);
            }
        }
    }

    pub fn finish(mut self) -> (Vec<Column>, RowCount) {
        let null_masks = self.null_masks;
        for (i, null_mask) in null_masks.into_iter().enumerate() {
            let columns = &mut self.batch.0;
            columns[i].null_mask = null_mask.into_vec();
        }
        self.batch
    }

    fn mut_column(
        &mut self,
        column_name: &str,
        datatype: ColumnDataType,
        semantic_type: SemanticType,
        datatype_extension: Option<ColumnDataTypeExtension>,
    ) -> (usize, &mut Column) {
        let column_names = &mut self.column_name_index;
        let column_idx = match column_names.get(column_name) {
            Some(i) => *i,
            None => {
                let new_idx = column_names.len();
                let batch = &mut self.batch;
                let to_insert = self.lines;
                let mut null_mask = BitVec::with_capacity(to_insert);
                null_mask.extend(BitVec::repeat(true, batch.1 as usize));
                self.null_masks.push(null_mask);
                batch.0.push(Column {
                    column_name: column_name.to_string(),
                    semantic_type: semantic_type.into(),
                    values: Some(values_with_capacity(datatype, to_insert)),
                    datatype: datatype as i32,
                    null_mask: Vec::default(),
                    datatype_extension,
                });
                let _ = column_names.insert(column_name.to_string(), new_idx);
                new_idx
            }
        };
        (column_idx, &mut self.batch.0[column_idx])
    }
}

pub fn to_ms_ts(p: Precision, ts: i64) -> i64 {
    match p {
        Precision::Nanosecond => ts / 1_000_000,
        Precision::Microsecond => ts / 1000,
        Precision::Millisecond => ts,
        Precision::Second => ts * 1000,
        Precision::Minute => ts * 1000 * 60,
        Precision::Hour => ts * 1000 * 60 * 60,
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Precision {
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
    Minute,
    Hour,
}

impl Display for Precision {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Precision::Nanosecond => write!(f, "Precision::Nanosecond"),
            Precision::Microsecond => write!(f, "Precision::Microsecond"),
            Precision::Millisecond => write!(f, "Precision::Millisecond"),
            Precision::Second => write!(f, "Precision::Second"),
            Precision::Minute => write!(f, "Precision::Minute"),
            Precision::Hour => write!(f, "Precision::Hour"),
        }
    }
}

impl TryFrom<Precision> for TimeUnit {
    type Error = Error;

    fn try_from(precision: Precision) -> std::result::Result<Self, Self::Error> {
        Ok(match precision {
            Precision::Second => TimeUnit::Second,
            Precision::Millisecond => TimeUnit::Millisecond,
            Precision::Microsecond => TimeUnit::Microsecond,
            Precision::Nanosecond => TimeUnit::Nanosecond,
            _ => {
                return Err(Error::NotSupported {
                    feat: format!("convert {precision} into TimeUnit"),
                })
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use api::v1::{ColumnDataType, SemanticType};
    use common_base::BitVec;

    use super::LinesWriter;
    use crate::writer::{to_ms_ts, Precision};

    #[test]
    fn test_lines_writer() {
        let mut writer = LinesWriter::with_lines(3);

        writer.write_tag("host", "host1").unwrap();
        writer.write_f64("cpu", 0.5).unwrap();
        writer.write_f64("memory", 0.4).unwrap();
        writer.write_string("name", "name1").unwrap();
        writer
            .write_ts("ts", (101011000, Precision::Millisecond))
            .unwrap();
        writer.commit();

        writer.write_tag("host", "host2").unwrap();
        writer
            .write_ts("ts", (102011001, Precision::Millisecond))
            .unwrap();
        writer.write_bool("enable_reboot", true).unwrap();
        writer.write_u64("year_of_service", 2).unwrap();
        writer.write_i64("temperature", 4).unwrap();
        writer.commit();

        writer.write_tag("host", "host3").unwrap();
        writer.write_f64("cpu", 0.4).unwrap();
        writer.write_u64("cpu_core_num", 16).unwrap();
        writer
            .write_ts("ts", (103011002, Precision::Millisecond))
            .unwrap();
        writer.commit();

        let insert_batch = writer.finish();
        assert_eq!(3, insert_batch.1);

        let columns = insert_batch.0;
        assert_eq!(9, columns.len());

        let column = &columns[0];
        assert_eq!("host", columns[0].column_name);
        assert_eq!(ColumnDataType::String as i32, column.datatype);
        assert_eq!(SemanticType::Tag as i32, column.semantic_type);
        assert_eq!(
            vec!["host1", "host2", "host3"],
            column.values.as_ref().unwrap().string_values
        );
        verify_null_mask(&column.null_mask, vec![false, false, false]);

        let column = &columns[1];
        assert_eq!("cpu", column.column_name);
        assert_eq!(ColumnDataType::Float64 as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![0.5, 0.4], column.values.as_ref().unwrap().f64_values);
        verify_null_mask(&column.null_mask, vec![false, true, false]);

        let column = &columns[2];
        assert_eq!("memory", column.column_name);
        assert_eq!(ColumnDataType::Float64 as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![0.4], column.values.as_ref().unwrap().f64_values);
        verify_null_mask(&column.null_mask, vec![false, true, true]);

        let column = &columns[3];
        assert_eq!("name", column.column_name);
        assert_eq!(ColumnDataType::String as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec!["name1"], column.values.as_ref().unwrap().string_values);
        verify_null_mask(&column.null_mask, vec![false, true, true]);

        let column = &columns[4];
        assert_eq!("ts", column.column_name);
        assert_eq!(ColumnDataType::TimestampMillisecond as i32, column.datatype);
        assert_eq!(SemanticType::Timestamp as i32, column.semantic_type);
        assert_eq!(
            vec![101011000, 102011001, 103011002],
            column.values.as_ref().unwrap().timestamp_millisecond_values
        );
        verify_null_mask(&column.null_mask, vec![false, false, false]);

        let column = &columns[5];
        assert_eq!("enable_reboot", column.column_name);
        assert_eq!(ColumnDataType::Boolean as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![true], column.values.as_ref().unwrap().bool_values);
        verify_null_mask(&column.null_mask, vec![true, false, true]);

        let column = &columns[6];
        assert_eq!("year_of_service", column.column_name);
        assert_eq!(ColumnDataType::Uint64 as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![2], column.values.as_ref().unwrap().u64_values);
        verify_null_mask(&column.null_mask, vec![true, false, true]);

        let column = &columns[7];
        assert_eq!("temperature", column.column_name);
        assert_eq!(ColumnDataType::Int64 as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![4], column.values.as_ref().unwrap().i64_values);
        verify_null_mask(&column.null_mask, vec![true, false, true]);

        let column = &columns[8];
        assert_eq!("cpu_core_num", column.column_name);
        assert_eq!(ColumnDataType::Uint64 as i32, column.datatype);
        assert_eq!(SemanticType::Field as i32, column.semantic_type);
        assert_eq!(vec![16], column.values.as_ref().unwrap().u64_values);
        verify_null_mask(&column.null_mask, vec![true, true, false]);
    }

    fn verify_null_mask(data: &[u8], expected: Vec<bool>) {
        let bitvec = BitVec::from_slice(data);
        for (idx, b) in expected.iter().enumerate() {
            assert_eq!(b, bitvec.get(idx).unwrap())
        }
    }

    #[test]
    fn test_to_ms() {
        assert_eq!(100, to_ms_ts(Precision::Nanosecond, 100110000));
        assert_eq!(100110, to_ms_ts(Precision::Microsecond, 100110000));
        assert_eq!(100110000, to_ms_ts(Precision::Millisecond, 100110000));
        assert_eq!(
            100110000 * 1000 * 60,
            to_ms_ts(Precision::Minute, 100110000)
        );
        assert_eq!(
            100110000 * 1000 * 60 * 60,
            to_ms_ts(Precision::Hour, 100110000)
        );
    }
}
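A short usage sketch of the `LinesWriter` API added above. It assumes the crate is imported as `common_grpc` and that the error type lives at `common_grpc::error::Result`; values are made up for illustration.

// One commit() per row; finish() yields the assembled columns and row count.
use common_grpc::writer::{LinesWriter, Precision};

fn build_batch() -> common_grpc::error::Result<()> {
    let mut writer = LinesWriter::with_lines(1);
    writer.write_tag("host", "host1")?;
    writer.write_f64("cpu", 0.5)?;
    writer.write_ts("ts", (1_700_000_000_000, Precision::Millisecond))?;
    writer.commit();

    let (columns, rows) = writer.finish();
    assert_eq!(1, rows);
    assert_eq!(3, columns.len());
    Ok(())
}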
@@ -119,17 +119,15 @@ fn build_struct(
    }

    pub fn scalar_udf() -> ScalarUDF {
-       // TODO(LFC): Use the new Datafusion UDF impl.
+       ScalarUDF {
-       #[allow(deprecated)]
+           name: Self::name().to_string(),
-       ScalarUDF::new(
+           signature: Signature::new(
-           Self::name(),
-           &Signature::new(
                TypeSignature::Exact(Self::input_type()),
                Volatility::Immutable,
            ),
-           &(Arc::new(|_: &_| Ok(Arc::new(Self::return_type()))) as _),
+           return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
-           &(Arc::new(Self::calc) as _),
+           fun: Arc::new(Self::calc),
-       )
+       }
    }

    fn input_type() -> Vec<DataType> {
@@ -18,7 +18,6 @@ use tokio::sync::RwLock;

use crate::error::Result;
use crate::instruction::CacheIdent;
-use crate::key::schema_name::SchemaNameKey;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
@@ -108,10 +107,6 @@ where
    let key: TableNameKey = (&table_name).into();
    self.invalidate_key(&key.as_raw_key()).await
    }
-   CacheIdent::SchemaName(schema_name) => {
-       let key: SchemaNameKey = (&schema_name).into();
-       self.invalidate_key(&key.as_raw_key()).await;
-   }
    }
    }
    Ok(())
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::collections::HashMap;
use std::sync::Arc;

-use api::region::RegionResponse;
+use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
-use api::v1::region::{QueryRequest, RegionRequest};
pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;

@@ -26,7 +26,7 @@ use crate::peer::Peer;
#[async_trait::async_trait]
pub trait Datanode: Send + Sync {
    /// Handles DML, and DDL requests.
-   async fn handle(&self, request: RegionRequest) -> Result<RegionResponse>;
+   async fn handle(&self, request: RegionRequest) -> Result<HandleResponse>;

    /// Handles query requests
    async fn handle_query(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
@@ -42,3 +42,27 @@ pub trait DatanodeManager: Send + Sync {
}

pub type DatanodeManagerRef = Arc<dyn DatanodeManager>;

+/// This result struct is derived from [RegionResponse]
+#[derive(Debug)]
+pub struct HandleResponse {
+    pub affected_rows: AffectedRows,
+    pub extension: HashMap<String, Vec<u8>>,
+}
+
+impl HandleResponse {
+    pub fn from_region_response(region_response: RegionResponse) -> Self {
+        Self {
+            affected_rows: region_response.affected_rows as _,
+            extension: region_response.extension,
+        }
+    }
+
+    /// Creates one response without extension
+    pub fn new(affected_rows: AffectedRows) -> Self {
+        Self {
+            affected_rows,
+            extension: Default::default(),
+        }
+    }
+}
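A minimal sketch of how the new `HandleResponse` is meant to be used, assuming the struct lives in the `common_meta` crate's `datanode_manager` module as the diff's `use crate::datanode_manager::HandleResponse;` suggests:

// Adapt a datanode RegionResponse into the new HandleResponse wrapper.
use api::v1::region::RegionResponse;
use common_meta::datanode_manager::HandleResponse;

fn adapt(resp: RegionResponse) -> HandleResponse {
    // Carries affected_rows plus any engine-specific extension payload.
    HandleResponse::from_region_response(resp)
}

fn empty_ok() -> HandleResponse {
    HandleResponse::new(0) // zero affected rows, no extensions
}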
@@ -51,7 +51,7 @@ impl AlterTableProcedure {
    AlterKind::RenameTable { new_table_name } => {
        new_info.name = new_table_name.to_string();
    }
-   AlterKind::DropColumns { .. } | AlterKind::ChangeColumnTypes { .. } => {}
+   AlterKind::DropColumns { .. } => {}
    }

    Ok(new_info)
@@ -271,7 +271,7 @@ impl CreateTableProcedure {
    ///
    /// Abort(not-retry):
    /// - Failed to create table metadata.
-   async fn on_create_metadata(&mut self) -> Result<Status> {
+   async fn on_create_metadata(&self) -> Result<Status> {
        let table_id = self.table_id();
        let manager = &self.context.table_metadata_manager;

@@ -285,7 +285,6 @@ impl CreateTableProcedure {
        .await?;
        info!("Created table metadata for table {table_id}");

-       self.creator.opening_regions.clear();
        Ok(Status::done_with_output(table_id))
    }
}
@@ -386,7 +385,7 @@ impl TableCreator {
    }
}

-#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
+#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr)]
pub enum CreateTableState {
    /// Prepares to create the table
    Prepare,
@@ -165,7 +165,7 @@ mod tests {
    async fn test_next_without_logical_tables() {
        let datanode_manager = Arc::new(MockDatanodeManager::new(()));
        let ddl_context = new_ddl_context(datanode_manager);
-       create_physical_table(&ddl_context, 0, "phy").await;
+       create_physical_table(ddl_context.clone(), 0, "phy").await;
        // It always starts from Logical
        let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
        let mut ctx = DropDatabaseContext {
@@ -199,7 +199,7 @@ mod tests {
    async fn test_next_with_logical_tables() {
        let datanode_manager = Arc::new(MockDatanodeManager::new(()));
        let ddl_context = new_ddl_context(datanode_manager);
-       let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+       let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
        create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
        // It always starts from Logical
        let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
@@ -122,12 +122,12 @@ impl State for DropDatabaseExecutor {
mod tests {
    use std::sync::Arc;

-   use api::region::RegionResponse;
    use api::v1::region::{QueryRequest, RegionRequest};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_error::ext::BoxedError;
    use common_recordbatch::SendableRecordBatchStream;

+   use crate::datanode_manager::HandleResponse;
    use crate::ddl::drop_database::cursor::DropDatabaseCursor;
    use crate::ddl::drop_database::executor::DropDatabaseExecutor;
    use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
@@ -144,8 +144,8 @@ mod tests {

    #[async_trait::async_trait]
    impl MockDatanodeHandler for NaiveDatanodeHandler {
-       async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
+       async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
-           Ok(RegionResponse::new(0))
+           Ok(HandleResponse::new(0))
        }

        async fn handle_query(
@@ -161,7 +161,7 @@ mod tests {
    async fn test_next_with_physical_table() {
        let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
        let ddl_context = new_ddl_context(datanode_manager);
-       let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+       let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
        let (_, table_route) = ddl_context
            .table_metadata_manager
            .table_route_manager()
@@ -211,7 +211,7 @@ mod tests {
    async fn test_next_logical_table() {
        let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
        let ddl_context = new_ddl_context(datanode_manager);
-       let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+       let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
        create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
        let logical_table_id = physical_table_id + 1;
        let (_, table_route) = ddl_context
@@ -291,7 +291,7 @@ mod tests {

    #[async_trait::async_trait]
    impl MockDatanodeHandler for RetryErrorDatanodeHandler {
-       async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
+       async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
            Err(Error::RetryLater {
                source: BoxedError::new(
                    error::UnexpectedSnafu {
@@ -315,7 +315,7 @@ mod tests {
    async fn test_next_retryable_err() {
        let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
        let ddl_context = new_ddl_context(datanode_manager);
-       let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+       let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
        let (_, table_route) = ddl_context
            .table_metadata_manager
            .table_route_manager()
@@ -18,12 +18,10 @@ use common_procedure::Status;
 use serde::{Deserialize, Serialize};
 
 use super::end::DropDatabaseEnd;
-use crate::cache_invalidator::Context;
 use crate::ddl::drop_database::{DropDatabaseContext, State};
 use crate::ddl::DdlContext;
 use crate::error::Result;
-use crate::instruction::CacheIdent;
-use crate::key::schema_name::{SchemaName, SchemaNameKey};
+use crate::key::schema_name::SchemaNameKey;
 
 #[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct DropDatabaseRemoveMetadata;
@@ -42,53 +40,7 @@ impl State for DropDatabaseRemoveMetadata {
             .delete(SchemaNameKey::new(&ctx.catalog, &ctx.schema))
             .await?;
 
-        return Ok((Box::new(DropMetadataBroadcast), Status::executing(true)));
-    }
-
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub(crate) struct DropMetadataBroadcast;
-
-impl DropMetadataBroadcast {
-    /// Invalidates frontend caches
-    async fn invalidate_schema_cache(
-        &self,
-        ddl_ctx: &DdlContext,
-        db_ctx: &mut DropDatabaseContext,
-    ) -> Result<()> {
-        let cache_invalidator = &ddl_ctx.cache_invalidator;
-        let ctx = Context {
-            subject: Some("Invalidate schema cache by dropping database".to_string()),
-        };
-
-        cache_invalidator
-            .invalidate(
-                &ctx,
-                vec![CacheIdent::SchemaName(SchemaName {
-                    catalog_name: db_ctx.catalog.clone(),
-                    schema_name: db_ctx.schema.clone(),
-                })],
-            )
-            .await?;
-
-        Ok(())
-    }
-}
-
-#[async_trait::async_trait]
-#[typetag::serde]
-impl State for DropMetadataBroadcast {
-    async fn next(
-        &mut self,
-        ddl_ctx: &DdlContext,
-        ctx: &mut DropDatabaseContext,
-    ) -> Result<(Box<dyn State>, Status)> {
-        self.invalidate_schema_cache(ddl_ctx, ctx).await?;
-        Ok((Box::new(DropDatabaseEnd), Status::done()))
+        return Ok((Box::new(DropDatabaseEnd), Status::done()));
     }
 
     fn as_any(&self) -> &dyn Any {
@@ -101,7 +53,7 @@ mod tests {
     use std::sync::Arc;
 
     use crate::ddl::drop_database::end::DropDatabaseEnd;
-    use crate::ddl::drop_database::metadata::{DropDatabaseRemoveMetadata, DropMetadataBroadcast};
+    use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata;
     use crate::ddl::drop_database::{DropDatabaseContext, State};
     use crate::key::schema_name::SchemaNameKey;
     use crate::test_util::{new_ddl_context, MockDatanodeManager};
@@ -124,23 +76,14 @@ mod tests {
             tables: None,
         };
         let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
-        state
-            .as_any()
-            .downcast_ref::<DropMetadataBroadcast>()
-            .unwrap();
-        assert!(!status.is_done());
+        state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
+        assert!(status.is_done());
         assert!(!ddl_context
             .table_metadata_manager
             .schema_manager()
             .exists(SchemaNameKey::new("foo", "bar"))
             .await
             .unwrap());
-
-        let mut state = DropMetadataBroadcast;
-        let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
-        state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
-        assert!(status.is_done());
-
         // Schema not exists
         let mut state = DropDatabaseRemoveMetadata;
         let mut ctx = DropDatabaseContext {
@@ -150,10 +93,7 @@ mod tests {
             tables: None,
         };
         let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
-        state
-            .as_any()
-            .downcast_ref::<DropMetadataBroadcast>()
-            .unwrap();
-        assert!(!status.is_done());
+        state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
+        assert!(status.is_done());
     }
 }

@@ -46,7 +46,7 @@ pub struct DropTableProcedure {
     /// The serializable data.
     pub data: DropTableData,
     /// The guards of opening regions.
-    pub(crate) dropping_regions: Vec<OperatingRegionGuard>,
+    pub dropping_regions: Vec<OperatingRegionGuard>,
     /// The drop table executor.
     executor: DropTableExecutor,
 }
@@ -153,7 +153,7 @@ impl DropTableProcedure {
     }
 
     /// Deletes metadata tombstone.
-    async fn on_delete_metadata_tombstone(&mut self) -> Result<Status> {
+    async fn on_delete_metadata_tombstone(&self) -> Result<Status> {
         let table_route_value = &TableRouteValue::new(
             self.data.task.table_id,
             // Safety: checked
@@ -163,8 +163,6 @@ impl DropTableProcedure {
         self.executor
             .on_delete_metadata_tombstone(&self.context, table_route_value)
             .await?;
-
-        self.dropping_regions.clear();
         Ok(Status::done())
     }
 }
@@ -268,7 +266,7 @@ impl DropTableData {
 }
 
 /// The state of drop table.
-#[derive(Debug, Serialize, Deserialize, AsRefStr, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, AsRefStr)]
 pub enum DropTableState {
     /// Prepares to drop the table
     Prepare,

@@ -106,6 +106,19 @@ impl DropTableExecutor {
         ctx: &DdlContext,
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
+        let table_name_key = TableNameKey::new(
+            &self.table.catalog_name,
+            &self.table.schema_name,
+            &self.table.table_name,
+        );
+        if !ctx
+            .table_metadata_manager
+            .table_name_manager()
+            .exists(table_name_key)
+            .await?
+        {
+            return Ok(());
+        }
         ctx.table_metadata_manager
             .delete_table_metadata(self.table_id, &self.table, table_route_value)
             .await
@@ -139,6 +152,19 @@ impl DropTableExecutor {
         ctx: &DdlContext,
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
+        let table_name_key = TableNameKey::new(
+            &self.table.catalog_name,
+            &self.table.schema_name,
+            &self.table.table_name,
+        );
+        if ctx
+            .table_metadata_manager
+            .table_name_manager()
+            .exists(table_name_key)
+            .await?
+        {
+            return Ok(());
+        }
         ctx.table_metadata_manager
             .restore_table_metadata(self.table_id, &self.table, table_route_value)
             .await

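The two hunks above guard delete_table_metadata and restore_table_metadata behind a table-name existence check, so a retried procedure step returns early instead of failing or repeating work. A minimal sketch of that idempotency pattern, using a hypothetical in-memory store rather than the actual TableMetadataManager API:

// Hedged sketch: `MetadataStore` and its string keys are stand-ins, not GreptimeDB types.
use std::collections::HashMap;

struct MetadataStore {
    entries: HashMap<String, String>,
}

impl MetadataStore {
    /// Deleting twice is harmless: if the entry is already gone, the second
    /// call returns early, so a retried procedure step stays idempotent.
    fn delete_table(&mut self, name: &str) -> Result<(), String> {
        if !self.entries.contains_key(name) {
            return Ok(());
        }
        self.entries.remove(name);
        Ok(())
    }
}

fn main() {
    let mut store = MetadataStore {
        entries: HashMap::from([("t".to_string(), "metadata".to_string())]),
    };
    store.delete_table("t").unwrap();
    // Retrying after a partial failure is a no-op rather than an error.
    store.delete_table("t").unwrap();
}
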
@@ -52,9 +52,5 @@ pub(crate) fn build_new_physical_table_info(
         columns.push(col.column_schema.clone());
     }
 
-    if let Some(time_index) = *time_index {
-        raw_table_info.meta.schema.column_schemas[time_index].set_time_index();
-    }
-
     raw_table_info
 }

@@ -47,7 +47,7 @@ pub async fn create_physical_table_metadata(
 }
 
 pub async fn create_physical_table(
-    ddl_context: &DdlContext,
+    ddl_context: DdlContext,
     cluster_id: ClusterId,
     name: &str,
 ) -> TableId {
@@ -67,7 +67,7 @@ pub async fn create_physical_table(
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
     create_physical_table_metadata(
-        ddl_context,
+        &ddl_context,
         create_physical_table_task.table_info.clone(),
         TableRouteValue::Physical(table_route),
     )
@@ -81,7 +81,7 @@ pub async fn create_logical_table(
     cluster_id: ClusterId,
     physical_table_id: TableId,
     table_name: &str,
-) -> TableId {
+) {
     use std::assert_matches::assert_matches;
 
     let tasks = vec![test_create_logical_table_task(table_name)];
@@ -91,14 +91,6 @@ pub async fn create_logical_table(
     assert_matches!(status, Status::Executing { persist: true });
     let status = procedure.on_create_metadata().await.unwrap();
     assert_matches!(status, Status::Done { .. });
-
-    let Status::Done {
-        output: Some(output),
-    } = status
-    else {
-        panic!("Unexpected status: {:?}", status);
-    };
-    output.downcast_ref::<Vec<u32>>().unwrap()[0]
 }
 
 pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {

@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use api::region::RegionResponse;
 use api::v1::region::{QueryRequest, RegionRequest};
 use common_error::ext::{BoxedError, ErrorExt, StackError};
 use common_error::status_code::StatusCode;
@@ -21,13 +20,14 @@ use common_telemetry::debug;
 use snafu::{ResultExt, Snafu};
 use tokio::sync::mpsc;
 
+use crate::datanode_manager::HandleResponse;
 use crate::error::{self, Error, Result};
 use crate::peer::Peer;
 use crate::test_util::MockDatanodeHandler;
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for () {
-    async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
         unreachable!()
     }
 
@@ -45,10 +45,10 @@ pub struct DatanodeWatcher(pub mpsc::Sender<(Peer, RegionRequest)>);
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for DatanodeWatcher {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
         debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
         self.0.send((peer.clone(), request)).await.unwrap();
-        Ok(RegionResponse::new(0))
+        Ok(HandleResponse::new(0))
     }
 
     async fn handle_query(
@@ -65,7 +65,7 @@ pub struct RetryErrorDatanodeHandler;
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for RetryErrorDatanodeHandler {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
         debug!("Returning retry later for request: {request:?}, peer: {peer:?}");
         Err(Error::RetryLater {
             source: BoxedError::new(
@@ -91,7 +91,7 @@ pub struct UnexpectedErrorDatanodeHandler;
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for UnexpectedErrorDatanodeHandler {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
         debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
         error::UnexpectedSnafu {
             err_msg: "mock error",
@@ -135,7 +135,7 @@ impl ErrorExt for MockRequestOutdatedError {
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for RequestOutdatedErrorDatanodeHandler {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
         debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
         Err(BoxedError::new(MockRequestOutdatedError)).context(error::ExternalSnafu)
     }
@@ -154,9 +154,9 @@ pub struct NaiveDatanodeHandler;
 
 #[async_trait::async_trait]
 impl MockDatanodeHandler for NaiveDatanodeHandler {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
         debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
-        Ok(RegionResponse::new(0))
+        Ok(HandleResponse::new(0))
     }
 
     async fn handle_query(

@@ -128,9 +128,9 @@ async fn test_on_prepare_different_physical_table() {
     let datanode_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(datanode_manager);
 
-    let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
+    let phy1_id = create_physical_table(ddl_context.clone(), cluster_id, "phy1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
-    let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
+    let phy2_id = create_physical_table(ddl_context.clone(), cluster_id, "phy2").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
 
     let tasks = vec![
@@ -150,7 +150,7 @@ async fn test_on_prepare_logical_table_not_exists() {
     let ddl_context = new_ddl_context(datanode_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
 
@@ -172,7 +172,7 @@ async fn test_on_prepare() {
     let ddl_context = new_ddl_context(datanode_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
@@ -196,7 +196,7 @@ async fn test_on_update_metadata() {
     let ddl_context = new_ddl_context(datanode_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
@@ -233,7 +233,7 @@ async fn test_on_part_duplicate_alter_request() {
     let ddl_context = new_ddl_context(datanode_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;

@@ -21,12 +21,9 @@ use api::v1::{ColumnDataType, SemanticType};
 use common_error::ext::ErrorExt;
 use common_error::status_code::StatusCode;
 use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
-use common_procedure_test::{
-    execute_procedure_until, execute_procedure_until_done, MockContextProvider,
-};
-use store_api::storage::RegionId;
+use common_procedure_test::MockContextProvider;
 
-use crate::ddl::create_table::{CreateTableProcedure, CreateTableState};
+use crate::ddl::create_table::CreateTableProcedure;
 use crate::ddl::test_util::columns::TestColumnDefBuilder;
 use crate::ddl::test_util::create_table::{
     build_raw_table_info_from_expr, TestCreateTableExprBuilder,
@@ -36,9 +33,8 @@ use crate::ddl::test_util::datanode_handler::{
 };
 use crate::error::Error;
 use crate::key::table_route::TableRouteValue;
-use crate::kv_backend::memory::MemoryKvBackend;
 use crate::rpc::ddl::CreateTableTask;
-use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager};
+use crate::test_util::{new_ddl_context, MockDatanodeManager};
 
 fn test_create_table_task(name: &str) -> CreateTableTask {
     let create_table = TestCreateTableExprBuilder::default()
@@ -248,39 +244,3 @@ async fn test_on_create_metadata() {
     let table_id = status.downcast_output_ref::<u32>().unwrap();
     assert_eq!(*table_id, 1024);
 }
-
-#[tokio::test]
-async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
-    let cluster_id = 1;
-
-    let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
-    let kv_backend = Arc::new(MemoryKvBackend::new());
-    let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
-
-    let task = test_create_table_task("foo");
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
-
-    execute_procedure_until(&mut procedure, |p| {
-        p.creator.data.state == CreateTableState::CreateMetadata
-    })
-    .await;
-
-    // Ensure that after running to the state `CreateMetadata`(just past `DatanodeCreateRegions`),
-    // the opening regions should be recorded:
-    let guards = &procedure.creator.opening_regions;
-    assert_eq!(guards.len(), 1);
-    let (datanode_id, region_id) = (0, RegionId::new(procedure.table_id(), 0));
-    assert_eq!(guards[0].info(), (datanode_id, region_id));
-    assert!(ddl_context
-        .memory_region_keeper
-        .contains(datanode_id, region_id));
-
-    execute_procedure_until_done(&mut procedure).await;
-
-    // Ensure that when run to the end, the opening regions should be cleared:
-    let guards = &procedure.creator.opening_regions;
-    assert!(guards.is_empty());
-    assert!(!ddl_context
-        .memory_region_keeper
-        .contains(datanode_id, region_id));
-}

@@ -42,7 +42,7 @@ async fn test_drop_database_with_logical_tables() {
         .await
         .unwrap();
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
@@ -91,7 +91,7 @@ async fn test_drop_database_retryable_error() {
         .await
         .unwrap();
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
     // Creates 3 logical tables
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
     create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;

@@ -19,21 +19,17 @@ use api::v1::region::{region_request, RegionRequest};
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_error::ext::ErrorExt;
 use common_error::status_code::StatusCode;
-use common_procedure::Procedure;
-use common_procedure_test::{
-    execute_procedure_until, execute_procedure_until_done, new_test_procedure_context,
-};
+use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId};
+use common_procedure_test::MockContextProvider;
 use store_api::storage::RegionId;
-use table::metadata::TableId;
 use tokio::sync::mpsc;
 
 use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
-use crate::ddl::drop_table::{DropTableProcedure, DropTableState};
+use crate::ddl::drop_table::DropTableProcedure;
 use crate::ddl::test_util::create_table::test_create_table_task;
 use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
 use crate::ddl::test_util::{
-    create_logical_table, create_physical_table, create_physical_table_metadata,
-    test_create_logical_table_task, test_create_physical_table_task,
+    create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
 };
 use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
 use crate::key::table_route::TableRouteValue;
@@ -62,7 +58,14 @@ async fn test_on_prepare_table_not_exists_err() {
         .await
         .unwrap();
 
-    let task = new_drop_table_task("bar", table_id, false);
+    let task = DropTableTask {
+        catalog: DEFAULT_CATALOG_NAME.to_string(),
+        schema: DEFAULT_SCHEMA_NAME.to_string(),
+        table: "bar".to_string(),
+        table_id,
+        drop_if_exists: false,
+    };
+
     let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_eq!(err.status_code(), StatusCode::TableNotFound);
@@ -87,12 +90,26 @@ async fn test_on_prepare_table() {
         .await
         .unwrap();
 
-    let task = new_drop_table_task("bar", table_id, true);
+    let task = DropTableTask {
+        catalog: DEFAULT_CATALOG_NAME.to_string(),
+        schema: DEFAULT_SCHEMA_NAME.to_string(),
+        table: "bar".to_string(),
+        table_id,
+        drop_if_exists: true,
+    };
+
     // Drop if exists
     let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
     procedure.on_prepare().await.unwrap();
 
-    let task = new_drop_table_task(table_name, table_id, false);
+    let task = DropTableTask {
+        catalog: DEFAULT_CATALOG_NAME.to_string(),
+        schema: DEFAULT_SCHEMA_NAME.to_string(),
+        table: table_name.to_string(),
+        table_id,
+        drop_if_exists: false,
+    };
+
     // Drop table
     let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
     procedure.on_prepare().await.unwrap();
@@ -141,7 +158,13 @@ async fn test_on_datanode_drop_regions() {
         .await
         .unwrap();
 
-    let task = new_drop_table_task(table_name, table_id, false);
+    let task = DropTableTask {
+        catalog: DEFAULT_CATALOG_NAME.to_string(),
+        schema: DEFAULT_SCHEMA_NAME.to_string(),
+        table: table_name.to_string(),
+        table_id,
+        drop_if_exists: false,
+    };
     // Drop table
     let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
     procedure.on_prepare().await.unwrap();
@@ -211,7 +234,10 @@ async fn test_on_rollback() {
         ddl_context.clone(),
     );
     procedure.on_prepare().await.unwrap();
-    let ctx = new_test_procedure_context();
+    let ctx = ProcedureContext {
+        procedure_id: ProcedureId::random(),
+        provider: Arc::new(MockContextProvider::default()),
+    };
    procedure.execute(&ctx).await.unwrap();
    // Triggers procedure to create table metadata
    let status = procedure.execute(&ctx).await.unwrap();
@@ -221,10 +247,20 @@ async fn test_on_rollback() {
     let expected_kvs = kv_backend.dump();
     // Drops the physical table
     {
-        let task = new_drop_table_task("phy_table", physical_table_id, false);
+        let task = DropTableTask {
+            catalog: DEFAULT_CATALOG_NAME.to_string(),
+            schema: DEFAULT_SCHEMA_NAME.to_string(),
+            table: "phy_table".to_string(),
+            table_id: physical_table_id,
+            drop_if_exists: false,
+        };
         let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
         procedure.on_prepare().await.unwrap();
         procedure.on_delete_metadata().await.unwrap();
+        let ctx = ProcedureContext {
+            procedure_id: ProcedureId::random(),
+            provider: Arc::new(MockContextProvider::default()),
+        };
         procedure.rollback(&ctx).await.unwrap();
         // Rollback again
         procedure.rollback(&ctx).await.unwrap();
@@ -233,66 +269,23 @@ async fn test_on_rollback() {
     }
 
     // Drops the logical table
-    let task = new_drop_table_task("foo", table_ids[0], false);
+    let task = DropTableTask {
+        catalog: DEFAULT_CATALOG_NAME.to_string(),
+        schema: DEFAULT_SCHEMA_NAME.to_string(),
+        table: "foo".to_string(),
+        table_id: table_ids[0],
+        drop_if_exists: false,
+    };
     let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
     procedure.on_prepare().await.unwrap();
     procedure.on_delete_metadata().await.unwrap();
+    let ctx = ProcedureContext {
+        procedure_id: ProcedureId::random(),
+        provider: Arc::new(MockContextProvider::default()),
+    };
     procedure.rollback(&ctx).await.unwrap();
     // Rollback again
     procedure.rollback(&ctx).await.unwrap();
     let kvs = kv_backend.dump();
     assert_eq!(kvs, expected_kvs);
 }
-
-fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool) -> DropTableTask {
-    DropTableTask {
-        catalog: DEFAULT_CATALOG_NAME.to_string(),
-        schema: DEFAULT_SCHEMA_NAME.to_string(),
-        table: table_name.to_string(),
-        table_id,
-        drop_if_exists,
-    }
-}
-
-#[tokio::test]
-async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
-    let cluster_id = 1;
-
-    let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
-    let kv_backend = Arc::new(MemoryKvBackend::new());
-    let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
-
-    let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
-    let logical_table_id =
-        create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
-
-    let inner_test = |task: DropTableTask| async {
-        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
-        execute_procedure_until(&mut procedure, |p| {
-            p.data.state == DropTableState::InvalidateTableCache
-        })
-        .await;
-
-        // Ensure that after running to the state `InvalidateTableCache`(just past `DeleteMetadata`),
-        // the dropping regions should be recorded:
-        let guards = &procedure.dropping_regions;
-        assert_eq!(guards.len(), 1);
-        let (datanode_id, region_id) = (0, RegionId::new(physical_table_id, 0));
-        assert_eq!(guards[0].info(), (datanode_id, region_id));
-        assert!(ddl_context
-            .memory_region_keeper
-            .contains(datanode_id, region_id));
-
-        execute_procedure_until_done(&mut procedure).await;
-
-        // Ensure that when run to the end, the dropping regions should be cleared:
-        let guards = &procedure.dropping_regions;
-        assert!(guards.is_empty());
-        assert!(!ddl_context
-            .memory_region_keeper
-            .contains(datanode_id, region_id));
-    };
-
-    inner_test(new_drop_table_task("s", logical_table_id, false)).await;
-    inner_test(new_drop_table_task("t", physical_table_id, false)).await;
-}

@@ -258,7 +258,7 @@ pub enum Error {
         error: Utf8Error,
     },
 
-    #[snafu(display("Table not found: '{}'", table_name))]
+    #[snafu(display("Table nod found, table: {}", table_name))]
     TableNotFound {
         table_name: String,
         location: Location,
@@ -421,8 +421,8 @@ pub enum Error {
     #[snafu(display("Invalid role: {}", role))]
     InvalidRole { role: i32, location: Location },
 
-    #[snafu(display("Failed to move values: {err_msg}"))]
-    MoveValues { err_msg: String, location: Location },
+    #[snafu(display("Atomic key changed: {err_msg}"))]
+    CasKeyChanged { err_msg: String, location: Location },
 
     #[snafu(display("Failed to parse {} from utf8", name))]
     FromUtf8 {
@@ -444,7 +444,7 @@ impl ErrorExt for Error {
             | EtcdFailed { .. }
             | EtcdTxnFailed { .. }
             | ConnectEtcd { .. }
-            | MoveValues { .. } => StatusCode::Internal,
+            | CasKeyChanged { .. } => StatusCode::Internal,
 
             SerdeJson { .. }
             | ParseOption { .. }

@@ -21,7 +21,6 @@ use store_api::storage::{RegionId, RegionNumber};
 use strum::Display;
 use table::metadata::TableId;
 
-use crate::key::schema_name::SchemaName;
 use crate::table_name::TableName;
 use crate::{ClusterId, DatanodeId};
 
@@ -157,7 +156,6 @@ pub struct UpgradeRegion {
 pub enum CacheIdent {
     TableId(TableId),
     TableName(TableName),
-    SchemaName(SchemaName),
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, Display, PartialEq)]

@@ -88,13 +88,13 @@ use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
 use self::table_route::{TableRouteManager, TableRouteValue};
 use self::tombstone::TombstoneManager;
 use crate::ddl::utils::region_storage_path;
-use crate::error::{self, Result, SerdeJsonSnafu};
+use crate::error::{self, Result, SerdeJsonSnafu, UnexpectedSnafu};
 use crate::key::table_route::TableRouteKey;
+use crate::key::tombstone::Key;
 use crate::key::txn_helper::TxnOpGetResponseSet;
-use crate::kv_backend::txn::{Txn, TxnOp};
+use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse};
 use crate::kv_backend::KvBackendRef;
 use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
-use crate::rpc::store::BatchDeleteRequest;
 use crate::table_name::TableName;
 use crate::DatanodeId;
 
@@ -466,18 +466,17 @@ impl TableMetadataManager {
             txn = txn.merge(create_datanode_table_txn);
         }
 
-        let mut r = self.kv_backend.txn(txn).await?;
+        let r = self.kv_backend.txn(txn).await?;
 
         // Checks whether metadata was already created.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
-            let remote_table_info = on_create_table_info_failure(&mut set)?
+            let remote_table_info = on_create_table_info_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table info during the create table metadata",
                 })?
                 .into_inner();
 
-            let remote_table_route = on_create_table_route_failure(&mut set)?
+            let remote_table_route = on_create_table_route_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table route during the create table metadata",
                 })?
@@ -506,8 +505,8 @@ impl TableMetadataManager {
         let mut txns = Vec::with_capacity(3 * len);
         struct OnFailure<F1, R1, F2, R2>
         where
-            F1: FnOnce(&mut TxnOpGetResponseSet) -> R1,
-            F2: FnOnce(&mut TxnOpGetResponseSet) -> R2,
+            F1: FnOnce(&Vec<TxnOpResponse>) -> R1,
+            F2: FnOnce(&Vec<TxnOpResponse>) -> R2,
         {
             table_info_value: TableInfoValue,
             on_create_table_info_failure: F1,
@@ -552,19 +551,18 @@ impl TableMetadataManager {
         }
 
         let txn = Txn::merge_all(txns);
-        let mut r = self.kv_backend.txn(txn).await?;
+        let r = self.kv_backend.txn(txn).await?;
 
         // Checks whether metadata was already created.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
             for on_failure in on_failures {
-                let remote_table_info = (on_failure.on_create_table_info_failure)(&mut set)?
+                let remote_table_info = (on_failure.on_create_table_info_failure)(&r.responses)?
                     .context(error::UnexpectedSnafu {
                         err_msg: "Reads the empty table info during the create table metadata",
                     })?
                     .into_inner();
 
-                let remote_table_route = (on_failure.on_create_table_route_failure)(&mut set)?
+                let remote_table_route = (on_failure.on_create_table_route_failure)(&r.responses)?
                     .context(error::UnexpectedSnafu {
                         err_msg: "Reads the empty table route during the create table metadata",
                     })?
@@ -584,7 +582,7 @@ impl TableMetadataManager {
         table_id: TableId,
         table_name: &TableName,
         table_route_value: &TableRouteValue,
-    ) -> Result<Vec<Vec<u8>>> {
+    ) -> Result<Vec<Key>> {
         // Builds keys
         let datanode_ids = if table_route_value.is_physical() {
             region_distribution(table_route_value.region_routes()?)
@@ -606,11 +604,11 @@ impl TableMetadataManager {
             .map(|datanode_id| DatanodeTableKey::new(datanode_id, table_id))
             .collect::<HashSet<_>>();
 
-        keys.push(table_name.as_raw_key());
-        keys.push(table_info_key.as_raw_key());
-        keys.push(table_route_key.as_raw_key());
+        keys.push(Key::compare_and_swap(table_name.as_raw_key()));
+        keys.push(Key::new(table_info_key.as_raw_key()));
+        keys.push(Key::new(table_route_key.as_raw_key()));
         for key in &datanode_table_keys {
-            keys.push(key.as_raw_key());
+            keys.push(Key::new(key.as_raw_key()));
         }
         Ok(keys)
     }
@@ -624,7 +622,8 @@ impl TableMetadataManager {
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
         let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
-        self.tombstone_manager.create(keys).await
+        self.tombstone_manager.create(keys).await?;
+        Ok(())
     }
 
     /// Deletes metadata tombstone for table **permanently**.
@@ -635,7 +634,11 @@ impl TableMetadataManager {
         table_name: &TableName,
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
-        let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
+        let keys = self
+            .table_metadata_keys(table_id, table_name, table_route_value)?
+            .into_iter()
+            .map(|key| key.into_bytes())
+            .collect::<Vec<_>>();
         self.tombstone_manager.delete(keys).await
     }
 
@@ -648,7 +651,8 @@ impl TableMetadataManager {
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
         let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
-        self.tombstone_manager.restore(keys).await
+        self.tombstone_manager.restore(keys).await?;
+        Ok(())
     }
 
     /// Deletes metadata for table **permanently**.
@@ -659,11 +663,21 @@ impl TableMetadataManager {
         table_name: &TableName,
         table_route_value: &TableRouteValue,
     ) -> Result<()> {
-        let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
-        let _ = self
-            .kv_backend
-            .batch_delete(BatchDeleteRequest::new().with_keys(keys))
-            .await?;
+        let operations = self
+            .table_metadata_keys(table_id, table_name, table_route_value)?
+            .into_iter()
+            .map(|key| TxnOp::Delete(key.into_bytes()))
+            .collect::<Vec<_>>();
+
+        let txn = Txn::new().and_then(operations);
+        let resp = self.kv_backend.txn(txn).await?;
+        ensure!(
+            resp.succeeded,
+            UnexpectedSnafu {
+                err_msg: format!("Failed to destroy table metadata: {table_id}")
+            }
+        );
 
         Ok(())
     }
 
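The hunks above change table_metadata_keys from returning raw Vec<Vec<u8>> keys to a Key type from the tombstone module: the table-name key is built with Key::compare_and_swap while the rest use Key::new, and destroy_table_metadata now deletes the keys in one transaction guarded by ensure!. A rough sketch of what such a Key wrapper could look like (an assumed shape; the real definition lives in the tombstone module, which is not part of this diff):

// Assumed shape, not the actual GreptimeDB definition: a key wrapper that records
// whether a tombstone move for this key must be guarded by a compare-and-swap.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Key {
    bytes: Vec<u8>,
    atomic: bool,
}

impl Key {
    /// A key that can be moved without a CAS guard.
    pub fn new(bytes: Vec<u8>) -> Self {
        Self { bytes, atomic: false }
    }

    /// A key whose move must fail if another writer changed it concurrently.
    pub fn compare_and_swap(bytes: Vec<u8>) -> Self {
        Self { bytes, atomic: true }
    }

    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
}

fn main() {
    // Hypothetical raw keys, mirroring the table-name vs. table-info split above.
    let keys = vec![
        Key::compare_and_swap(b"__table_name/foo".to_vec()),
        Key::new(b"__table_info/1024".to_vec()),
    ];
    // Only the table-name key demands the CAS guard in this sketch.
    assert_eq!(keys.iter().filter(|key| key.atomic).count(), 1);
    let raw: Vec<Vec<u8>> = keys.into_iter().map(Key::into_bytes).collect();
    assert_eq!(raw.len(), 2);
}
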
@@ -710,12 +724,11 @@ impl TableMetadataManager {
 
         let txn = Txn::merge_all(vec![update_table_name_txn, update_table_info_txn]);
 
-        let mut r = self.kv_backend.txn(txn).await?;
+        let r = self.kv_backend.txn(txn).await?;
 
         // Checks whether metadata was already updated.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
-            let remote_table_info = on_update_table_info_failure(&mut set)?
+            let remote_table_info = on_update_table_info_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table info during the rename table metadata",
                 })?
@@ -743,12 +756,11 @@ impl TableMetadataManager {
             .table_info_manager()
             .build_update_txn(table_id, current_table_info_value, &new_table_info_value)?;
 
-        let mut r = self.kv_backend.txn(update_table_info_txn).await?;
+        let r = self.kv_backend.txn(update_table_info_txn).await?;
 
         // Checks whether metadata was already updated.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
-            let remote_table_info = on_update_table_info_failure(&mut set)?
+            let remote_table_info = on_update_table_info_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table info during the updating table info",
                 })?
@@ -772,7 +784,7 @@ impl TableMetadataManager {
         let mut txns = Vec::with_capacity(len);
         struct OnFailure<F, R>
         where
-            F: FnOnce(&mut TxnOpGetResponseSet) -> R,
+            F: FnOnce(&Vec<TxnOpResponse>) -> R,
         {
             table_info_value: TableInfoValue,
             on_update_table_info_failure: F,
@@ -800,12 +812,11 @@ impl TableMetadataManager {
         }
 
         let txn = Txn::merge_all(txns);
-        let mut r = self.kv_backend.txn(txn).await?;
+        let r = self.kv_backend.txn(txn).await?;
 
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
             for on_failure in on_failures {
-                let remote_table_info = (on_failure.on_update_table_info_failure)(&mut set)?
+                let remote_table_info = (on_failure.on_update_table_info_failure)(&r.responses)?
                     .context(error::UnexpectedSnafu {
                         err_msg: "Reads the empty table info during the updating table info",
                     })?
@@ -852,12 +863,11 @@ impl TableMetadataManager {
 
         let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);
 
-        let mut r = self.kv_backend.txn(txn).await?;
+        let r = self.kv_backend.txn(txn).await?;
 
         // Checks whether metadata was already updated.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
-            let remote_table_route = on_update_table_route_failure(&mut set)?
+            let remote_table_route = on_update_table_route_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table route during the updating table route",
                 })?
@@ -904,12 +914,11 @@ impl TableMetadataManager {
             .table_route_storage()
             .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
 
-        let mut r = self.kv_backend.txn(update_table_route_txn).await?;
+        let r = self.kv_backend.txn(update_table_route_txn).await?;
 
         // Checks whether metadata was already updated.
         if !r.succeeded {
-            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
-            let remote_table_route = on_update_table_route_failure(&mut set)?
+            let remote_table_route = on_update_table_route_failure(&r.responses)?
                 .context(error::UnexpectedSnafu {
                     err_msg: "Reads the empty table route during the updating leader region status",
                 })?
@@ -1277,17 +1286,14 @@ mod tests {
             .delete_table_metadata(table_id, &table_name, table_route_value)
             .await
             .unwrap();
-        // Should be ignored.
-        table_metadata_manager
-            .delete_table_metadata(table_id, &table_name, table_route_value)
-            .await
-            .unwrap();
         assert!(table_metadata_manager
             .table_info_manager()
             .get(table_id)
             .await
             .unwrap()
             .is_none());
 
         assert!(table_metadata_manager
             .table_route_manager()
             .table_route_storage()
@@ -1295,6 +1301,7 @@ mod tests {
             .await
             .unwrap()
             .is_none());
+
         assert!(table_metadata_manager
             .datanode_table_manager()
             .tables(datanode_id)
@@ -1309,6 +1316,7 @@ mod tests {
             .await
             .unwrap();
         assert!(table_info.is_none());
+
         let table_route = table_metadata_manager
             .table_route_manager()
             .table_route_storage()
@@ -1784,12 +1792,5 @@ mod tests {
             .unwrap();
         let kvs = mem_kv.dump();
         assert_eq!(kvs, expected_result);
-        // Should be ignored.
-        table_metadata_manager
-            .restore_table_metadata(table_id, &table_name, &table_route_value)
-            .await
-            .unwrap();
-        let kvs = mem_kv.dump();
-        assert_eq!(kvs, expected_result);
     }
 }

@@ -17,6 +17,7 @@ use std::sync::Arc;

use common_catalog::consts::DEFAULT_CATALOG_NAME;
use futures::stream::BoxStream;
+ use futures::StreamExt;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

@@ -83,11 +84,11 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
}

/// Decoder `KeyValue` to ({catalog},())
- pub fn catalog_decoder(kv: KeyValue) -> Result<String> {
+ pub fn catalog_decoder(kv: KeyValue) -> Result<(String, ())> {
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
let catalog_name = CatalogNameKey::try_from(str)?;

- Ok(catalog_name.catalog.to_string())
+ Ok((catalog_name.catalog.to_string(), ()))
}

pub struct CatalogManager {
@@ -133,7 +134,7 @@ impl CatalogManager {
Arc::new(catalog_decoder),
);

- Box::pin(stream)
+ Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
}
}

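The decoder change above is the heart of this refactor: `catalog_decoder` now yields a `(key, value)` pair and the caller projects out whichever side it needs with `stream.map`. A minimal, self-contained sketch of the idea follows; the `KeyValue` struct and the simplified parsing are stand-ins for illustration, not the crate's real definitions (the real decoder goes through `CatalogNameKey::try_from`).

```rust
// A minimal stand-in for the meta crate's KeyValue; only here so the sketch compiles.
#[allow(dead_code)]
struct KeyValue {
    key: Vec<u8>,
    value: Vec<u8>,
}

// Old shape: the decoder produced just the catalog name.
fn catalog_decoder_old(kv: &KeyValue) -> Result<String, std::str::Utf8Error> {
    Ok(std::str::from_utf8(&kv.key)?.to_string())
}

// New shape: the decoder produces a (key, value) pair, so the same stream
// signature also serves decoders that return real deserialized values.
fn catalog_decoder_new(kv: &KeyValue) -> Result<(String, ()), std::str::Utf8Error> {
    Ok((std::str::from_utf8(&kv.key)?.to_string(), ()))
}

fn main() {
    let kv = KeyValue {
        key: b"greptime".to_vec(),
        value: Vec::new(),
    };
    // Callers that only need the key project it out, mirroring
    // `Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))` in the diff.
    let name = catalog_decoder_new(&kv).map(|pair| pair.0).unwrap();
    assert_eq!(name, catalog_decoder_old(&kv).unwrap());
}
```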
@@ -16,6 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use futures::stream::BoxStream;
+ use futures::StreamExt;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use store_api::storage::RegionNumber;
@@ -125,8 +126,10 @@ impl DatanodeTableValue {
}

/// Decodes `KeyValue` to ((),`DatanodeTableValue`)
- pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<DatanodeTableValue> {
- DatanodeTableValue::try_from_raw_value(&kv.value)
+ pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<((), DatanodeTableValue)> {
+ let value = DatanodeTableValue::try_from_raw_value(&kv.value)?;
+
+ Ok(((), value))
}

pub struct DatanodeTableManager {
@@ -160,7 +163,7 @@ impl DatanodeTableManager {
Arc::new(datanode_table_value_decoder),
);

- Box::pin(stream)
+ Box::pin(stream.map(|kv| kv.map(|kv| kv.1)))
}

/// Builds the create datanode table transactions. It only executes while the primary keys comparing successes.
@@ -237,14 +240,10 @@ impl DatanodeTableManager {
// FIXME(weny): add unit tests.
let mut new_region_info = region_info.clone();
if need_update_options {
- new_region_info
- .region_options
- .clone_from(new_region_options);
+ new_region_info.region_options = new_region_options.clone();
}
if need_update_wal_options {
- new_region_info
- .region_wal_options
- .clone_from(new_region_wal_options);
+ new_region_info.region_wal_options = new_region_wal_options.clone();
}
let val = DatanodeTableValue::new(table_id, regions, new_region_info)
.try_as_raw_value()?;

@@ -19,6 +19,7 @@ use std::time::Duration;

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures::stream::BoxStream;
+ use futures::StreamExt;
use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -102,11 +103,11 @@ impl TableMetaKey for SchemaNameKey<'_> {
}

/// Decodes `KeyValue` to ({schema},())
- pub fn schema_decoder(kv: KeyValue) -> Result<String> {
+ pub fn schema_decoder(kv: KeyValue) -> Result<(String, ())> {
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
let schema_name = SchemaNameKey::try_from(str)?;

- Ok(schema_name.schema.to_string())
+ Ok((schema_name.schema.to_string(), ()))
}

impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
@@ -192,22 +193,7 @@ impl SchemaManager {
Arc::new(schema_decoder),
);

- Box::pin(stream)
+ Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
- }
- }

- #[derive(Debug, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
- pub struct SchemaName {
- pub catalog_name: String,
- pub schema_name: String,
- }

- impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
- fn from(value: &'a SchemaName) -> Self {
- Self {
- catalog: &value.catalog_name,
- schema: &value.schema_name,
- }
}
}

@@ -18,12 +18,11 @@ use serde::{Deserialize, Serialize};
use table::metadata::{RawTableInfo, TableId};
use table::table_reference::TableReference;

- use super::txn_helper::TxnOpGetResponseSet;
use crate::error::Result;
use crate::key::{
txn_helper, DeserializedValueWithBytes, TableMetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX,
};
- use crate::kv_backend::txn::Txn;
+ use crate::kv_backend::txn::{Txn, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
use crate::table_name::TableName;
@@ -110,9 +109,7 @@ impl TableInfoManager {
table_info_value: &TableInfoValue,
) -> Result<(
Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
+ impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
)> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -122,10 +119,7 @@ impl TableInfoManager {
table_info_value.try_as_raw_value()?,
);

- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
- ))
+ Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
}

/// Builds a update table info transaction, it expected the remote value equals the `current_current_table_info_value`.
@@ -137,9 +131,7 @@ impl TableInfoManager {
new_table_info_value: &TableInfoValue,
) -> Result<(
Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
+ impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
)> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -148,10 +140,7 @@ impl TableInfoManager {

let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);

- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
- ))
+ Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
}

pub async fn get(

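`build_create_txn` and `build_update_txn` above both pair a compare-and-put transaction with a decoder for the value that was actually found when the comparison fails. A rough, self-contained sketch of that pattern over a plain map; the `compare_and_put` helper below is illustrative and not the crate's `txn_helper` API.

```rust
use std::collections::HashMap;

/// Puts `new` under `key` only if the current value equals `expected`.
/// Returns Ok(()) on success, or the value actually stored on failure,
/// mirroring the "on update failure" decoder returned next to each Txn.
fn compare_and_put(
    store: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    expected: &[u8],
    new: Vec<u8>,
) -> Result<(), Option<Vec<u8>>> {
    match store.get(key) {
        Some(current) if current.as_slice() == expected => {
            store.insert(key.to_vec(), new);
            Ok(())
        }
        Some(current) => Err(Some(current.clone())),
        None => Err(None),
    }
}

fn main() {
    let mut store = HashMap::new();
    store.insert(b"table/1024/info".to_vec(), b"v1".to_vec());

    // Succeeds: the remote value still matches the expected bytes.
    assert!(compare_and_put(&mut store, b"table/1024/info", b"v1", b"v2".to_vec()).is_ok());

    // Fails: someone else updated it first; the failure branch hands back the
    // remote value, much like the decoder does for the raw key of a failed txn.
    let err = compare_and_put(&mut store, b"table/1024/info", b"v1", b"v3".to_vec());
    assert_eq!(err, Err(Some(b"v2".to_vec())));
}
```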
@@ -20,16 +20,13 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;

+ use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue};
use crate::error::{
self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
UnexpectedLogicalRouteTableSnafu,
};
- use crate::key::txn_helper::TxnOpGetResponseSet;
- use crate::key::{
- txn_helper, DeserializedValueWithBytes, RegionDistribution, TableMetaKey, TableMetaValue,
- TABLE_ROUTE_PREFIX,
- };
- use crate::kv_backend::txn::Txn;
+ use crate::key::{RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
+ use crate::kv_backend::txn::{Txn, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::store::BatchGetRequest;
@@ -457,9 +454,7 @@ impl TableRouteStorage {
table_route_value: &TableRouteValue,
) -> Result<(
Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
+ impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
)> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -469,10 +464,7 @@ impl TableRouteStorage {
table_route_value.try_as_raw_value()?,
);

- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
- ))
+ Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
}

/// Builds a update table route transaction,
@@ -485,9 +477,7 @@ impl TableRouteStorage {
new_table_route_value: &TableRouteValue,
) -> Result<(
Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
+ impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
)> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
@@ -496,10 +486,7 @@ impl TableRouteStorage {

let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);

- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
- ))
+ Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
}

/// Returns the [`TableRouteValue`].

@@ -12,15 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

- use std::collections::HashMap;
+ use snafu::{ensure, OptionExt};

- use snafu::ensure;

+ use super::TableMetaKeyGetTxnOp;
use crate::error::{self, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
- use crate::rpc::store::BatchGetRequest;

/// [TombstoneManager] provides the ability to:
/// - logically delete values
@@ -31,160 +29,307 @@ pub(crate) struct TombstoneManager {

const TOMBSTONE_PREFIX: &str = "__tombstone/";

- pub(crate) struct TombstoneKeyValue {
+ pub(crate) struct TombstoneKey<T>(T);
- pub(crate) origin_key: Vec<u8>,
- pub(crate) tombstone_key: Vec<u8>,
- pub(crate) value: Vec<u8>,
- }

fn to_tombstone(key: &[u8]) -> Vec<u8> {
[TOMBSTONE_PREFIX.as_bytes(), key].concat()
}

+ impl TombstoneKey<&Vec<u8>> {
+ /// Returns the origin key and tombstone key.
+ fn to_keys(&self) -> (Vec<u8>, Vec<u8>) {
+ let key = self.0;
+ let tombstone_key = to_tombstone(key);
+ (key.clone(), tombstone_key)
+ }

+ /// Returns the origin key and tombstone key.
+ fn into_keys(self) -> (Vec<u8>, Vec<u8>) {
+ self.to_keys()
+ }

+ /// Returns the tombstone key.
+ fn to_tombstone_key(&self) -> Vec<u8> {
+ let key = self.0;
+ to_tombstone(key)
+ }
+ }

+ impl TableMetaKeyGetTxnOp for TombstoneKey<&Vec<u8>> {
+ fn build_get_op(
+ &self,
+ ) -> (
+ TxnOp,
+ impl FnMut(&'_ mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
+ ) {
+ TxnOpGetResponseSet::build_get_op(to_tombstone(self.0))
+ }
+ }

+ /// The key used in the [TombstoneManager].
+ pub(crate) struct Key {
+ bytes: Vec<u8>,
+ // Atomic Key:
+ // The value corresponding to the key remains consistent between two transactions.
+ atomic: bool,
+ }

+ impl Key {
+ /// Returns a new atomic key.
+ pub(crate) fn compare_and_swap<T: Into<Vec<u8>>>(key: T) -> Self {
+ Self {
+ bytes: key.into(),
+ atomic: true,
+ }
+ }

+ /// Returns a new normal key.
+ pub(crate) fn new<T: Into<Vec<u8>>>(key: T) -> Self {
+ Self {
+ bytes: key.into(),
+ atomic: false,
+ }
+ }

+ /// Into bytes
+ pub(crate) fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }

+ fn get_inner(&self) -> &Vec<u8> {
+ &self.bytes
+ }

+ fn is_atomic(&self) -> bool {
+ self.atomic
+ }
+ }

+ impl TableMetaKeyGetTxnOp for Key {
+ fn build_get_op(
+ &self,
+ ) -> (
+ TxnOp,
+ impl FnMut(&'_ mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
+ ) {
+ let key = self.get_inner().clone();
+ (TxnOp::Get(key.clone()), TxnOpGetResponseSet::filter(key))
+ }
+ }

+ fn format_on_failure_error_message<F: FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>>(
+ mut set: TxnOpGetResponseSet,
+ on_failure_kv_and_filters: Vec<(Vec<u8>, Vec<u8>, F)>,
+ ) -> String {
+ on_failure_kv_and_filters
+ .into_iter()
+ .flat_map(|(key, value, mut filter)| {
+ let got = filter(&mut set);
+ let Some(got) = got else {
+ return Some(format!(
+ "For key: {} was expected: {}, but value does not exists",
+ String::from_utf8_lossy(&key),
+ String::from_utf8_lossy(&value),
+ ));
+ };

+ if got != value {
+ Some(format!(
+ "For key: {} was expected: {}, but got: {}",
+ String::from_utf8_lossy(&key),
+ String::from_utf8_lossy(&value),
+ String::from_utf8_lossy(&got),
+ ))
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>()
+ .join("; ")
+ }

+ fn format_keys(keys: &[Key]) -> String {
+ keys.iter()
+ .map(|key| String::from_utf8_lossy(&key.bytes))
+ .collect::<Vec<_>>()
+ .join(", ")
+ }

impl TombstoneManager {
/// Returns [TombstoneManager].
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
}

- /// Moves value to `dest_key`.
- ///
- /// Puts `value` to `dest_key` if the value of `src_key` equals `value`.
- ///
- /// Otherwise retrieves the value of `src_key`.
- fn build_move_value_txn(
- &self,
- src_key: Vec<u8>,
- value: Vec<u8>,
- dest_key: Vec<u8>,
- ) -> (Txn, impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>) {
- let txn = Txn::new()
- .when(vec![Compare::with_value(
- src_key.clone(),
- CompareOp::Equal,
- value.clone(),
- )])
- .and_then(vec![
- TxnOp::Put(dest_key.clone(), value.clone()),
- TxnOp::Delete(src_key.clone()),
- ])
- .or_else(vec![TxnOp::Get(src_key.clone())]);

- (txn, TxnOpGetResponseSet::filter(src_key))
- }

- async fn move_values_inner(&self, keys: &[Vec<u8>], dest_keys: &[Vec<u8>]) -> Result<()> {
- ensure!(
- keys.len() == dest_keys.len(),
- error::UnexpectedSnafu {
- err_msg: "The length of keys does not match the length of dest_keys."
- }
- );
- // The key -> dest key mapping.
- let lookup_table = keys.iter().zip(dest_keys.iter()).collect::<HashMap<_, _>>();

- let resp = self
- .kv_backend
- .batch_get(BatchGetRequest::new().with_keys(keys.to_vec()))
- .await?;
- let mut results = resp
- .kvs
- .into_iter()
- .map(|kv| (kv.key, kv.value))
- .collect::<HashMap<_, _>>();

- const MAX_RETRIES: usize = 8;
- for _ in 0..MAX_RETRIES {
- let (txns, (keys, filters)): (Vec<_>, (Vec<_>, Vec<_>)) = results
- .iter()
- .map(|(key, value)| {
- let (txn, filter) = self.build_move_value_txn(
- key.clone(),
- value.clone(),
- lookup_table[&key].clone(),
- );
- (txn, (key.clone(), filter))
- })
- .unzip();
- let mut resp = self.kv_backend.txn(Txn::merge_all(txns)).await?;
- if resp.succeeded {
- return Ok(());
- }
- let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
- // Updates results.
- for (idx, mut filter) in filters.into_iter().enumerate() {
- if let Some(value) = filter(&mut set) {
- results.insert(keys[idx].clone(), value);
- } else {
- results.remove(&keys[idx]);
- }
- }
- }

- error::MoveValuesSnafu {
- err_msg: format!(
- "keys: {:?}",
- keys.iter().map(|key| String::from_utf8_lossy(key)),
- ),
- }
- .fail()
- }

- /// Moves values to `dest_key`.
- async fn move_values(&self, keys: Vec<Vec<u8>>, dest_keys: Vec<Vec<u8>>) -> Result<()> {
- let chunk_size = self.kv_backend.max_txn_ops() / 2;
- if keys.len() > chunk_size {
- let keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
- let dest_keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
- for (keys, dest_keys) in keys_chunks.into_iter().zip(dest_keys_chunks) {
- self.move_values_inner(keys, dest_keys).await?;
- }

- Ok(())
- } else {
- self.move_values_inner(&keys, &dest_keys).await
- }
- }

/// Creates tombstones for keys.
///
/// Preforms to:
- /// - deletes origin values.
+ /// - retrieve all values corresponding `keys`.
/// - stores tombstone values.
- pub(crate) async fn create(&self, keys: Vec<Vec<u8>>) -> Result<()> {
+ pub(crate) async fn create(&self, keys: Vec<Key>) -> Result<()> {
- let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
+ // Builds transaction to retrieve all values
- .into_iter()
+ let (operations, mut filters): (Vec<_>, Vec<_>) =
- .map(|key| {
+ keys.iter().map(|key| key.build_get_op()).unzip();
- let tombstone_key = to_tombstone(&key);
- (key, tombstone_key)
- })
- .unzip();

- self.move_values(keys, dest_keys).await
+ let txn = Txn::new().and_then(operations);
+ let mut resp = self.kv_backend.txn(txn).await?;
+ ensure!(
+ resp.succeeded,
+ error::UnexpectedSnafu {
+ err_msg: format!(
+ "Failed to retrieves the metadata, keys: {}",
+ format_keys(&keys)
+ ),
+ }
+ );

+ let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
+ // Builds the create tombstone transaction.
+ let mut tombstone_operations = Vec::with_capacity(keys.len() * 2);
+ let mut tombstone_comparison = vec![];
+ let mut on_failure_operations = vec![];
+ let mut on_failure_kv_and_filters = vec![];
+ for (idx, key) in keys.iter().enumerate() {
+ let filter = &mut filters[idx];
+ let value = filter(&mut set).with_context(|| error::UnexpectedSnafu {
+ err_msg: format!(
+ "Missing value, key: {}",
+ String::from_utf8_lossy(key.get_inner())
+ ),
+ })?;
+ let (origin_key, tombstone_key) = TombstoneKey(key.get_inner()).into_keys();
+ // Compares the atomic key.
+ if key.is_atomic() {
+ tombstone_comparison.push(Compare::with_not_exist_value(
+ tombstone_key.clone(),
+ CompareOp::Equal,
+ ));
+ tombstone_comparison.push(Compare::with_value(
+ origin_key.clone(),
+ CompareOp::Equal,
+ value.clone(),
+ ));
+ let (op, filter) = TxnOpGetResponseSet::build_get_op(origin_key.clone());
+ on_failure_operations.push(op);
+ on_failure_kv_and_filters.push((origin_key.clone(), value.clone(), filter));
+ }
+ tombstone_operations.push(TxnOp::Delete(origin_key));
+ tombstone_operations.push(TxnOp::Put(tombstone_key, value));
+ }

+ let txn = if !tombstone_comparison.is_empty() {
+ Txn::new().when(tombstone_comparison)
+ } else {
+ Txn::new()
+ }
+ .and_then(tombstone_operations);

+ let txn = if !on_failure_operations.is_empty() {
+ txn.or_else(on_failure_operations)
+ } else {
+ txn
+ };

+ let mut resp = self.kv_backend.txn(txn).await?;
+ // TODO(weny): add tests for atomic key changed.
+ if !resp.succeeded {
+ let set = TxnOpGetResponseSet::from(&mut resp.responses);
+ let err_msg = format_on_failure_error_message(set, on_failure_kv_and_filters);
+ return error::CasKeyChangedSnafu { err_msg }.fail();
+ }
+ Ok(())
}

/// Restores tombstones for keys.
///
/// Preforms to:
- /// - restore origin value.
+ /// - retrieve all tombstone values corresponding `keys`.
- /// - deletes tombstone values.
+ /// - stores tombstone values.
- pub(crate) async fn restore(&self, keys: Vec<Vec<u8>>) -> Result<()> {
+ pub(crate) async fn restore(&self, keys: Vec<Key>) -> Result<()> {
- let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
+ // Builds transaction to retrieve all tombstone values
- .into_iter()
+ let tombstone_keys = keys
- .map(|key| {
+ .iter()
- let tombstone_key = to_tombstone(&key);
+ .map(|key| TombstoneKey(key.get_inner()))
- (tombstone_key, key)
+ .collect::<Vec<_>>();
- })
+ let (operations, mut filters): (Vec<_>, Vec<_>) =
- .unzip();
+ tombstone_keys.iter().map(|key| key.build_get_op()).unzip();

- self.move_values(keys, dest_keys).await
+ let txn = Txn::new().and_then(operations);
+ let mut resp = self.kv_backend.txn(txn).await?;
+ ensure!(
+ resp.succeeded,
+ error::UnexpectedSnafu {
+ err_msg: format!(
+ "Failed to retrieves the metadata, keys: {}",
+ format_keys(&keys)
+ ),
+ }
+ );

+ let mut set = TxnOpGetResponseSet::from(&mut resp.responses);

+ // Builds the restore tombstone transaction.
+ let mut tombstone_operations = Vec::with_capacity(keys.len() * 2);
+ let mut tombstone_comparison = vec![];
+ let mut on_failure_operations = vec![];
+ let mut on_failure_kv_and_filters = vec![];
+ for (idx, key) in keys.iter().enumerate() {
+ let filter = &mut filters[idx];
+ let value = filter(&mut set).with_context(|| error::UnexpectedSnafu {
+ err_msg: format!(
+ "Missing value, key: {}",
+ String::from_utf8_lossy(key.get_inner())
+ ),
+ })?;
+ let (origin_key, tombstone_key) = tombstone_keys[idx].to_keys();
+ // Compares the atomic key.
+ if key.is_atomic() {
+ tombstone_comparison.push(Compare::with_not_exist_value(
+ origin_key.clone(),
+ CompareOp::Equal,
+ ));
+ tombstone_comparison.push(Compare::with_value(
+ tombstone_key.clone(),
+ CompareOp::Equal,
+ value.clone(),
+ ));
+ let (op, filter) = tombstone_keys[idx].build_get_op();
+ on_failure_operations.push(op);
+ on_failure_kv_and_filters.push((tombstone_key.clone(), value.clone(), filter));
+ }
+ tombstone_operations.push(TxnOp::Delete(tombstone_key));
+ tombstone_operations.push(TxnOp::Put(origin_key, value));
+ }

+ let txn = if !tombstone_comparison.is_empty() {
+ Txn::new().when(tombstone_comparison)
+ } else {
+ Txn::new()
+ }
+ .and_then(tombstone_operations);

+ let txn = if !on_failure_operations.is_empty() {
+ txn.or_else(on_failure_operations)
+ } else {
+ txn
+ };

+ let mut resp = self.kv_backend.txn(txn).await?;
+ // TODO(weny): add tests for atomic key changed.
+ if !resp.succeeded {
+ let set = TxnOpGetResponseSet::from(&mut resp.responses);
+ let err_msg = format_on_failure_error_message(set, on_failure_kv_and_filters);
+ return error::CasKeyChangedSnafu { err_msg }.fail();
+ }

+ Ok(())
}

- /// Deletes tombstones values for the specified `keys`.
+ /// Deletes tombstones for keys.
pub(crate) async fn delete(&self, keys: Vec<Vec<u8>>) -> Result<()> {
let operations = keys
.iter()
- .map(|key| TxnOp::Delete(to_tombstone(key)))
+ .map(|key| TxnOp::Delete(TombstoneKey(key).to_tombstone_key()))
.collect::<Vec<_>>();

let txn = Txn::new().and_then(operations);
@@ -197,41 +342,13 @@ impl TombstoneManager {
#[cfg(test)]
mod tests {

- use std::collections::HashMap;
use std::sync::Arc;

- use super::to_tombstone;
+ use crate::key::tombstone::{Key, TombstoneKey, TombstoneManager};
- use crate::error::Error;
- use crate::key::tombstone::TombstoneManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::KvBackend;
use crate::rpc::store::PutRequest;

- #[derive(Debug, Clone)]
- struct MoveValue {
- key: Vec<u8>,
- dest_key: Vec<u8>,
- value: Vec<u8>,
- }

- async fn check_moved_values(
- kv_backend: Arc<MemoryKvBackend<Error>>,
- move_values: &[MoveValue],
- ) {
- for MoveValue {
- key,
- dest_key,
- value,
- } in move_values
- {
- assert!(kv_backend.get(key).await.unwrap().is_none());
- assert_eq!(
- &kv_backend.get(dest_key).await.unwrap().unwrap().value,
- value,
- );
- }
- }

#[tokio::test]
async fn test_create_tombstone() {
let kv_backend = Arc::new(MemoryKvBackend::default());
@@ -245,14 +362,14 @@ mod tests {
.await
.unwrap();
tombstone_manager
- .create(vec![b"bar".to_vec(), b"foo".to_vec()])
+ .create(vec![Key::compare_and_swap("bar"), Key::new("foo")])
.await
.unwrap();
assert!(!kv_backend.exists(b"bar").await.unwrap());
assert!(!kv_backend.exists(b"foo").await.unwrap());
assert_eq!(
kv_backend
- .get(&to_tombstone(b"bar"))
+ .get(&TombstoneKey(&"bar".into()).to_tombstone_key())
.await
.unwrap()
.unwrap()
@@ -261,7 +378,7 @@ mod tests {
);
assert_eq!(
kv_backend
- .get(&to_tombstone(b"foo"))
+ .get(&TombstoneKey(&"foo".into()).to_tombstone_key())
.await
.unwrap()
.unwrap()
@@ -272,7 +389,46 @@ mod tests {
}

#[tokio::test]
- async fn test_create_tombstone_with_non_exist_values() {
+ async fn test_create_tombstone_without_atomic_key() {
+ let kv_backend = Arc::new(MemoryKvBackend::default());
+ let tombstone_manager = TombstoneManager::new(kv_backend.clone());
+ kv_backend
+ .put(PutRequest::new().with_key("bar").with_value("baz"))
+ .await
+ .unwrap();
+ kv_backend
+ .put(PutRequest::new().with_key("foo").with_value("hi"))
+ .await
+ .unwrap();
+ tombstone_manager
+ .create(vec![Key::new("bar"), Key::new("foo")])
+ .await
+ .unwrap();
+ assert!(!kv_backend.exists(b"bar").await.unwrap());
+ assert!(!kv_backend.exists(b"foo").await.unwrap());
+ assert_eq!(
+ kv_backend
+ .get(&TombstoneKey(&"bar".into()).to_tombstone_key())
+ .await
+ .unwrap()
+ .unwrap()
+ .value,
+ b"baz"
+ );
+ assert_eq!(
+ kv_backend
+ .get(&TombstoneKey(&"foo".into()).to_tombstone_key())
+ .await
+ .unwrap()
+ .unwrap()
+ .value,
+ b"hi"
+ );
+ assert_eq!(kv_backend.len(), 2);
+ }

+ #[tokio::test]
+ async fn test_create_tombstone_origin_value_not_found_err() {
let kv_backend = Arc::new(MemoryKvBackend::default());
let tombstone_manager = TombstoneManager::new(kv_backend.clone());

@@ -285,19 +441,11 @@ mod tests {
.await
.unwrap();

- tombstone_manager
+ let err = tombstone_manager
- .create(vec![b"bar".to_vec(), b"baz".to_vec()])
+ .create(vec![Key::compare_and_swap("bar"), Key::new("baz")])
.await
- .unwrap();
+ .unwrap_err();
- check_moved_values(
+ assert!(err.to_string().contains("Missing value"));
- kv_backend.clone(),
- &[MoveValue {
- key: b"bar".to_vec(),
- dest_key: to_tombstone(b"bar"),
- value: b"baz".to_vec(),
- }],
- )
- .await;
}

#[tokio::test]
@@ -314,16 +462,63 @@ mod tests {
.unwrap();
let expected_kvs = kv_backend.dump();
tombstone_manager
- .create(vec![b"bar".to_vec(), b"foo".to_vec()])
+ .create(vec![Key::compare_and_swap("bar"), Key::new("foo")])
.await
.unwrap();
tombstone_manager
- .restore(vec![b"bar".to_vec(), b"foo".to_vec()])
+ .restore(vec![Key::compare_and_swap("bar"), Key::new("foo")])
.await
.unwrap();
assert_eq!(expected_kvs, kv_backend.dump());
}

+ #[tokio::test]
+ async fn test_restore_tombstone_without_atomic_key() {
+ let kv_backend = Arc::new(MemoryKvBackend::default());
+ let tombstone_manager = TombstoneManager::new(kv_backend.clone());
+ kv_backend
+ .put(PutRequest::new().with_key("bar").with_value("baz"))
+ .await
+ .unwrap();
+ kv_backend
+ .put(PutRequest::new().with_key("foo").with_value("hi"))
+ .await
+ .unwrap();
+ let expected_kvs = kv_backend.dump();
+ tombstone_manager
+ .create(vec![Key::compare_and_swap("bar"), Key::new("foo")])
+ .await
+ .unwrap();
+ tombstone_manager
+ .restore(vec![Key::new("bar"), Key::new("foo")])
+ .await
+ .unwrap();
+ assert_eq!(expected_kvs, kv_backend.dump());
+ }

+ #[tokio::test]
+ async fn test_restore_tombstone_origin_value_not_found_err() {
+ let kv_backend = Arc::new(MemoryKvBackend::default());
+ let tombstone_manager = TombstoneManager::new(kv_backend.clone());
+ kv_backend
+ .put(PutRequest::new().with_key("bar").with_value("baz"))
+ .await
+ .unwrap();
+ kv_backend
+ .put(PutRequest::new().with_key("foo").with_value("hi"))
+ .await
+ .unwrap();
+ tombstone_manager
+ .create(vec![Key::compare_and_swap("bar"), Key::new("foo")])
+ .await
+ .unwrap();
+ let err = tombstone_manager
+ .restore(vec![Key::new("bar"), Key::new("baz")])
+ .await
+ .unwrap_err();
+ assert!(err.to_string().contains("Missing value"));
+ }

#[tokio::test]
async fn test_delete_tombstone() {
let kv_backend = Arc::new(MemoryKvBackend::default());
@@ -337,7 +532,7 @@ mod tests {
.await
.unwrap();
tombstone_manager
- .create(vec![b"bar".to_vec(), b"foo".to_vec()])
+ .create(vec![Key::compare_and_swap("bar"), Key::new("foo")])
.await
.unwrap();
tombstone_manager
@@ -346,216 +541,4 @@ mod tests {
.unwrap();
assert!(kv_backend.is_empty());
}

- #[tokio::test]
- async fn test_move_values() {
- let kv_backend = Arc::new(MemoryKvBackend::default());
- let tombstone_manager = TombstoneManager::new(kv_backend.clone());
- let kvs = HashMap::from([
- (b"bar".to_vec(), b"baz".to_vec()),
- (b"foo".to_vec(), b"hi".to_vec()),
- (b"baz".to_vec(), b"hello".to_vec()),
- ]);
- for (key, value) in &kvs {
- kv_backend
- .put(
- PutRequest::new()
- .with_key(key.clone())
- .with_value(value.clone()),
- )
- .await
- .unwrap();
- }
- let move_values = kvs
- .iter()
- .map(|(key, value)| MoveValue {
- key: key.clone(),
- dest_key: to_tombstone(key),
- value: value.clone(),
- })
- .collect::<Vec<_>>();
- let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
- .clone()
- .into_iter()
- .map(|kv| (kv.key, kv.dest_key))
- .unzip();
- tombstone_manager
- .move_values(keys.clone(), dest_keys.clone())
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;
- // Moves again
- tombstone_manager
- .move_values(keys.clone(), dest_keys.clone())
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;
- }

- #[tokio::test]
- async fn test_move_values_with_non_exists_values() {
- let kv_backend = Arc::new(MemoryKvBackend::default());
- let tombstone_manager = TombstoneManager::new(kv_backend.clone());
- let kvs = HashMap::from([
- (b"bar".to_vec(), b"baz".to_vec()),
- (b"foo".to_vec(), b"hi".to_vec()),
- (b"baz".to_vec(), b"hello".to_vec()),
- ]);
- for (key, value) in &kvs {
- kv_backend
- .put(
- PutRequest::new()
- .with_key(key.clone())
- .with_value(value.clone()),
- )
- .await
- .unwrap();
- }
- let move_values = kvs
- .iter()
- .map(|(key, value)| MoveValue {
- key: key.clone(),
- dest_key: to_tombstone(key),
- value: value.clone(),
- })
- .collect::<Vec<_>>();
- let (mut keys, mut dest_keys): (Vec<_>, Vec<_>) = move_values
- .clone()
- .into_iter()
- .map(|kv| (kv.key, kv.dest_key))
- .unzip();
- keys.push(b"non-exists".to_vec());
- dest_keys.push(b"hi/non-exists".to_vec());
- tombstone_manager
- .move_values(keys.clone(), dest_keys.clone())
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;
- // Moves again
- tombstone_manager
- .move_values(keys.clone(), dest_keys.clone())
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;
- }

- #[tokio::test]
- async fn test_move_values_changed() {
- let kv_backend = Arc::new(MemoryKvBackend::default());
- let tombstone_manager = TombstoneManager::new(kv_backend.clone());
- let kvs = HashMap::from([
- (b"bar".to_vec(), b"baz".to_vec()),
- (b"foo".to_vec(), b"hi".to_vec()),
- (b"baz".to_vec(), b"hello".to_vec()),
- ]);
- for (key, value) in &kvs {
- kv_backend
- .put(
- PutRequest::new()
- .with_key(key.clone())
- .with_value(value.clone()),
- )
- .await
- .unwrap();
- }

- kv_backend
- .put(PutRequest::new().with_key("baz").with_value("changed"))
- .await
- .unwrap();

- let move_values = kvs
- .iter()
- .map(|(key, value)| MoveValue {
- key: key.clone(),
- dest_key: to_tombstone(key),
- value: value.clone(),
- })
- .collect::<Vec<_>>();
- let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
- .clone()
- .into_iter()
- .map(|kv| (kv.key, kv.dest_key))
- .unzip();
- tombstone_manager
- .move_values(keys, dest_keys)
- .await
- .unwrap();
- }

- #[tokio::test]
- async fn test_move_values_overwrite_dest_values() {
- let kv_backend = Arc::new(MemoryKvBackend::default());
- let tombstone_manager = TombstoneManager::new(kv_backend.clone());
- let kvs = HashMap::from([
- (b"bar".to_vec(), b"baz".to_vec()),
- (b"foo".to_vec(), b"hi".to_vec()),
- (b"baz".to_vec(), b"hello".to_vec()),
- ]);
- for (key, value) in &kvs {
- kv_backend
- .put(
- PutRequest::new()
- .with_key(key.clone())
- .with_value(value.clone()),
- )
- .await
- .unwrap();
- }

- // Prepares
- let move_values = kvs
- .iter()
- .map(|(key, value)| MoveValue {
- key: key.clone(),
- dest_key: to_tombstone(key),
- value: value.clone(),
- })
- .collect::<Vec<_>>();
- let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
- .clone()
- .into_iter()
- .map(|kv| (kv.key, kv.dest_key))
- .unzip();
- tombstone_manager
- .move_values(keys, dest_keys)
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;

- // Overwrites existing dest keys.
- let kvs = HashMap::from([
- (b"bar".to_vec(), b"new baz".to_vec()),
- (b"foo".to_vec(), b"new hi".to_vec()),
- (b"baz".to_vec(), b"new baz".to_vec()),
- ]);
- for (key, value) in &kvs {
- kv_backend
- .put(
- PutRequest::new()
- .with_key(key.clone())
- .with_value(value.clone()),
- )
- .await
- .unwrap();
- }
- let move_values = kvs
- .iter()
- .map(|(key, value)| MoveValue {
- key: key.clone(),
- dest_key: to_tombstone(key),
- value: value.clone(),
- })
- .collect::<Vec<_>>();
- let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
- .clone()
- .into_iter()
- .map(|kv| (kv.key, kv.dest_key))
- .unzip();
- tombstone_manager
- .move_values(keys, dest_keys)
- .await
- .unwrap();
- check_moved_values(kv_backend.clone(), &move_values).await;
- }
}

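Stripped of the transaction plumbing, the tombstone scheme in this file is a key move under a reserved prefix: a logical delete moves the value to `__tombstone/<key>`, restore moves it back, and delete drops the tombstone copy. A simplified in-memory sketch of that lifecycle follows; a `HashMap` stands in for the kv backend and none of the atomic compare checks are modeled.

```rust
use std::collections::HashMap;

const TOMBSTONE_PREFIX: &str = "__tombstone/";

fn to_tombstone(key: &[u8]) -> Vec<u8> {
    [TOMBSTONE_PREFIX.as_bytes(), key].concat()
}

/// Logical delete: move the value from `key` to its tombstone key.
fn create_tombstone(store: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8]) {
    if let Some(value) = store.remove(key) {
        store.insert(to_tombstone(key), value);
    }
}

/// Restore: move the value back from the tombstone key to `key`.
fn restore_tombstone(store: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8]) {
    if let Some(value) = store.remove(&to_tombstone(key)) {
        store.insert(key.to_vec(), value);
    }
}

/// Permanent delete: drop the tombstone copy.
fn delete_tombstone(store: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8]) {
    store.remove(&to_tombstone(key));
}

fn main() {
    let mut store = HashMap::new();
    store.insert(b"bar".to_vec(), b"baz".to_vec());

    create_tombstone(&mut store, b"bar");
    assert!(store.get(b"bar".as_slice()).is_none());
    assert_eq!(store.get(&to_tombstone(b"bar")), Some(&b"baz".to_vec()));

    restore_tombstone(&mut store, b"bar");
    assert_eq!(store.get(b"bar".as_slice()), Some(&b"baz".to_vec()));

    create_tombstone(&mut store, b"bar");
    delete_tombstone(&mut store, b"bar");
    assert!(store.is_empty());
}
```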
@@ -21,9 +21,21 @@ use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::rpc::KeyValue;

/// The response set of [TxnOpResponse::ResponseGet]
- pub struct TxnOpGetResponseSet(Vec<KeyValue>);
+ pub(crate) struct TxnOpGetResponseSet(Vec<KeyValue>);

impl TxnOpGetResponseSet {
+ /// Returns a [TxnOp] to retrieve the value corresponding `key` and
+ /// a filter to consume corresponding [KeyValue] from [TxnOpGetResponseSet].
+ pub(crate) fn build_get_op<T: Into<Vec<u8>>>(
+ key: T,
+ ) -> (
+ TxnOp,
+ impl FnMut(&'_ mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
+ ) {
+ let key = key.into();
+ (TxnOp::Get(key.clone()), TxnOpGetResponseSet::filter(key))
+ }

/// Returns a filter to consume a [KeyValue] where the key equals `key`.
pub(crate) fn filter(key: Vec<u8>) -> impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>> {
move |set| {
@@ -68,6 +80,30 @@ impl From<&mut Vec<TxnOpResponse>> for TxnOpGetResponseSet {
}
}

+ // TODO(weny): using `TxnOpGetResponseSet`.
+ pub(crate) fn build_txn_response_decoder_fn<T>(
+ raw_key: Vec<u8>,
+ ) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<T>>>
+ where
+ T: Serialize + DeserializeOwned + TableMetaValue,
+ {
+ move |txn_res: &Vec<TxnOpResponse>| {
+ txn_res
+ .iter()
+ .filter_map(|resp| {
+ if let TxnOpResponse::ResponseGet(r) = resp {
+ Some(r)
+ } else {
+ None
+ }
+ })
+ .flat_map(|r| &r.kvs)
+ .find(|kv| kv.key == raw_key)
+ .map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
+ .transpose()
+ }
+ }

pub(crate) fn build_put_if_absent_txn(key: Vec<u8>, value: Vec<u8>) -> Txn {
Txn::new()
.when(vec![Compare::with_not_exist_value(

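The two decoding styles visible in this hunk differ mainly in how responses are consumed: `build_txn_response_decoder_fn` scans a borrowed `Vec<TxnOpResponse>` for one raw key, while `TxnOpGetResponseSet::filter` hands out closures that each remove their own entry from a mutable set. A small self-contained sketch of the consuming-filter idea, using stand-in types rather than the crate's:

```rust
/// Stand-in for TxnOpGetResponseSet: a bag of (key, value) pairs collected
/// from the get responses of a failed transaction.
struct ResponseSet(Vec<(Vec<u8>, Vec<u8>)>);

/// Returns a closure that removes and yields the value for `key`, so several
/// filters can each claim their own entry from the same set.
fn filter(key: Vec<u8>) -> impl FnMut(&mut ResponseSet) -> Option<Vec<u8>> {
    move |set| {
        let idx = set.0.iter().position(|(k, _)| *k == key)?;
        Some(set.0.swap_remove(idx).1)
    }
}

fn main() {
    let mut set = ResponseSet(vec![
        (b"table/info".to_vec(), b"info-bytes".to_vec()),
        (b"table/route".to_vec(), b"route-bytes".to_vec()),
    ]);

    let mut take_info = filter(b"table/info".to_vec());
    let mut take_route = filter(b"table/route".to_vec());

    assert_eq!(take_info(&mut set), Some(b"info-bytes".to_vec()));
    assert_eq!(take_route(&mut set), Some(b"route-bytes".to_vec()));
    // A second call finds nothing: the entry was already consumed.
    assert_eq!(take_info(&mut set), None);
}
```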
@@ -28,13 +28,13 @@ use crate::rpc::store::{RangeRequest, RangeResponse};
use crate::rpc::KeyValue;
use crate::util::get_next_prefix_key;

- pub type KeyValueDecoderFn<T> = dyn Fn(KeyValue) -> Result<T> + Send + Sync;
+ pub type KeyValueDecoderFn<K, V> = dyn Fn(KeyValue) -> Result<(K, V)> + Send + Sync;

- enum PaginationStreamState<T> {
+ enum PaginationStreamState<K, V> {
/// At the start of reading.
Init,
/// Decoding key value pairs.
- Decoding(SimpleKeyValueDecoder<T>),
+ Decoding(SimpleKeyValueDecoder<K, V>),
/// Retrieving data from backend.
Reading(BoxFuture<'static, Result<(PaginationStreamFactory, Option<RangeResponse>)>>),
/// Error
@@ -77,7 +77,7 @@ struct PaginationStreamFactory {
}

impl PaginationStreamFactory {
- fn new(
+ pub fn new(
kv: &KvBackendRef,
key: Vec<u8>,
range_end: Vec<u8>,
@@ -137,7 +137,7 @@ impl PaginationStreamFactory {
}
}

- async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
+ pub async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
if self.more {
let resp = self
.adaptive_range(RangeRequest {
@@ -174,19 +174,18 @@ impl PaginationStreamFactory {
}
}

- pub struct PaginationStream<T> {
+ pub struct PaginationStream<K, V> {
- state: PaginationStreamState<T>,
+ state: PaginationStreamState<K, V>,
- decoder_fn: Arc<KeyValueDecoderFn<T>>,
+ decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
factory: Option<PaginationStreamFactory>,
}

- impl<T> PaginationStream<T> {
+ impl<K, V> PaginationStream<K, V> {
- /// Returns a new [PaginationStream].
pub fn new(
kv: KvBackendRef,
req: RangeRequest,
page_size: usize,
- decoder_fn: Arc<KeyValueDecoderFn<T>>,
+ decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
) -> Self {
Self {
state: PaginationStreamState::Init,
@@ -203,13 +202,13 @@ impl<T> PaginationStream<T> {
}
}

- struct SimpleKeyValueDecoder<T> {
+ struct SimpleKeyValueDecoder<K, V> {
kv: VecDeque<KeyValue>,
- decoder: Arc<KeyValueDecoderFn<T>>,
+ decoder: Arc<KeyValueDecoderFn<K, V>>,
}

- impl<T> Iterator for SimpleKeyValueDecoder<T> {
+ impl<K, V> Iterator for SimpleKeyValueDecoder<K, V> {
- type Item = Result<T>;
+ type Item = Result<(K, V)>;

fn next(&mut self) -> Option<Self::Item> {
if let Some(kv) = self.kv.pop_front() {
@@ -220,8 +219,8 @@ impl<T> Iterator for SimpleKeyValueDecoder<T> {
}
}

- impl<T> Stream for PaginationStream<T> {
+ impl<K, V> Stream for PaginationStream<K, V> {
- type Item = Result<T>;
+ type Item = Result<(K, V)>;

fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {

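The `PaginationStream<T>` to `PaginationStream<K, V>` change only threads the pair-returning decoders through the stream types; the paging itself is untouched. As a rough illustration of such a paging loop (a sorted map plays the kv backend here, and resuming just past the last key seen is an assumption about the behavior, not a copy of the crate's factory):

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

/// Reads all keys in `store` starting at `start`, at most `page_size` per
/// round trip, resuming each page just after the last key seen.
fn read_all_paged(
    store: &BTreeMap<Vec<u8>, Vec<u8>>,
    start: Vec<u8>,
    page_size: usize,
) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut out = Vec::new();
    let mut cursor = Bound::Included(start);
    loop {
        let page: Vec<_> = store
            .range((cursor.clone(), Bound::Unbounded))
            .take(page_size)
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();
        let Some((last_key, _)) = page.last().cloned() else {
            break; // no more data
        };
        out.extend(page);
        cursor = Bound::Excluded(last_key);
    }
    out
}

fn main() {
    let mut store = BTreeMap::new();
    for i in 0..10u8 {
        store.insert(vec![i], vec![i]);
    }
    let rows = read_all_paged(&store, Vec::new(), 3);
    assert_eq!(rows.len(), 10);
}
```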
@@ -97,6 +97,7 @@ impl Display for RangeRequest {
}

impl RangeRequest {
+ #[inline]
pub fn new() -> Self {
Self {
key: vec![],
@@ -113,6 +114,7 @@ impl RangeRequest {

/// key is the first key for the range, If range_end is not given, the
/// request only looks up key.
+ #[inline]
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
self.key = key.into();
self
@@ -127,6 +129,7 @@ impl RangeRequest {
/// then the range request gets all keys prefixed with key.
/// If both key and range_end are '\0', then the range request returns all
/// keys.
+ #[inline]
pub fn with_range(mut self, key: impl Into<Vec<u8>>, range_end: impl Into<Vec<u8>>) -> Self {
self.key = key.into();
self.range_end = range_end.into();
@@ -135,6 +138,7 @@ impl RangeRequest {

/// Gets all keys prefixed with key.
/// range_end is the key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ #[inline]
pub fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
self.key = key.into();
self.range_end = util::get_prefix_end_key(&self.key);
@@ -143,12 +147,14 @@ impl RangeRequest {

/// limit is a limit on the number of keys returned for the request. When
/// limit is set to 0, it is treated as no limit.
+ #[inline]
pub fn with_limit(mut self, limit: i64) -> Self {
self.limit = limit;
self
}

/// keys_only when set returns only the keys and not the values.
+ #[inline]
pub fn with_keys_only(mut self) -> Self {
self.keys_only = true;
self
@@ -198,6 +204,7 @@ impl RangeResponse {
}
}

+ #[inline]
pub fn take_kvs(&mut self) -> Vec<KeyValue> {
self.kvs.drain(..).collect()
}
@@ -237,6 +244,7 @@ impl From<PbPutRequest> for PutRequest {
}

impl PutRequest {
+ #[inline]
pub fn new() -> Self {
Self {
key: vec![],
@@ -246,6 +254,7 @@ impl PutRequest {
}

/// key is the key, in bytes, to put into the key-value store.
+ #[inline]
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
self.key = key.into();
self
@@ -253,6 +262,7 @@ impl PutRequest {

/// value is the value, in bytes, to associate with the key in the
/// key-value store.
+ #[inline]
pub fn with_value(mut self, value: impl Into<Vec<u8>>) -> Self {
self.value = value.into();
self
@@ -260,6 +270,7 @@ impl PutRequest {

/// If prev_kv is set, gets the previous key-value pair before changing it.
/// The previous key-value pair will be returned in the put response.
+ #[inline]
pub fn with_prev_kv(mut self) -> Self {
self.prev_kv = true;
self
@@ -319,15 +330,18 @@ impl Default for BatchGetRequest {
}

impl BatchGetRequest {
+ #[inline]
pub fn new() -> Self {
Self { keys: vec![] }
}

+ #[inline]
pub fn with_keys(mut self, keys: Vec<Vec<u8>>) -> Self {
self.keys = keys;
self
}

+ #[inline]
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
self.keys.push(key.into());
self
@@ -402,6 +416,7 @@ impl From<PbBatchPutRequest> for BatchPutRequest {
}

impl BatchPutRequest {
+ #[inline]
pub fn new() -> Self {
Self {
kvs: vec![],
@@ -409,6 +424,7 @@ impl BatchPutRequest {
}
}

+ #[inline]
pub fn add_kv(mut self, key: impl Into<Vec<u8>>, value: impl Into<Vec<u8>>) -> Self {
|
pub fn add_kv(mut self, key: impl Into<Vec<u8>>, value: impl Into<Vec<u8>>) -> Self {
|
||||||
self.kvs.push(KeyValue {
|
self.kvs.push(KeyValue {
|
||||||
key: key.into(),
|
key: key.into(),
|
||||||
@@ -419,6 +435,7 @@ impl BatchPutRequest {
|
|||||||
|
|
||||||
/// If prev_kv is set, gets the previous key-value pair before changing it.
|
/// If prev_kv is set, gets the previous key-value pair before changing it.
|
||||||
/// The previous key-value pair will be returned in the put response.
|
/// The previous key-value pair will be returned in the put response.
|
||||||
|
#[inline]
|
||||||
pub fn with_prev_kv(mut self) -> Self {
|
pub fn with_prev_kv(mut self) -> Self {
|
||||||
self.prev_kv = true;
|
self.prev_kv = true;
|
||||||
self
|
self
|
||||||
@@ -450,6 +467,7 @@ impl BatchPutResponse {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
||||||
self.prev_kvs.drain(..).collect()
|
self.prev_kvs.drain(..).collect()
|
||||||
}
|
}
|
||||||
@@ -483,6 +501,7 @@ impl From<PbBatchDeleteRequest> for BatchDeleteRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl BatchDeleteRequest {
|
impl BatchDeleteRequest {
|
||||||
|
#[inline]
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
keys: vec![],
|
keys: vec![],
|
||||||
@@ -490,12 +509,7 @@ impl BatchDeleteRequest {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets `keys`.
|
#[inline]
|
||||||
pub fn with_keys(mut self, keys: Vec<Vec<u8>>) -> Self {
|
|
||||||
self.keys = keys;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||||
self.keys.push(key.into());
|
self.keys.push(key.into());
|
||||||
self
|
self
|
||||||
@@ -503,6 +517,7 @@ impl BatchDeleteRequest {
|
|||||||
|
|
||||||
/// If prev_kv is set, gets the previous key-value pair before deleting it.
|
/// If prev_kv is set, gets the previous key-value pair before deleting it.
|
||||||
/// The previous key-value pair will be returned in the batch delete response.
|
/// The previous key-value pair will be returned in the batch delete response.
|
||||||
|
#[inline]
|
||||||
pub fn with_prev_kv(mut self) -> Self {
|
pub fn with_prev_kv(mut self) -> Self {
|
||||||
self.prev_kv = true;
|
self.prev_kv = true;
|
||||||
self
|
self
|
||||||
@@ -567,6 +582,7 @@ impl From<PbCompareAndPutRequest> for CompareAndPutRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CompareAndPutRequest {
|
impl CompareAndPutRequest {
|
||||||
|
#[inline]
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
key: vec![],
|
key: vec![],
|
||||||
@@ -576,12 +592,14 @@ impl CompareAndPutRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// key is the key, in bytes, to put into the key-value store.
|
/// key is the key, in bytes, to put into the key-value store.
|
||||||
|
#[inline]
|
||||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||||
self.key = key.into();
|
self.key = key.into();
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
/// expect is the previous value, in bytes
|
/// expect is the previous value, in bytes
|
||||||
|
#[inline]
|
||||||
pub fn with_expect(mut self, expect: impl Into<Vec<u8>>) -> Self {
|
pub fn with_expect(mut self, expect: impl Into<Vec<u8>>) -> Self {
|
||||||
self.expect = expect.into();
|
self.expect = expect.into();
|
||||||
self
|
self
|
||||||
@@ -589,6 +607,7 @@ impl CompareAndPutRequest {
|
|||||||
|
|
||||||
/// value is the value, in bytes, to associate with the key in the
|
/// value is the value, in bytes, to associate with the key in the
|
||||||
/// key-value store.
|
/// key-value store.
|
||||||
|
#[inline]
|
||||||
pub fn with_value(mut self, value: impl Into<Vec<u8>>) -> Self {
|
pub fn with_value(mut self, value: impl Into<Vec<u8>>) -> Self {
|
||||||
self.value = value.into();
|
self.value = value.into();
|
||||||
self
|
self
|
||||||
@@ -630,10 +649,12 @@ impl CompareAndPutResponse {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
pub fn is_success(&self) -> bool {
|
pub fn is_success(&self) -> bool {
|
||||||
self.success
|
self.success
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
pub fn take_prev_kv(&mut self) -> Option<KeyValue> {
|
pub fn take_prev_kv(&mut self) -> Option<KeyValue> {
|
||||||
self.prev_kv.take()
|
self.prev_kv.take()
|
||||||
}
|
}
|
||||||
@@ -682,6 +703,7 @@ impl From<PbDeleteRangeRequest> for DeleteRangeRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DeleteRangeRequest {
|
impl DeleteRangeRequest {
|
||||||
|
#[inline]
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
key: vec![],
|
key: vec![],
|
||||||
@@ -697,6 +719,7 @@ impl DeleteRangeRequest {
|
|||||||
|
|
||||||
/// key is the first key to delete in the range. If range_end is not given,
|
/// key is the first key to delete in the range. If range_end is not given,
|
||||||
/// the range is defined to contain only the key argument.
|
/// the range is defined to contain only the key argument.
|
||||||
|
#[inline]
|
||||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||||
self.key = key.into();
|
self.key = key.into();
|
||||||
self
|
self
|
||||||
@@ -712,6 +735,7 @@ impl DeleteRangeRequest {
|
|||||||
/// the keys with the prefix (the given key).
|
/// the keys with the prefix (the given key).
|
||||||
/// If range_end is '\0', the range is all keys greater than or equal to the
|
/// If range_end is '\0', the range is all keys greater than or equal to the
|
||||||
/// key argument.
|
/// key argument.
|
||||||
|
#[inline]
|
||||||
pub fn with_range(mut self, key: impl Into<Vec<u8>>, range_end: impl Into<Vec<u8>>) -> Self {
|
pub fn with_range(mut self, key: impl Into<Vec<u8>>, range_end: impl Into<Vec<u8>>) -> Self {
|
||||||
self.key = key.into();
|
self.key = key.into();
|
||||||
self.range_end = range_end.into();
|
self.range_end = range_end.into();
|
||||||
@@ -720,6 +744,7 @@ impl DeleteRangeRequest {
|
|||||||
|
|
||||||
/// Deletes all keys prefixed with key.
|
/// Deletes all keys prefixed with key.
|
||||||
/// range_end is one bit larger than the given key.
|
/// range_end is one bit larger than the given key.
|
||||||
|
#[inline]
|
||||||
pub fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
|
pub fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||||
self.key = key.into();
|
self.key = key.into();
|
||||||
self.range_end = util::get_prefix_end_key(&self.key);
|
self.range_end = util::get_prefix_end_key(&self.key);
|
||||||
@@ -728,6 +753,7 @@ impl DeleteRangeRequest {
|
|||||||
|
|
||||||
/// If prev_kv is set, gets the previous key-value pairs before deleting it.
|
/// If prev_kv is set, gets the previous key-value pairs before deleting it.
|
||||||
/// The previous key-value pairs will be returned in the delete response.
|
/// The previous key-value pairs will be returned in the delete response.
|
||||||
|
#[inline]
|
||||||
pub fn with_prev_kv(mut self) -> Self {
|
pub fn with_prev_kv(mut self) -> Self {
|
||||||
self.prev_kv = true;
|
self.prev_kv = true;
|
||||||
self
|
self
|
||||||
@@ -762,10 +788,12 @@ impl DeleteRangeResponse {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
pub fn deleted(&self) -> i64 {
|
pub fn deleted(&self) -> i64 {
|
||||||
self.deleted
|
self.deleted
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
||||||
self.prev_kvs.drain(..).collect()
|
self.prev_kvs.drain(..).collect()
|
||||||
}
|
}
|
||||||
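The hunks above mostly annotate the request builders with `#[inline]` and drop one unused setter; for orientation, here is a self-contained sketch of the consuming-builder pattern those methods follow, together with the "key plus one" rule the doc comments describe for prefix scans. `MiniRangeRequest` and `prefix_end_key` are illustrative stand-ins written for this note, not the crate's own types.

```rust
#[derive(Debug, Default)]
struct MiniRangeRequest {
    key: Vec<u8>,
    range_end: Vec<u8>,
    limit: i64,
    keys_only: bool,
}

/// Smallest key strictly greater than every key with the given prefix,
/// following the documented rule: "aa" + 1 == "ab", "a\xff" + 1 == "b".
fn prefix_end_key(prefix: &[u8]) -> Vec<u8> {
    let mut end = prefix.to_vec();
    while let Some(last) = end.pop() {
        if last < u8::MAX {
            end.push(last + 1);
            return end;
        }
    }
    vec![0] // every byte was 0xff: fall back to the open-ended marker
}

impl MiniRangeRequest {
    fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
        self.key = key.into();
        self.range_end = prefix_end_key(&self.key);
        self
    }

    fn with_limit(mut self, limit: i64) -> Self {
        self.limit = limit;
        self
    }

    fn with_keys_only(mut self) -> Self {
        self.keys_only = true;
        self
    }
}

fn main() {
    // Each builder method takes `self` by value and returns it, so calls chain freely.
    let req = MiniRangeRequest::default()
        .with_prefix("aa")
        .with_limit(100)
        .with_keys_only();
    assert_eq!(req.range_end, b"ab".to_vec());
    println!("{req:?}");
}
```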
@@ -172,7 +172,9 @@ impl Inner {

            if !res.success {
                if let Some(kv) = res.prev_kv {
-                    let v: [u8; 8] = match kv.value.clone().try_into() {
+                    expect = kv.value.clone();
+
+                    let v: [u8; 8] = match kv.value.try_into() {
                        Ok(a) => a,
                        Err(v) => {
                            return error::UnexpectedSequenceValueSnafu {
@@ -182,12 +184,13 @@ impl Inner {
                        }
                    };
                    let v = u64::from_le_bytes(v);
+
                    // If the existed value is smaller than the initial, we should start from the initial.
                    start = v.max(self.initial);
-                    expect = kv.value;
                } else {
-                    start = self.initial;
                    expect = vec![];
+
+                    start = self.initial;
                }
                continue;
            }
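The sequence hunk above reads the stored counter as 8 little-endian bytes, converting the raw `Vec<u8>` into a `[u8; 8]` before `u64::from_le_bytes`, and reports the raw bytes when the width is wrong. A minimal standalone sketch of that round trip (function names are mine, not the crate's):

```rust
fn encode_sequence(v: u64) -> Vec<u8> {
    v.to_le_bytes().to_vec()
}

fn decode_sequence(value: Vec<u8>) -> Result<u64, Vec<u8>> {
    // `Vec<u8> -> [u8; 8]` succeeds only when the length is exactly 8; on failure the
    // original Vec is handed back, which is what the `Err(v) => ...` arm above reports.
    let bytes: [u8; 8] = value.try_into()?;
    Ok(u64::from_le_bytes(bytes))
}

fn main() {
    let stored = encode_sequence(42);
    assert_eq!(decode_sequence(stored), Ok(42));
    // A value of the wrong width is returned untouched so the caller can report it.
    assert!(decode_sequence(vec![1, 2, 3]).is_err());
}
```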
@@ -22,7 +22,6 @@ use common_procedure::store::util::multiple_value_stream;
use common_procedure::Result as ProcedureResult;
use futures::future::try_join_all;
use futures::StreamExt;
-use itertools::Itertools;
use snafu::ResultExt;

use crate::error::Result;
@@ -80,21 +79,17 @@ fn decode_kv(kv: KeyValue) -> Result<(String, Vec<u8>)> {
    Ok((key, value))
}

-enum SplitValue {
-    Single(Vec<u8>),
-    Multiple(Vec<Vec<u8>>),
+enum SplitValue<'a> {
+    Single(&'a [u8]),
+    Multiple(Vec<&'a [u8]>),
}

-fn split_value(value: Vec<u8>, max_value_size: Option<usize>) -> SplitValue {
+fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
    if let Some(max_value_size) = max_value_size {
        if value.len() <= max_value_size {
            SplitValue::Single(value)
        } else {
-            let mut values = vec![];
-            for chunk in value.into_iter().chunks(max_value_size).into_iter() {
-                values.push(chunk.collect());
-            }
-            SplitValue::Multiple(values)
+            SplitValue::Multiple(value.chunks(max_value_size).collect::<Vec<_>>())
        }
    } else {
        SplitValue::Single(value)
@@ -104,10 +99,10 @@ fn split_value(value: Vec<u8>, max_value_size: Option<usize>) -> SplitValue {
#[async_trait]
impl StateStore for KvStateStore {
    async fn put(&self, key: &str, value: Vec<u8>) -> ProcedureResult<()> {
-        let split = split_value(value, self.max_value_size);
+        let split = split_value(&value, self.max_value_size);
        let key = with_prefix(key);
        match split {
-            SplitValue::Single(value) => {
+            SplitValue::Single(_) => {
                self.kv_backend
                    .put(
                        PutRequest::new()
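A simplified, self-contained version of the chunking path introduced above: values no larger than the limit pass through whole, larger ones are split into `max`-sized slices with `slice::chunks` (the last chunk may be shorter). The flat `Vec` below stands in for the `SplitValue` enum in the real code.

```rust
fn split_value(value: &[u8], max_value_size: Option<usize>) -> Vec<&[u8]> {
    match max_value_size {
        Some(max) if value.len() > max => value.chunks(max).collect(),
        _ => vec![value],
    }
}

fn main() {
    let value = vec![0u8; 10];
    assert_eq!(split_value(&value, None).len(), 1);
    assert_eq!(split_value(&value, Some(16)).len(), 1);
    // 10 bytes with a 4-byte limit -> chunks of 4, 4 and 2 bytes.
    let chunks = split_value(&value, Some(4));
    assert_eq!(chunks.len(), 3);
    assert_eq!(chunks[2].len(), 2);
}
```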
@@ -14,13 +14,14 @@

use std::sync::Arc;

-use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;

use crate::cache_invalidator::DummyCacheInvalidator;
-use crate::datanode_manager::{Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef};
+use crate::datanode_manager::{
+    Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef, HandleResponse,
+};
use crate::ddl::table_meta::TableMetadataAllocator;
use crate::ddl::DdlContext;
use crate::error::Result;
@@ -34,7 +35,7 @@ use crate::wal_options_allocator::WalOptionsAllocator;

#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
-    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse>;
+    async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse>;

    async fn handle_query(
        &self,
@@ -64,7 +65,7 @@ struct MockDatanode<T> {

#[async_trait::async_trait]
impl<T: MockDatanodeHandler> Datanode for MockDatanode<T> {
-    async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
+    async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
        self.handler.handle(&self.peer, request).await
    }

@@ -163,7 +163,7 @@ mod tests {
        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
        let mut topic_manager = KafkaTopicManager::new(config.clone(), kv_backend);
        // Replaces the default topic pool with the constructed topics.
-        topic_manager.topic_pool.clone_from(&topics);
+        topic_manager.topic_pool = topics.clone();
        // Replaces the default selector with a round-robin selector without shuffled.
        topic_manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());

@@ -291,7 +291,7 @@ mod tests {
        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
        let mut manager = TopicManager::new(config.clone(), kv_backend);
        // Replaces the default topic pool with the constructed topics.
-        manager.topic_pool.clone_from(&topics);
+        manager.topic_pool = topics.clone();
        // Replaces the default selector with a round-robin selector without shuffled.
        manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
        manager.start().await.unwrap();
@@ -114,29 +114,3 @@ pub async fn execute_until_suspended_or_done(

    None
}
-
-pub fn new_test_procedure_context() -> Context {
-    Context {
-        procedure_id: ProcedureId::random(),
-        provider: Arc::new(MockContextProvider::default()),
-    }
-}
-
-pub async fn execute_procedure_until<P: Procedure>(procedure: &mut P, until: impl Fn(&P) -> bool) {
-    let mut reached = false;
-    let context = new_test_procedure_context();
-    while !matches!(
-        procedure.execute(&context).await.unwrap(),
-        Status::Done { .. }
-    ) {
-        if until(procedure) {
-            reached = true;
-            break;
-        }
-    }
-    assert!(
-        reached,
-        "procedure '{}' did not reach the expected state",
-        procedure.type_name()
-    );
-}
@@ -19,19 +19,19 @@ use std::sync::{Arc, Mutex};
use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};

pub enum OwnedKeyRwLockGuard {
-    Read { _guard: OwnedRwLockReadGuard<()> },
-    Write { _guard: OwnedRwLockWriteGuard<()> },
+    Read(OwnedRwLockReadGuard<()>),
+    Write(OwnedRwLockWriteGuard<()>),
}

impl From<OwnedRwLockReadGuard<()>> for OwnedKeyRwLockGuard {
    fn from(guard: OwnedRwLockReadGuard<()>) -> Self {
-        OwnedKeyRwLockGuard::Read { _guard: guard }
+        OwnedKeyRwLockGuard::Read(guard)
    }
}

impl From<OwnedRwLockWriteGuard<()>> for OwnedKeyRwLockGuard {
    fn from(guard: OwnedRwLockWriteGuard<()>) -> Self {
-        OwnedKeyRwLockGuard::Write { _guard: guard }
+        OwnedKeyRwLockGuard::Write(guard)
    }
}

@@ -17,7 +17,7 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::ResultExt;

-use crate::error::{self, GeneralDataFusionSnafu, IntoVectorSnafu, Result};
+use crate::error::{self, IntoVectorSnafu, Result};
use crate::prelude::ScalarValue;

/// Represents the result from an expression
@@ -43,9 +43,7 @@ impl ColumnarValue {
        Ok(match self {
            ColumnarValue::Vector(v) => v,
            ColumnarValue::Scalar(s) => {
-                let v = s
-                    .to_array_of_size(num_rows)
-                    .context(GeneralDataFusionSnafu)?;
+                let v = s.to_array_of_size(num_rows);
                let data_type = v.data_type().clone();
                Helper::try_into_vector(v).context(IntoVectorSnafu { data_type })?
            }
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::fmt::{Debug, Display, Formatter};
+use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::greptime_proto::v1::add_column_location::LocationType;
@@ -94,8 +94,8 @@ impl Debug for OutputData {
            OutputData::RecordBatches(recordbatches) => {
                write!(f, "OutputData::RecordBatches({recordbatches:?})")
            }
-            OutputData::Stream(s) => {
-                write!(f, "OutputData::Stream(<{}>)", s.name())
+            OutputData::Stream(_) => {
+                write!(f, "OutputData::Stream(<stream>)")
            }
        }
    }
@@ -126,17 +126,6 @@ pub enum AddColumnLocation {
    After { column_name: String },
}

-impl Display for AddColumnLocation {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        match self {
-            AddColumnLocation::First => write!(f, r#"FIRST"#),
-            AddColumnLocation::After { column_name } => {
-                write!(f, r#"AFTER {column_name}"#)
-            }
-        }
-    }
-}
-
impl From<&AddColumnLocation> for Location {
    fn from(value: &AddColumnLocation) -> Self {
        match value {
@@ -72,7 +72,6 @@ pub fn create_aggregate_function(
mod tests {
    use std::sync::Arc;

-    use datafusion_common::DFSchema;
    use datafusion_expr::{
        ColumnarValue as DfColumnarValue, ScalarUDF as DfScalarUDF,
        TypeSignature as DfTypeSignature,
@@ -136,17 +135,15 @@ mod tests {

        // test into_df_udf
        let df_udf: DfScalarUDF = udf.into();
-        assert_eq!("and", df_udf.name());
+        assert_eq!("and", df_udf.name);

        let types = vec![DataType::Boolean, DataType::Boolean];
        assert!(
-            matches!(&df_udf.signature().type_signature, DfTypeSignature::Exact(ts) if ts.clone() == types)
+            matches!(&df_udf.signature.type_signature, DfTypeSignature::Exact(ts) if ts.clone() == types)
        );
        assert_eq!(
-            DataType::Boolean,
-            df_udf
-                .return_type_from_exprs(&[], &DFSchema::empty(), &[])
-                .unwrap()
+            Arc::new(DataType::Boolean),
+            (df_udf.return_type)(&[]).unwrap()
        );

        let args = vec![
@@ -155,7 +152,7 @@ mod tests {
        ];

        // call the function
-        let result = (df_udf.fun())(&args).unwrap();
+        let result = (df_udf.fun)(&args).unwrap();

        match result {
            DfColumnarValue::Array(arr) => {
@@ -126,7 +126,7 @@ impl DfAccumulatorAdaptor {
}

impl DfAccumulator for DfAccumulatorAdaptor {
-    fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
+    fn state(&self) -> DfResult<Vec<ScalarValue>> {
        let state_values = self.accumulator.state()?;
        let state_types = self.creator.state_types()?;
        if state_values.len() != state_types.len() {
@@ -161,7 +161,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
        Ok(())
    }

-    fn evaluate(&mut self) -> DfResult<ScalarValue> {
+    fn evaluate(&self) -> DfResult<ScalarValue> {
        let value = self.accumulator.evaluate()?;
        let output_type = self.creator.output_type()?;
        let scalar_value = value
@@ -44,7 +44,9 @@ pub fn build_filter_from_timestamp(
    ts_col_name: &str,
    time_range: Option<&TimestampRange>,
) -> Option<Expr> {
-    let time_range = time_range?;
+    let Some(time_range) = time_range else {
+        return None;
+    };
    let ts_col_expr = DfExpr::Column(Column {
        relation: None,
        name: ts_col_name.to_string(),
@@ -92,10 +94,10 @@ mod tests {

    #[test]
    fn test_from_df_expr() {
-        let df_expr = DfExpr::Wildcard { qualifier: None };
+        let df_expr = DfExpr::Wildcard;

        let expr: Expr = df_expr.into();

-        assert_eq!(DfExpr::Wildcard { qualifier: None }, *expr.df_expr());
+        assert_eq!(DfExpr::Wildcard, *expr.df_expr());
    }
}
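The first hunk above exchanges `?` on an `Option` for an explicit `let ... else` early return; the two forms are behaviorally equivalent, and which one a codebase prefers is a style or toolchain choice (the motivation is my assumption, the diff does not state it). A neutral sketch:

```rust
fn first_even_with_question_mark(values: &[i32]) -> Option<i32> {
    // `?` propagates None from the Option directly.
    let found = values.iter().find(|v| **v % 2 == 0)?;
    Some(*found)
}

fn first_even_with_let_else(values: &[i32]) -> Option<i32> {
    // `let ... else` spells out the same early return.
    let Some(found) = values.iter().find(|v| **v % 2 == 0) else {
        return None;
    };
    Some(*found)
}

fn main() {
    let values = [1, 3, 4, 7];
    assert_eq!(first_even_with_question_mark(&values), Some(4));
    assert_eq!(first_even_with_let_else(&values), Some(4));
    assert_eq!(first_even_with_let_else(&[1, 3]), None);
}
```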
@@ -16,18 +16,15 @@
//!
//! Modified from DataFusion.

-use std::any::Any;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;

-use datafusion::arrow::datatypes::Field;
-use datafusion_common::Result;
-use datafusion_expr::function::AccumulatorArgs;
use datafusion_expr::{
-    Accumulator, AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf, AggregateUDFImpl,
+    AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf,
+    StateTypeFunction as DfStateTypeFunction,
};
use datatypes::arrow::datatypes::DataType as ArrowDataType;
-use datatypes::data_type::DataType;
+use datatypes::prelude::*;

use crate::function::{
    to_df_return_type, AccumulatorFunctionImpl, ReturnTypeFunction, StateTypeFunction,
@@ -93,72 +90,13 @@ impl AggregateFunction {

impl From<AggregateFunction> for DfAggregateUdf {
    fn from(udaf: AggregateFunction) -> Self {
-        struct DfUdafAdapter {
-            name: String,
-            signature: datafusion_expr::Signature,
-            return_type_func: datafusion_expr::ReturnTypeFunction,
-            accumulator: AccumulatorFactoryFunction,
-            creator: AggregateFunctionCreatorRef,
-        }
-
-        impl Debug for DfUdafAdapter {
-            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-                f.debug_struct("DfUdafAdapter")
-                    .field("name", &self.name)
-                    .field("signature", &self.signature)
-                    .finish()
-            }
-        }
-
-        impl AggregateUDFImpl for DfUdafAdapter {
-            fn as_any(&self) -> &dyn Any {
-                self
-            }
-
-            fn name(&self) -> &str {
-                &self.name
-            }
-
-            fn signature(&self) -> &datafusion_expr::Signature {
-                &self.signature
-            }
-
-            fn return_type(&self, arg_types: &[ArrowDataType]) -> Result<ArrowDataType> {
-                (self.return_type_func)(arg_types).map(|x| x.as_ref().clone())
-            }
-
-            fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
-                (self.accumulator)(acc_args)
-            }
-
-            fn state_fields(
-                &self,
-                name: &str,
-                _value_type: ArrowDataType,
-                _ordering_fields: Vec<Field>,
-            ) -> Result<Vec<Field>> {
-                self.creator
-                    .state_types()
-                    .map(|x| {
-                        (0..x.len())
-                            .zip(x)
-                            .map(|(i, t)| {
-                                Field::new(format!("{}_{}", name, i), t.as_arrow_type(), true)
-                            })
-                            .collect::<Vec<_>>()
-                    })
-                    .map_err(|e| e.into())
-            }
-        }
-
-        DfUdafAdapter {
-            name: udaf.name,
-            signature: udaf.signature.into(),
-            return_type_func: to_df_return_type(udaf.return_type),
-            accumulator: to_df_accumulator_func(udaf.accumulator, udaf.creator.clone()),
-            creator: udaf.creator,
-        }
-        .into()
+        DfAggregateUdf::new(
+            &udaf.name,
+            &udaf.signature.into(),
+            &to_df_return_type(udaf.return_type),
+            &to_df_accumulator_func(udaf.accumulator, udaf.creator.clone()),
+            &to_df_state_type(udaf.state_type),
+        )
    }
}

@@ -172,3 +110,19 @@ fn to_df_accumulator_func(
        Ok(Box::new(DfAccumulatorAdaptor::new(accumulator, creator)) as _)
    })
}
+
+fn to_df_state_type(func: StateTypeFunction) -> DfStateTypeFunction {
+    let df_func = move |data_type: &ArrowDataType| {
+        // DataFusion DataType -> ConcreteDataType
+        let concrete_data_type = ConcreteDataType::from_arrow_type(data_type);
+
+        // evaluate ConcreteDataType
+        let eval_result = (func)(&concrete_data_type);
+
+        // ConcreteDataType -> DataFusion DataType
+        eval_result
+            .map(|ts| Arc::new(ts.iter().map(|t| t.as_arrow_type()).collect()))
+            .map_err(|e| e.into())
+    };
+    Arc::new(df_func)
+}
@@ -70,8 +70,6 @@ impl ScalarUdf {

impl From<ScalarUdf> for DfScalarUDF {
    fn from(udf: ScalarUdf) -> Self {
-        // TODO(LFC): remove deprecated
-        #[allow(deprecated)]
        DfScalarUDF::new(
            &udf.name,
            &udf.signature.into(),
@@ -21,9 +21,10 @@ use common_recordbatch::{DfSendableRecordBatchStream, SendableRecordBatchStream}
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::error::Result as DfResult;
pub use datafusion::execution::context::{SessionContext, TaskContext};
+use datafusion::physical_plan::expressions::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
pub use datafusion::physical_plan::Partitioning;
-use datafusion::physical_plan::{DisplayAs, DisplayFormatType, PlanProperties};
+use datafusion::physical_plan::{DisplayAs, DisplayFormatType, Statistics};
use datatypes::schema::SchemaRef;
use snafu::ResultExt;

@@ -46,9 +47,13 @@ pub trait PhysicalPlan: Debug + Send + Sync {
    /// Get the schema for this physical plan
    fn schema(&self) -> SchemaRef;

-    /// Return properties of the output of the [PhysicalPlan], such as output
-    /// ordering(s), partitioning information etc.
-    fn properties(&self) -> &PlanProperties;
+    /// Specifies the output partitioning scheme of this plan
+    fn output_partitioning(&self) -> Partitioning;
+
+    /// returns `Some(keys)` that describes how the output was sorted.
+    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
+        None
+    }

    /// Get a list of child physical plans that provide the input for this plan. The returned list
    /// will be empty for leaf nodes, will contain a single value for unary nodes, or two
@@ -102,8 +107,8 @@ impl PhysicalPlan for PhysicalPlanAdapter {
        self.schema.clone()
    }

-    fn properties(&self) -> &PlanProperties {
-        self.df_plan.properties()
+    fn output_partitioning(&self) -> Partitioning {
+        self.df_plan.output_partitioning()
    }

    fn children(&self) -> Vec<PhysicalPlanRef> {
@@ -165,6 +170,14 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
        self.0.schema().arrow_schema().clone()
    }

+    fn output_partitioning(&self) -> Partitioning {
+        self.0.output_partitioning()
+    }
+
+    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
+        self.0.output_ordering()
+    }
+
    fn children(&self) -> Vec<Arc<dyn DfPhysicalPlan>> {
        self.0
            .children()
@@ -200,12 +213,12 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
        Ok(Box::pin(DfRecordBatchStreamAdapter::new(stream)))
    }

-    fn metrics(&self) -> Option<MetricsSet> {
-        self.0.metrics()
+    fn statistics(&self) -> Statistics {
+        Statistics::default()
    }

-    fn properties(&self) -> &PlanProperties {
-        self.0.properties()
+    fn metrics(&self) -> Option<MetricsSet> {
+        self.0.metrics()
    }
}

@@ -219,12 +232,10 @@ impl DisplayAs for DfPhysicalPlanAdapter {
mod test {
    use async_trait::async_trait;
    use common_recordbatch::{RecordBatch, RecordBatches};
-    use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
    use datafusion::datasource::{DefaultTableSource, TableProvider as DfTableProvider, TableType};
    use datafusion::execution::context::{SessionContext, SessionState};
-    use datafusion::physical_expr::EquivalenceProperties;
+    use datafusion::physical_plan::collect;
    use datafusion::physical_plan::empty::EmptyExec;
-    use datafusion::physical_plan::{collect, ExecutionMode};
    use datafusion_expr::logical_plan::builder::LogicalPlanBuilder;
    use datafusion_expr::{Expr, TableSource};
    use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
@@ -261,13 +272,10 @@ mod test {
        _filters: &[Expr],
        _limit: Option<usize>,
    ) -> DfResult<Arc<dyn DfPhysicalPlan>> {
-        let schema = Arc::new(Schema::try_from(self.schema()).unwrap());
-        let properties = PlanProperties::new(
-            EquivalenceProperties::new(schema.arrow_schema().clone()),
-            Partitioning::UnknownPartitioning(1),
-            ExecutionMode::Bounded,
-        );
-        let my_plan = Arc::new(MyExecutionPlan { schema, properties });
+        let schema = Schema::try_from(self.schema()).unwrap();
+        let my_plan = Arc::new(MyExecutionPlan {
+            schema: Arc::new(schema),
+        });
        let df_plan = DfPhysicalPlanAdapter(my_plan);
        Ok(Arc::new(df_plan))
    }
@@ -281,10 +289,9 @@ mod test {
        }
    }

-    #[derive(Debug, Clone)]
+    #[derive(Debug)]
    struct MyExecutionPlan {
        schema: SchemaRef,
-        properties: PlanProperties,
    }

    impl PhysicalPlan for MyExecutionPlan {
@@ -296,8 +303,8 @@ mod test {
        self.schema.clone()
    }

-    fn properties(&self) -> &PlanProperties {
-        &self.properties
+    fn output_partitioning(&self) -> Partitioning {
+        Partitioning::UnknownPartitioning(1)
    }

    fn children(&self) -> Vec<PhysicalPlanRef> {
@@ -305,7 +312,7 @@ mod test {
    }

    fn with_new_children(&self, _children: Vec<PhysicalPlanRef>) -> Result<PhysicalPlanRef> {
-        Ok(Arc::new(self.clone()))
+        unimplemented!()
    }

    fn execute(
@@ -374,7 +381,7 @@ mod test {

        let plan = PhysicalPlanAdapter::new(
            Arc::new(Schema::try_from(df_schema.clone()).unwrap()),
-            Arc::new(EmptyExec::new(df_schema.clone())),
+            Arc::new(EmptyExec::new(true, df_schema.clone())),
        );
        let _ = plan.df_plan.as_any().downcast_ref::<EmptyExec>().unwrap();

@@ -31,8 +31,6 @@ pub enum TypeSignature {
    // A function such as `array` is `VariadicEqual`
    // The first argument decides the type used for coercion
    VariadicEqual,
-    /// One or more arguments with arbitrary types
-    VariadicAny,
    /// fixed number of arguments of an arbitrary but equal type out of a list of valid types
    // A function of one argument of f64 is `Uniform(1, vec![ConcreteDataType::Float64])`
    // A function of one argument of f64 or f32 is `Uniform(1, vec![ConcreteDataType::Float32, ConcreteDataType::Float64])`
@@ -81,15 +79,6 @@ impl Signature {
            volatility,
        }
    }
-
-    /// variadic_any - Creates a variadic signature that represents an arbitrary number of arguments of any type.
-    pub fn variadic_any(volatility: Volatility) -> Self {
-        Self {
-            type_signature: TypeSignature::VariadicAny,
-            volatility,
-        }
-    }
-
    /// uniform - Creates a function with a fixed number of arguments of the same type, which must be from valid_types.
    pub fn uniform(
        arg_count: usize,
@@ -142,7 +131,6 @@ impl From<TypeSignature> for DfTypeSignature {
            TypeSignature::OneOf(ts) => {
                DfTypeSignature::OneOf(ts.into_iter().map(Into::into).collect())
            }
-            TypeSignature::VariadicAny => DfTypeSignature::VariadicAny,
        }
    }
}
@@ -11,7 +11,6 @@ workspace = true
arc-swap = "1.6"
common-error.workspace = true
common-macro.workspace = true
-common-telemetry.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datatypes.workspace = true
@@ -103,7 +103,7 @@ where
            "Trying to cast a RecordBatch into an incompatible schema. RecordBatch: {}, Target: {}",
            projected_column.schema(),
            projected_schema,
-        )), None));
+        ))));
    }

    let mut columns = Vec::with_capacity(projected_schema.fields.len());
@@ -218,10 +218,6 @@ impl RecordBatchStreamAdapter {
}

impl RecordBatchStream for RecordBatchStreamAdapter {
-    fn name(&self) -> &str {
-        "RecordBatchStreamAdapter"
-    }
-
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }
@@ -18,7 +18,6 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
-use datafusion_common::ScalarValue;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};

@@ -70,9 +69,8 @@ pub enum Error {
        location: Location,
    },

-    #[snafu(display("Failed to convert {v:?} to Arrow scalar"))]
-    ToArrowScalar {
-        v: ScalarValue,
+    #[snafu(display("Failed to init Recordbatch stream"))]
+    InitRecordbatchStream {
        #[snafu(source)]
        error: datafusion_common::DataFusionError,
        location: Location,
@@ -130,7 +128,7 @@ impl ErrorExt for Error {
            | Error::CreateRecordBatches { .. }
            | Error::PollStream { .. }
            | Error::Format { .. }
-            | Error::ToArrowScalar { .. }
+            | Error::InitRecordbatchStream { .. }
            | Error::ColumnNotExists { .. }
            | Error::ProjectArrowRecordBatch { .. }
            | Error::ArrowCompute { .. } => StatusCode::Internal,
@@ -22,7 +22,7 @@ use datafusion_common::ScalarValue;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;

-use crate::error::{ArrowComputeSnafu, Result, ToArrowScalarSnafu, UnsupportedOperationSnafu};
+use crate::error::{ArrowComputeSnafu, Result, UnsupportedOperationSnafu};

/// An inplace expr evaluator for simple filter. Only support
/// - `col` `op` `literal`
@@ -69,10 +69,9 @@ impl SimpleFilterEvaluator {
            _ => return None,
        };

-        let literal = rhs.to_scalar().ok()?;
        Some(Self {
            column_name: lhs.name.clone(),
-            literal,
+            literal: rhs.clone().to_scalar(),
            op,
        })
    }
@@ -86,10 +85,7 @@ impl SimpleFilterEvaluator {
    }

    pub fn evaluate_scalar(&self, input: &ScalarValue) -> Result<bool> {
-        let input = input
-            .to_scalar()
-            .with_context(|_| ToArrowScalarSnafu { v: input.clone() })?;
-        let result = self.evaluate_datum(&input)?;
+        let result = self.evaluate_datum(&input.to_scalar())?;
        Ok(result.value(0))
    }

@@ -37,10 +37,6 @@ pub use recordbatch::RecordBatch;
use snafu::{ensure, ResultExt};

pub trait RecordBatchStream: Stream<Item = Result<RecordBatch>> {
-    fn name(&self) -> &str {
-        "RecordBatchStream"
-    }
-
    fn schema(&self) -> SchemaRef;

    fn output_ordering(&self) -> Option<&[OrderOption]>;
@@ -247,10 +243,6 @@ impl<S> RecordBatchStreamWrapper<S> {
impl<S: Stream<Item = Result<RecordBatch>> + Unpin> RecordBatchStream
    for RecordBatchStreamWrapper<S>
{
-    fn name(&self) -> &str {
-        "RecordBatchStreamWrapper"
-    }
-
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }
@@ -12,9 +12,7 @@ async-trait.workspace = true
bytes.workspace = true
catalog.workspace = true
common-error.workspace = true
-common-function.workspace = true
common-macro.workspace = true
-common-telemetry.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
@@ -22,7 +20,6 @@ datafusion-substrait.workspace = true
datatypes.workspace = true
promql.workspace = true
prost.workspace = true
-session.workspace = true
snafu.workspace = true

[dependencies.substrait_proto]
@@ -16,24 +16,18 @@ use std::sync::Arc;

use async_trait::async_trait;
use bytes::{Buf, Bytes, BytesMut};
-use common_function::function_registry::FUNCTION_REGISTRY;
-use common_function::scalars::udf::create_udf;
-use datafusion::catalog::CatalogProviderList;
+use datafusion::catalog::CatalogList;
use datafusion::execution::context::SessionState;
use datafusion::execution::runtime_env::RuntimeEnv;
-use datafusion::execution::FunctionRegistry;
use datafusion::prelude::{SessionConfig, SessionContext};
use datafusion_expr::LogicalPlan;
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
-use datafusion_substrait::substrait::proto::Plan;
use prost::Message;
-use session::context::QueryContextRef;
use snafu::ResultExt;
+use substrait_proto::proto::Plan;

-use crate::error::{
-    DFInternalSnafu, DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error,
-};
+use crate::error::{DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error};
use crate::extension_serializer::ExtensionSerializer;
use crate::SubstraitPlan;

@@ -48,20 +42,17 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
    async fn decode<B: Buf + Send>(
        &self,
        message: B,
-        catalog_list: Arc<dyn CatalogProviderList>,
-        mut state: SessionState,
-        query_ctx: QueryContextRef,
+        catalog_list: Arc<dyn CatalogList>,
+        catalog: &str,
+        schema: &str,
    ) -> Result<Self::Plan, Self::Error> {
-        // substrait decoder will look up the UDFs in SessionState, so we need to register them
-        for func in FUNCTION_REGISTRY.functions() {
-            let udf = Arc::new(create_udf(func, query_ctx.clone(), Default::default()).into());
-            state.register_udf(udf).context(DFInternalSnafu)?;
-        }
-
+        let state_config = SessionConfig::new().with_default_catalog_and_schema(catalog, schema);
+        let state = SessionState::new_with_config_rt(state_config, Arc::new(RuntimeEnv::default()))
+            .with_serializer_registry(Arc::new(ExtensionSerializer));
        let mut context = SessionContext::new_with_state(state);
        context.register_catalog_list(catalog_list);
        let plan = Plan::decode(message).context(DecodeRelSnafu)?;
-        let df_plan = from_substrait_plan(&context, &plan)
+        let df_plan = from_substrait_plan(&mut context, &plan)
            .await
            .context(DecodeDfPlanSnafu)?;
        Ok(df_plan)
@@ -19,7 +19,7 @@ use datafusion::execution::registry::SerializerRegistry;
use datafusion_common::DataFusionError;
use datafusion_expr::UserDefinedLogicalNode;
use promql::extension_plan::{
-    EmptyMetric, InstantManipulate, RangeManipulate, ScalarCalculate, SeriesDivide, SeriesNormalize,
+    EmptyMetric, InstantManipulate, RangeManipulate, SeriesDivide, SeriesNormalize,
};

pub struct ExtensionSerializer;
@@ -50,13 +50,6 @@ impl SerializerRegistry for ExtensionSerializer {
                    .expect("Failed to downcast to RangeManipulate");
                Ok(range_manipulate.serialize())
            }
-            name if name == ScalarCalculate::name() => {
-                let scalar_calculate = node
-                    .as_any()
-                    .downcast_ref::<ScalarCalculate>()
-                    .expect("Failed to downcast to ScalarCalculate");
-                Ok(scalar_calculate.serialize())
-            }
            name if name == SeriesDivide::name() => {
                let series_divide = node
                    .as_any()
@@ -99,10 +92,6 @@ impl SerializerRegistry for ExtensionSerializer {
                let series_divide = SeriesDivide::deserialize(bytes)?;
                Ok(Arc::new(series_divide))
            }
-            name if name == ScalarCalculate::name() => {
-                let scalar_calculate = ScalarCalculate::deserialize(bytes)?;
-                Ok(Arc::new(scalar_calculate))
-            }
            name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
                "EmptyMetric should not be deserialized".to_string(),
            )),
Some files were not shown because too many files have changed in this diff.