Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 16:32:54 +00:00)

Compare commits: 55 commits, tantivy-po...flow_p3_re
Commits in this comparison (SHA1):
f24cca83a1, 533ae3378f, 843ef050d6, 96b2871750, ea117e1b4c, e8eb3f6756, cce40f6a48, 874d756dba, bba3108e0d, 9524ec83bc,
e0b5f52c2a, 1272bc9afc, df01ac05a1, 659d34a170, 62037ee4c8, 8d229dda98, 42e7403fcc, 20a933e395, b619950c70, 4685b59ef1,
86a989517e, 0aaf7621bd, 924c52af7c, f5e5a89e44, 778e195f07, f764fd5847, 19a9035f4b, 96c01a3bf0, bf21527f18, 9e1441e48b,
eeb4e26c71, 7ca0fa52d4, 443722597b, d4b814f698, d0b2a11f2b, 54432df92f, 8f2ce4abe8, d077892e1c, cfed466fcd, 0c5f4801b7,
2114b153e7, 314f2704d4, 510782261d, 20e8c3d864, 2a2a44883f, 4248dfcf36, 64945533dd, ffc8074556, 7e56bf250b, 50ae4dc174,
16aef70089, 786f43da91, 3e9bda3267, 89d58538c7, d12379106e
@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: 'true'
default: "true"
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: 'true'
default: "true"
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: 'true'
default: "true"
runs:
using: composite
steps:

@@ -47,7 +47,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=all \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

@@ -58,7 +58,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

@@ -72,5 +72,5 @@ runs:
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

@@ -16,7 +16,7 @@ inputs:
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
default: "false"
working-dir:
description: Working directory to build the artifacts
required: false

@@ -68,7 +68,7 @@ runs:
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard

@@ -79,7 +79,7 @@ runs:
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}

@@ -26,8 +26,6 @@ runs:
using: composite
steps:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
.github/workflows/apidoc.yml (vendored, 2 lines changed)

@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18

jobs:
apidoc:
.github/workflows/develop.yml (vendored, 57 lines changed)

@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18

jobs:
check-typos-and-docs:

@@ -147,8 +147,9 @@ jobs:
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:

@@ -184,13 +185,13 @@ jobs:
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: /tmp/sqlness-*
retention-days: 3

sqlness-kafka-wal:

@@ -214,13 +215,13 @@ jobs:
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
path: /tmp/sqlness-*
retention-days: 3

fmt:

@@ -314,10 +315,10 @@ jobs:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"

@@ -330,20 +331,20 @@ jobs:
fail_ci_if_error: false
verbose: true

compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0
# compat:
# name: Compatibility Test
# needs: build
# runs-on: ubuntu-20.04
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v4
# - name: Download pre-built binaries
# uses: actions/download-artifact@v4
# with:
# name: bins
# path: .
# - name: Unzip binaries
# run: |
# mkdir -p ./bins/current
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
# - run: ./tests/compat/test-compat.sh 0.6.0
.github/workflows/nightly-ci.yml (vendored, 10 lines changed)

@@ -12,7 +12,7 @@ concurrency:
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18

jobs:
sqlness:

@@ -85,10 +85,10 @@ jobs:
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()
.github/workflows/release.yml (vendored, 2 lines changed)

@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2023-12-19
RUST_TOOLCHAIN: nightly-2024-04-18
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which will be incorporated into the pull request process.
- Make sure all files have a proper license header (run `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/) and the [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
- Make sure all unit tests pass (using `cargo test --workspace` or [nextest](https://nexte.st/index.html): `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
Cargo.lock (generated, 2480 lines changed): file diff suppressed because it is too large.
Cargo.toml (53 lines changed)

@@ -70,16 +70,24 @@ license = "Apache-2.0"
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"

[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
#
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
arrow = { version = "47.0" }
arrow-array = "47.0"
arrow-flight = "47.0"
arrow-ipc = { version = "47.0", features = ["lz4"] }
arrow-schema = { version = "47.0", features = ["serde"] }
arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }

@@ -90,21 +98,24 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.12"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "04d78b6e025ceb518040fdd10858c2a9d9345820" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"

@@ -115,12 +126,12 @@ moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
opentelemetry-proto = { version = "0.5", features = [
"gen-tonic",
"metrics",
"trace",
] }
parquet = "47.0"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }

@@ -144,18 +155,18 @@ serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
sysinfo = "0.30"
# on branch v0.38.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
tonic = { version = "0.11", features = ["tls"] }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"

## workspaces members
Makefile (4 lines changed)

@@ -54,8 +54,10 @@ ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
@@ -215,37 +215,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
_ => unimplemented!(),
}
}
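The hunk above collapses a long exhaustive list of unsupported `DataType` variants into a single catch-all arm. A minimal stand-alone sketch of that pattern, using real arrow `DataType` variants but a hypothetical `kind` helper rather than the real `build_values` logic:

```rust
use arrow_schema::DataType;

// Hypothetical helper, for illustration only: supported variants are listed,
// everything else is rejected in one place instead of variant by variant.
fn kind(dt: &DataType) -> &'static str {
    match dt {
        DataType::Utf8 => "string",
        DataType::Int64 => "int64",
        DataType::Float64 => "float64",
        _ => unimplemented!("unsupported data type: {dt:?}"),
    }
}
```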
@@ -444,7 +414,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
fn query_set(table_name: &str) -> HashMap<String, String> {
HashMap::from([
(
"count_all".to_string(),
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {table_name};"),
),
(
docs/how-to/how-to-write-fuzz-tests.md (new file, 136 lines)

@@ -0,0 +1,136 @@
# How to write fuzz tests

This document introduces how to write fuzz tests in GreptimeDB.

## What is a fuzz test

A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify fuzzer-generated inputs that cause system panics, crashes, or other unexpected behavior. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.

## Why we need them

- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)

## Resources

All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.

### Fundamental components

They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQL (including dialects for different protocols), how to validate execution results (e.g., column attribute validation), etc.

### Test targets

They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target uses the fundamental components to generate SQL, sends the generated SQL via the specified protocol, and validates the results of the SQL execution.

Figure 1 illustrates how the fundamental components provide the ability to generate random SQL. A Random Number Generator (Rng) drives an ExprGenerator that produces the Intermediate Representation (IR); a DialectTranslator then renders the dialect for each protocol. Finally, the fuzz test sends the generated SQL via the specified protocol and verifies that the execution results meet expectations.

```
                        Rng
                         |
                         |
                         v
                   ExprGenerator
                         |
                         |
                         v
        Intermediate representation (IR)
                         |
                         |
   +---------------------+---------------------+
   |                     |                     |
   v                     v                     v
MySQLTranslator  PostgreSQLTranslator  OtherDialectTranslator
   |                     |                     |
   |                     |                     |
   v                     v                     v
SQL(MySQL Dialect)     .....                 .....
   |
   |
   v
Fuzz Test
```
(Figure 1: Overview of fuzz tests)

For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).

## How to add a fuzz test target

1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.

2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.

```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```

3. Define the `FuzzInput` in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
#![no_main]
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};

#[derive(Clone, Debug)]
struct FuzzInput {
    seed: u64,
}

impl Arbitrary<'_> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
        let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
        Ok(FuzzInput { seed })
    }
}
```

4. Write your first fuzz test target in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
    merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
    MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
        let Connections { mysql } = init_greptime_connections().await;
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);
        // Randomly decide whether the generated statement uses IF NOT EXISTS.
        let if_not_exists = rng.gen_bool(0.5);
        let create_table_generator = CreateTableExprGeneratorBuilder::default()
            .name_generator(Box::new(MappedGenerator::new(
                WordGenerator,
                merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
            )))
            .columns(columns)
            .engine("mito")
            .if_not_exists(if_not_exists)
            .build()
            .unwrap();
        let ir = create_table_generator.generate(&mut rng);
        let translator = CreateTableExprTranslator;
        let sql = translator.translate(&ir).unwrap();
        mysql.execute(&sql).await
    })
});
```

5. Run your fuzz test target:

```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```

For more details, please refer to this [document](/tests-fuzz/README.md).
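The whole harness hinges on the seed being the only source of randomness. Below is a minimal sketch, outside the real fuzz target, of why a recorded seed reproduces the same generated input (assuming the same `rand`/`rand_chacha` versions as the workspace):

```rust
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;

fn main() {
    // Two generators built from the same seed produce identical sequences,
    // so replaying a failing FuzzInput { seed } regenerates the same SQL.
    let mut first = ChaChaRng::seed_from_u64(42);
    let mut second = ChaChaRng::seed_from_u64(42);
    assert_eq!(first.gen_range(2..30), second.gen_range(2..30));
    assert_eq!(first.gen_bool(0.5), second.gen_bool(0.5));
}
```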
@@ -73,7 +73,7 @@ CREATE TABLE cpu (
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
PRIMARY KEY(datacenter, host)) ENGINE=mito;
```

Then the table's `TableMeta` may look like this:

@@ -249,7 +249,7 @@ CREATE TABLE cpu (
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
PRIMARY KEY(datacenter, host)) ENGINE=mito;

select ts, usage_system from cpu;
```
docs/style-guide.md (new file, 46 lines)

@@ -0,0 +1,46 @@
# GreptimeDB Style Guide

This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.

It is mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).

## Table of Contents

- Formatting
- Modules
- Comments
- Error handling

## Formatting

- Place all `mod` declarations before any `use`.
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
- Add an empty line before and after declaration blocks.
- Place comments before attributes (`#[]`) and derives (`#[derive]`).

## Modules

- Use a file with the same name instead of `mod.rs` to define a module (a matching `cache.rs` is sketched right after this layout). E.g.:

```
.
├── cache
│   ├── cache_size.rs
│   └── write_cache.rs
└── cache.rs
```
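A minimal sketch of what the corresponding `cache.rs` could contain under this layout; the submodule names are taken from the tree above:

```rust
// cache.rs — declares the submodules that live in the sibling `cache/` directory.
pub mod cache_size;
pub mod write_cache;
```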

## Comments

- Add comments for public functions and structs.
- Prefer doc comments (`///`) over normal comments (`//`) for structs, fields, functions, etc.
- Add links (`[]`) to structs, methods, or any other referenced items, and make sure those links resolve (see the sketch after this list).
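A short sketch of these comment rules, built around a hypothetical cache type used only for illustration:

```rust
/// A cached entry (hypothetical type, for illustration only).
///
/// Returned by [`Cache::get`]; dropping it does not evict the value.
pub struct Entry {
    /// Size of the cached value in bytes.
    pub size: usize,
}

/// A cache holding [`Entry`] values.
pub struct Cache;

impl Cache {
    /// Looks up an [`Entry`] by key.
    pub fn get(&self, _key: &str) -> Option<Entry> {
        None
    }
}
```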
## Error handling

- Define a custom error type for the module if needed.
- Prefer `with_context()` over `context()` when an allocation is needed to construct an error (see the sketch after this section).
- Use the `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:

```rust
error!(e; "Failed to do something");
```
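A hedged sketch of the `with_context()` guideline using snafu 0.7 (the error type, message, and path here are made up for illustration): the closure defers building the owned data until an error actually occurs.

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(display("Failed to read config {path}"))]
struct ReadConfigError {
    path: String,
    source: std::io::Error,
}

fn read_config(path: &str) -> Result<String, ReadConfigError> {
    // `with_context` only allocates the `path` String on the error path;
    // `context(ReadConfigSnafu { path: path.to_string() })` would allocate
    // even when the read succeeds.
    std::fs::read_to_string(path).with_context(|_| ReadConfigSnafu {
        path: path.to_string(),
    })
}

fn main() {
    if let Err(e) = read_config("/nonexistent/config.toml") {
        // The style guide's logging macros live in common_telemetry; plain
        // eprintln keeps this sketch self-contained.
        eprintln!("{e}");
    }
}
```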
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-12-19"
channel = "nightly-2024-04-18"
@@ -21,6 +21,7 @@ pub mod prom_store {
}
}

pub mod region;
pub mod v1;

pub use greptime_proto;

src/api/src/region.rs (new file, 42 lines)

@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use common_base::AffectedRows;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;

/// This result struct is derived from [RegionResponseV1]
#[derive(Debug)]
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extension: HashMap<String, Vec<u8>>,
}

impl RegionResponse {
    pub fn from_region_response(region_response: RegionResponseV1) -> Self {
        Self {
            affected_rows: region_response.affected_rows as _,
            extension: region_response.extension,
        }
    }

    /// Creates one response without extension
    pub fn new(affected_rows: AffectedRows) -> Self {
        Self {
            affected_rows,
            extension: Default::default(),
        }
    }
}
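A small usage sketch of the constructors added above, assuming `AffectedRows` is an integer alias re-exported from `common_base`:

```rust
use api::region::RegionResponse;

fn main() {
    // `new` builds a response without any extension payload.
    let response = RegionResponse::new(8);
    assert_eq!(response.affected_rows, 8);
    assert!(response.extension.is_empty());
}
```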
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {

impl MockUserProvider {
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
self.catalog = info.catalog.to_owned();
self.schema = info.schema.to_owned();
self.username = info.username.to_owned();
info.catalog.clone_into(&mut self.catalog);
info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username);
}
}
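The motivation for `clone_into`/`clone_from` here (and in the `cmd` hunks further down) is that they can reuse the destination's existing allocation instead of building a fresh value and dropping the old one. A minimal sketch with plain strings:

```rust
fn main() {
    let source: &str = "greptime";
    let mut target = String::with_capacity(32);

    // `target = source.to_owned()` would allocate a brand-new String;
    // `clone_into` copies into the buffer `target` already owns when it fits.
    source.clone_into(&mut target);
    assert_eq!(target, "greptime");

    // `clone_from` is the same idea on the Clone side: reuse `target`'s
    // allocation instead of replacing it wholesale.
    let other = String::from("db");
    target.clone_from(&other);
    assert_eq!(target, "db");
}
```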
@@ -109,11 +109,7 @@ impl Predicate {
};
}
Predicate::Not(p) => {
let Some(b) = p.eval(row) else {
return None;
};

return Some(!b);
return Some(!p.eval(row)?);
}
}

@@ -125,13 +121,7 @@ impl Predicate {
fn from_expr(expr: DfExpr) -> Option<Predicate> {
match expr {
// NOT expr
DfExpr::Not(expr) => {
let Some(p) = Self::from_expr(*expr) else {
return None;
};

Some(Predicate::Not(Box::new(p)))
}
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
// expr LIKE pattern
DfExpr::Like(Like {
negated,

@@ -178,25 +168,15 @@ impl Predicate {
}
// left AND right
(left, Operator::And, right) => {
let Some(left) = Self::from_expr(left) else {
return None;
};

let Some(right) = Self::from_expr(right) else {
return None;
};
let left = Self::from_expr(left)?;
let right = Self::from_expr(right)?;

Some(Predicate::And(Box::new(left), Box::new(right)))
}
// left OR right
(left, Operator::Or, right) => {
let Some(left) = Self::from_expr(left) else {
return None;
};

let Some(right) = Self::from_expr(right) else {
return None;
};
let left = Self::from_expr(left)?;
let right = Self::from_expr(right)?;

Some(Predicate::Or(Box::new(left), Box::new(right)))
}
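The rewrite above is purely mechanical: a `let Some(x) = e else { return None; }` block is exactly what the `?` operator already does for `Option`. A tiny stand-alone illustration:

```rust
fn halve(n: i32) -> Option<i32> {
    (n % 2 == 0).then_some(n / 2)
}

// Before: explicit let-else with an early return.
fn quarter_verbose(n: i32) -> Option<i32> {
    let Some(half) = halve(n) else {
        return None;
    };
    halve(half)
}

// After: `?` propagates the None for us.
fn quarter(n: i32) -> Option<i32> {
    halve(halve(n)?)
}

fn main() {
    assert_eq!(quarter_verbose(8), quarter(8));
    assert_eq!(quarter_verbose(6), quarter(6)); // both None
}
```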
@@ -17,7 +17,6 @@ use std::fmt::Debug;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::usize;

use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;

@@ -506,32 +505,32 @@ mod tests {
}

async fn range(&self, _req: RangeRequest) -> Result<RangeResponse, Self::Error> {
todo!()
unimplemented!()
}

async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
todo!()
unimplemented!()
}

async fn compare_and_put(
&self,
_req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse, Self::Error> {
todo!()
unimplemented!()
}

async fn delete_range(
&self,
_req: DeleteRangeRequest,
) -> Result<DeleteRangeResponse, Self::Error> {
todo!()
unimplemented!()
}

async fn batch_delete(
&self,
_req: BatchDeleteRequest,
) -> Result<BatchDeleteResponse, Self::Error> {
todo!()
unimplemented!()
}
}
|
||||
@@ -49,10 +49,7 @@ impl DfTableSourceProvider {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resolve_table_ref<'a>(
|
||||
&'a self,
|
||||
table_ref: TableReference<'a>,
|
||||
) -> Result<ResolvedTableReference<'a>> {
|
||||
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
|
||||
if self.disallow_cross_catalog_query {
|
||||
match &table_ref {
|
||||
TableReference::Bare { .. } => (),
|
||||
@@ -76,7 +73,7 @@ impl DfTableSourceProvider {
|
||||
|
||||
pub async fn resolve_table(
|
||||
&mut self,
|
||||
table_ref: TableReference<'_>,
|
||||
table_ref: TableReference,
|
||||
) -> Result<Arc<dyn TableSource>> {
|
||||
let table_ref = self.resolve_table_ref(table_ref)?;
|
||||
|
||||
@@ -106,8 +103,6 @@ impl DfTableSourceProvider {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
@@ -120,68 +115,37 @@ mod tests {
|
||||
let table_provider =
|
||||
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
|
||||
|
||||
let table_ref = TableReference::Bare {
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let table_ref = TableReference::bare("table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let table_ref = TableReference::partial("public", "table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("wrong_schema"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let table_ref = TableReference::partial("wrong_schema", "table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let table_ref = TableReference::full("greptime", "public", "table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("wrong_catalog"),
|
||||
schema: Cow::Borrowed("public"),
|
||||
table: Cow::Borrowed("table_name"),
|
||||
};
|
||||
let table_ref = TableReference::full("wrong_catalog", "public", "table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_err());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let table_ref = TableReference::partial("information_schema", "columns");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let table_ref = TableReference::full("greptime", "information_schema", "columns");
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("dummy"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let table_ref = TableReference::full("dummy", "information_schema", "columns");
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_err());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("greptime_private"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
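The churn in these tests comes from DataFusion's `TableReference` becoming an owned type without a lifetime parameter in the pinned revision, so the `Cow`-based struct literals give way to constructor helpers. A hedged sketch, assuming the `datafusion-common` API at that revision:

```rust
use datafusion_common::TableReference;

// Because TableReference now owns its parts, it can be built from a temporary
// String and returned without borrowing from the caller.
fn qualified(table: String) -> TableReference {
    TableReference::full("greptime", "public", table)
}

fn main() {
    let bare = TableReference::bare("table_name");
    let partial = TableReference::partial("public", "table_name");
    let full = qualified("table_name".to_string());
    assert_eq!(bare.table(), "table_name");
    assert_eq!(partial.table(), full.table());
}
```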
@@ -14,6 +14,7 @@

use std::sync::Arc;

use api::region::RegionResponse;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;

@@ -23,7 +24,7 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{Datanode, HandleResponse};
use common_meta::datanode_manager::Datanode;
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};

@@ -46,7 +47,7 @@ pub struct RegionRequester {

#[async_trait]
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
async fn handle(&self, request: RegionRequest) -> MetaResult<RegionResponse> {
self.handle_inner(request).await.map_err(|err| {
if err.should_retry() {
meta_error::Error::RetryLater {

@@ -165,7 +166,7 @@ impl RegionRequester {
Ok(Box::pin(record_batch_stream))
}

async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
async fn handle_inner(&self, request: RegionRequest) -> Result<RegionResponse> {
let request_type = request
.body
.as_ref()

@@ -194,10 +195,10 @@ impl RegionRequester {

check_response_header(&response.header)?;

Ok(HandleResponse::from_region_response(response))
Ok(RegionResponse::from_region_response(response))
}

pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
pub async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
self.handle_inner(request).await
}
}
@@ -36,6 +36,7 @@ common-telemetry = { workspace = true, features = [
"deadlock_detection",
] }
common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
config = "0.13"
datanode.workspace = true

@@ -22,6 +22,7 @@ use cmd::options::{CliOptions, Options};
use cmd::{
cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
};
use common_version::{short_version, version};

#[derive(Parser)]
enum SubCommand {

@@ -105,7 +106,8 @@ async fn main() -> Result<()> {

common_telemetry::set_panic_hook();

let cli = greptimedb_cli();
let version = version!();
let cli = greptimedb_cli().version(version);

let cli = SubCommand::augment_subcommands(cli);

@@ -129,7 +131,7 @@ async fn main() -> Result<()> {
opts.node_id(),
);

log_versions();
log_versions(version, short_version!());

let app = subcmd.build(opts).await?;

@@ -84,10 +84,10 @@ impl Command {
let mut logging_opts = LoggingOptions::default();

if let Some(dir) = &cli_options.log_dir {
logging_opts.dir = dir.clone();
logging_opts.dir.clone_from(dir);
}

logging_opts.level = cli_options.log_level.clone();
logging_opts.level.clone_from(&cli_options.log_level);

Ok(Options::Cli(Box::new(logging_opts)))
}
@@ -107,14 +107,11 @@ impl TableMetadataBencher {
.unwrap();
let start = Instant::now();
let table_info = table_info.unwrap();
let table_route = table_route.unwrap();
let table_id = table_info.table_info.ident.table_id;
let _ = self
.table_metadata_manager
.delete_table_metadata(
table_id,
&table_info.table_name(),
table_route.unwrap().region_routes().unwrap(),
)
.delete_table_metadata(table_id, &table_info.table_name(), &table_route)
.await;
start.elapsed()
},

@@ -492,9 +492,7 @@ mod tests {
)

ENGINE=mito
WITH(
regions = 1
);
;
"#;
assert_eq!(res.trim(), expect.trim());

@@ -192,10 +192,10 @@ impl MigrateTableMetadata {
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));

Ok((key, ()))
Ok(key)
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}

@@ -244,10 +244,10 @@ impl MigrateTableMetadata {
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));

Ok((key, ()))
Ok(key)
}),
);
while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
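The migration loops above just drop the unused `()` from each stream item; the shape of the loop is plain `TryStreamExt::try_next`. A self-contained sketch of that pattern (a stand-in stream, not the real metadata iterator):

```rust
use futures::stream::{self, TryStreamExt};

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    // Stand-in for the keys yielded while migrating metadata.
    let mut keys = stream::iter(vec![
        Ok::<_, std::io::Error>("__catalog-a".to_string()),
        Ok("__catalog-b".to_string()),
    ]);

    let mut migrated = Vec::new();
    // try_next yields Ok(Some(item)) until the stream ends or errors out.
    while let Some(key) = keys.try_next().await? {
        migrated.push(key.into_bytes());
    }
    assert_eq!(migrated.len(), 2);
    Ok(())
}
```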
@@ -139,19 +139,19 @@ impl StartCommand {
)?;

if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
opts.logging.dir.clone_from(dir);
}

if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
opts.logging.level.clone_from(&cli_options.log_level);
}

if let Some(addr) = &self.rpc_addr {
opts.rpc_addr = addr.clone();
opts.rpc_addr.clone_from(addr);
}

if self.rpc_hostname.is_some() {
opts.rpc_hostname = self.rpc_hostname.clone();
opts.rpc_hostname.clone_from(&self.rpc_hostname);
}

if let Some(node_id) = self.node_id {

@@ -161,7 +161,8 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}

@@ -173,7 +174,7 @@ impl StartCommand {
}

if let Some(data_home) = &self.data_home {
opts.storage.data_home = data_home.clone();
opts.storage.data_home.clone_from(data_home);
}

// `wal_dir` only affects raft-engine config.

@@ -191,7 +192,7 @@ impl StartCommand {
}

if let Some(http_addr) = &self.http_addr {
opts.http.addr = http_addr.clone();
opts.http.addr.clone_from(http_addr);
}

if let Some(http_timeout) = self.http_timeout {

@@ -157,11 +157,11 @@ impl StartCommand {
)?;

if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
opts.logging.dir.clone_from(dir);
}

if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
opts.logging.level.clone_from(&cli_options.log_level);
}

let tls_opts = TlsOption::new(

@@ -171,7 +171,7 @@ impl StartCommand {
);

if let Some(addr) = &self.http_addr {
opts.http.addr = addr.clone()
opts.http.addr.clone_from(addr);
}

if let Some(http_timeout) = self.http_timeout {

@@ -183,24 +183,24 @@ impl StartCommand {
}

if let Some(addr) = &self.rpc_addr {
opts.grpc.addr = addr.clone()
opts.grpc.addr.clone_from(addr);
}

if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}

if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}

if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
opts.opentsdb.addr.clone_from(addr);
}

if let Some(enable) = self.influxdb_enable {

@@ -210,11 +210,12 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addrs.clone();
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}

opts.user_provider = self.user_provider.clone();
opts.user_provider.clone_from(&self.user_provider);

Ok(Options::Frontend(Box::new(opts)))
}
@@ -64,26 +64,23 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
Ok(())
}

pub fn log_versions() {
/// Log the versions of the application, and the arguments passed to the cli.
/// `version_string` should be the same as the output of cli "--version";
/// and the `app_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version_string: &str, app_version: &str) {
// Report app version as gauge.
APP_VERSION
.with_label_values(&[short_version(), full_version()])
.with_label_values(&[env!("CARGO_PKG_VERSION"), app_version])
.inc();

// Log version and argument flags.
info!(
"short_version: {}, full_version: {}",
short_version(),
full_version()
);
info!("GreptimeDB version: {}", version_string);

log_env_flags();
}

pub fn greptimedb_cli() -> clap::Command {
let cmd = clap::Command::new("greptimedb")
.version(print_version())
.subcommand_required(true);
let cmd = clap::Command::new("greptimedb").subcommand_required(true);

#[cfg(feature = "tokio-console")]
let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));

@@ -91,35 +88,6 @@ pub fn greptimedb_cli() -> clap::Command {
cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
}

fn print_version() -> &'static str {
concat!(
"\nbranch: ",
env!("GIT_BRANCH"),
"\ncommit: ",
env!("GIT_COMMIT"),
"\ndirty: ",
env!("GIT_DIRTY"),
"\nversion: ",
env!("CARGO_PKG_VERSION")
)
}

fn short_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}

// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
concat!(
"greptimedb-",
env!("GIT_BRANCH"),
"-",
env!("GIT_COMMIT_SHORT")
)
}

fn log_env_flags() {
info!("command line arguments");
for argument in std::env::args() {
@@ -134,23 +134,23 @@ impl StartCommand {
)?;

if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
opts.logging.dir.clone_from(dir);
}

if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
opts.logging.level.clone_from(&cli_options.log_level);
}

if let Some(addr) = &self.bind_addr {
opts.bind_addr = addr.clone();
opts.bind_addr.clone_from(addr);
}

if let Some(addr) = &self.server_addr {
opts.server_addr = addr.clone();
opts.server_addr.clone_from(addr);
}

if let Some(addr) = &self.store_addr {
opts.store_addr = addr.clone();
opts.store_addr.clone_from(addr);
}

if let Some(selector_type) = &self.selector {

@@ -168,7 +168,7 @@ impl StartCommand {
}

if let Some(http_addr) = &self.http_addr {
opts.http.addr = http_addr.clone();
opts.http.addr.clone_from(http_addr);
}

if let Some(http_timeout) = self.http_timeout {

@@ -176,11 +176,11 @@ impl StartCommand {
}

if let Some(data_home) = &self.data_home {
opts.data_home = data_home.clone();
opts.data_home.clone_from(data_home);
}

if !self.store_key_prefix.is_empty() {
opts.store_key_prefix = self.store_key_prefix.clone()
opts.store_key_prefix.clone_from(&self.store_key_prefix)
}

if let Some(max_txn_ops) = self.max_txn_ops {

@@ -293,11 +293,11 @@ impl StartCommand {
opts.mode = Mode::Standalone;

if let Some(dir) = &cli_options.log_dir {
opts.logging.dir = dir.clone();
opts.logging.dir.clone_from(dir);
}

if cli_options.log_level.is_some() {
opts.logging.level = cli_options.log_level.clone();
opts.logging.level.clone_from(&cli_options.log_level);
}

let tls_opts = TlsOption::new(

@@ -307,11 +307,11 @@ impl StartCommand {
);

if let Some(addr) = &self.http_addr {
opts.http.addr = addr.clone()
opts.http.addr.clone_from(addr);
}

if let Some(data_home) = &self.data_home {
opts.storage.data_home = data_home.clone();
opts.storage.data_home.clone_from(data_home);
}

if let Some(addr) = &self.rpc_addr {

@@ -325,31 +325,31 @@ impl StartCommand {
}
.fail();
}
opts.grpc.addr = addr.clone()
opts.grpc.addr.clone_from(addr)
}

if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}

if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}

if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
opts.opentsdb.addr.clone_from(addr);
}

if self.influxdb_enable {
opts.influxdb.enable = self.influxdb_enable;
}

opts.user_provider = self.user_provider.clone();
opts.user_provider.clone_from(&self.user_provider);

let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
@@ -30,7 +30,7 @@ derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
orc-rust = "0.2"
orc-rust = { git = "https://github.com/MichaelScofield/orc-rs.git", rev = "17347f5f084ac937863317df882218055c4ea8c1" }
parquet.workspace = true
paste = "1.0"
regex = "1.7"

@@ -117,7 +117,7 @@ impl CsvConfig {
let mut builder = csv::ReaderBuilder::new(self.file_schema.clone())
.with_delimiter(self.delimiter)
.with_batch_size(self.batch_size)
.has_header(self.has_header);
.with_header(self.has_header);

if let Some(proj) = &self.file_projection {
builder = builder.with_projection(proj.clone());

@@ -19,6 +19,7 @@ use std::vec;

use common_test_util::find_workspace_path;
use datafusion::assert_batches_eq;
use datafusion::config::TableParquetOptions;
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;

@@ -166,7 +167,7 @@ async fn test_parquet_exec() {
.to_string();
let base_config = scan_config(schema.clone(), None, path);

let exec = ParquetExec::new(base_config, None, None)
let exec = ParquetExec::new(base_config, None, None, TableParquetOptions::default())
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));

let ctx = SessionContext::new();

@@ -16,6 +16,7 @@ use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema, SchemaRef};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datafusion::common::Statistics;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{FileScanConfig, FileStream};

@@ -72,17 +73,16 @@ pub fn test_basic_schema() -> SchemaRef {
pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
// object_store only recognize the Unix style path, so make it happy.
let filename = &filename.replace('\\', "/");

let statistics = Statistics::new_unknown(file_schema.as_ref());
FileScanConfig {
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
file_schema,
file_groups: vec![vec![PartitionedFile::new(filename.to_string(), 10)]],
statistics: Default::default(),
statistics,
projection: None,
limit,
table_partition_cols: vec![],
output_ordering: vec![],
infinite_source: false,
}
}
@@ -59,6 +59,7 @@ pub enum StatusCode {
RegionNotFound = 4005,
RegionAlreadyExists = 4006,
RegionReadonly = 4007,
/// Region is not in a proper state to handle specific request.
RegionNotReady = 4008,
// If mutually exclusive operations are reached at the same time,
// only one can be executed, another one will get region busy.
@@ -56,7 +56,7 @@ where
.map(|&n| n.into())
.collect::<Vec<Value>>();
Ok(vec![Value::List(ListValue::new(
Some(Box::new(nums)),
nums,
I::LogicalType::build_data_type(),
))])
}

@@ -120,10 +120,7 @@ where
O::from_native(native).into()
})
.collect::<Vec<Value>>();
let diff = Value::List(ListValue::new(
Some(Box::new(diff)),
O::LogicalType::build_data_type(),
));
let diff = Value::List(ListValue::new(diff, O::LogicalType::build_data_type()));
Ok(diff)
}
}

@@ -218,10 +215,7 @@ mod test {
let values = vec![Value::from(2_i64), Value::from(1_i64)];
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
ConcreteDataType::int64_datatype()
)),
Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
diff.evaluate().unwrap()
);

@@ -236,10 +230,7 @@ mod test {
let values = vec![Value::from(5_i64), Value::from(1_i64)];
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
ConcreteDataType::int64_datatype()
)),
Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
diff.evaluate().unwrap()
);

@@ -252,10 +243,7 @@ mod test {
let values = vec![Value::from(0_i64), Value::from(0_i64), Value::from(0_i64)];
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
ConcreteDataType::int64_datatype()
)),
Value::List(ListValue::new(values, ConcreteDataType::int64_datatype())),
diff.evaluate().unwrap()
);
}

@@ -104,10 +104,7 @@ where
.map(|&n| n.into())
.collect::<Vec<Value>>();
Ok(vec![
Value::List(ListValue::new(
Some(Box::new(nums)),
T::LogicalType::build_data_type(),
)),
Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
self.p.into(),
])
}

@@ -72,10 +72,7 @@ where
.map(|&n| n.into())
.collect::<Vec<Value>>();
Ok(vec![
Value::List(ListValue::new(
Some(Box::new(nums)),
T::LogicalType::build_data_type(),
)),
Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
self.x.into(),
])
}

@@ -56,10 +56,7 @@ where
.map(|&x| x.into())
.collect::<Vec<Value>>();
Ok(vec![
Value::List(ListValue::new(
Some(Box::new(nums)),
T::LogicalType::build_data_type(),
)),
Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
self.x.into(),
])
}

@@ -56,10 +56,7 @@ where
.map(|&x| x.into())
.collect::<Vec<Value>>();
Ok(vec![
Value::List(ListValue::new(
Some(Box::new(nums)),
T::LogicalType::build_data_type(),
)),
Value::List(ListValue::new(nums, T::LogicalType::build_data_type())),
self.x.into(),
])
}
@@ -77,7 +77,7 @@ impl Function for RangeFunction {
|
||||
/// `range_fn` will never be used. As long as a legal signature is returned, the specific content of the signature does not matter.
/// In fact, the arguments loaded by `range_fn` are very complicated, and it is difficult to describe them with a `Signature`.
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::any(0, Volatility::Immutable)
|
||||
Signature::variadic_any(Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
pub mod channel_manager;
|
||||
pub mod error;
|
||||
pub mod flight;
|
||||
pub mod precision;
|
||||
pub mod select;
|
||||
pub mod writer;
|
||||
|
||||
pub use error::Error;
|
||||
|
||||
src/common/grpc/src/precision.rs (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_time::timestamp::TimeUnit;
|
||||
|
||||
use crate::Error;
|
||||
|
||||
/// Precision represents the precision of a timestamp.
|
||||
/// It is used to convert timestamps between different precisions.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Precision {
|
||||
Nanosecond,
|
||||
Microsecond,
|
||||
Millisecond,
|
||||
Second,
|
||||
Minute,
|
||||
Hour,
|
||||
}
|
||||
|
||||
impl Precision {
|
||||
pub fn to_nanos(&self, amount: i64) -> Option<i64> {
|
||||
match self {
|
||||
Precision::Nanosecond => Some(amount),
|
||||
Precision::Microsecond => amount.checked_mul(1_000),
|
||||
Precision::Millisecond => amount.checked_mul(1_000_000),
|
||||
Precision::Second => amount.checked_mul(1_000_000_000),
|
||||
Precision::Minute => amount
|
||||
.checked_mul(60)
|
||||
.and_then(|a| a.checked_mul(1_000_000_000)),
|
||||
Precision::Hour => amount
|
||||
.checked_mul(3600)
|
||||
.and_then(|a| a.checked_mul(1_000_000_000)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_millis(&self, amount: i64) -> Option<i64> {
|
||||
match self {
|
||||
Precision::Nanosecond => amount.checked_div(1_000_000),
|
||||
Precision::Microsecond => amount.checked_div(1_000),
|
||||
Precision::Millisecond => Some(amount),
|
||||
Precision::Second => amount.checked_mul(1_000),
|
||||
Precision::Minute => amount.checked_mul(60_000),
|
||||
Precision::Hour => amount.checked_mul(3_600_000),
|
||||
}
|
||||
}
|
||||
}
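Both conversions use checked arithmetic, so an out-of-range amount comes back as `None` instead of wrapping. A tiny usage sketch, assuming the `Precision` enum defined above:

// Overflow is reported as None rather than panicking or wrapping.
assert_eq!(Precision::Second.to_nanos(1), Some(1_000_000_000));
assert_eq!(Precision::Hour.to_nanos(i64::MAX), None); // checked_mul overflows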
|
||||
|
||||
impl Display for Precision {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Precision::Nanosecond => write!(f, "Precision::Nanosecond"),
|
||||
Precision::Microsecond => write!(f, "Precision::Microsecond"),
|
||||
Precision::Millisecond => write!(f, "Precision::Millisecond"),
|
||||
Precision::Second => write!(f, "Precision::Second"),
|
||||
Precision::Minute => write!(f, "Precision::Minute"),
|
||||
Precision::Hour => write!(f, "Precision::Hour"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Precision> for TimeUnit {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(precision: Precision) -> Result<Self, Self::Error> {
|
||||
Ok(match precision {
|
||||
Precision::Second => TimeUnit::Second,
|
||||
Precision::Millisecond => TimeUnit::Millisecond,
|
||||
Precision::Microsecond => TimeUnit::Microsecond,
|
||||
Precision::Nanosecond => TimeUnit::Nanosecond,
|
||||
_ => {
|
||||
return Err(Error::NotSupported {
|
||||
feat: format!("convert {precision} into TimeUnit"),
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
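`Minute` and `Hour` have no `TimeUnit` counterpart, so they fall into the `NotSupported` arm. A short hedged sketch of both paths, assuming the types in this file:

// Converting a Precision into a TimeUnit.
use common_time::timestamp::TimeUnit;
assert_eq!(TimeUnit::try_from(Precision::Millisecond).unwrap(), TimeUnit::Millisecond);
assert!(TimeUnit::try_from(Precision::Minute).is_err()); // no TimeUnit for Minute/Hour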
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::precision::Precision;
|
||||
|
||||
#[test]
|
||||
fn test_to_nanos() {
|
||||
assert_eq!(Precision::Nanosecond.to_nanos(1).unwrap(), 1);
|
||||
assert_eq!(Precision::Microsecond.to_nanos(1).unwrap(), 1_000);
|
||||
assert_eq!(Precision::Millisecond.to_nanos(1).unwrap(), 1_000_000);
|
||||
assert_eq!(Precision::Second.to_nanos(1).unwrap(), 1_000_000_000);
|
||||
assert_eq!(Precision::Minute.to_nanos(1).unwrap(), 60 * 1_000_000_000);
|
||||
assert_eq!(
|
||||
Precision::Hour.to_nanos(1).unwrap(),
|
||||
60 * 60 * 1_000_000_000
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_millis() {
|
||||
assert_eq!(Precision::Nanosecond.to_millis(1_000_000).unwrap(), 1);
|
||||
assert_eq!(Precision::Microsecond.to_millis(1_000).unwrap(), 1);
|
||||
assert_eq!(Precision::Millisecond.to_millis(1).unwrap(), 1);
|
||||
assert_eq!(Precision::Second.to_millis(1).unwrap(), 1_000);
|
||||
assert_eq!(Precision::Minute.to_millis(1).unwrap(), 60 * 1_000);
|
||||
assert_eq!(Precision::Hour.to_millis(1).unwrap(), 60 * 60 * 1_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_nanos_basic() {
|
||||
assert_eq!(Precision::Second.to_nanos(1), Some(1_000_000_000));
|
||||
assert_eq!(Precision::Minute.to_nanos(1), Some(60 * 1_000_000_000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_millis_basic() {
|
||||
assert_eq!(Precision::Second.to_millis(1), Some(1_000));
|
||||
assert_eq!(Precision::Minute.to_millis(1), Some(60_000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_nanos_overflow() {
|
||||
assert_eq!(Precision::Hour.to_nanos(i64::MAX / 100), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_input() {
|
||||
assert_eq!(Precision::Second.to_nanos(0), Some(0));
|
||||
assert_eq!(Precision::Minute.to_millis(0), Some(0));
|
||||
}
|
||||
}
|
||||
@@ -1,441 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
|
||||
use api::helper::values_with_capacity;
|
||||
use api::v1::{Column, ColumnDataType, ColumnDataTypeExtension, SemanticType};
|
||||
use common_base::BitVec;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::error::{Result, TypeMismatchSnafu};
|
||||
use crate::Error;
|
||||
|
||||
type ColumnName = String;
|
||||
|
||||
type RowCount = u32;
|
||||
|
||||
// TODO(fys): will remove in the future.
|
||||
#[derive(Default)]
|
||||
pub struct LinesWriter {
|
||||
column_name_index: HashMap<ColumnName, usize>,
|
||||
null_masks: Vec<BitVec>,
|
||||
batch: (Vec<Column>, RowCount),
|
||||
lines: usize,
|
||||
}
|
||||
|
||||
impl LinesWriter {
|
||||
pub fn with_lines(lines: usize) -> Self {
|
||||
Self {
|
||||
lines,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write_ts(&mut self, column_name: &str, value: (i64, Precision)) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::TimestampMillisecond,
|
||||
SemanticType::Timestamp,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::TimestampMillisecond as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "timestamp",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values
|
||||
.timestamp_millisecond_values
|
||||
.push(to_ms_ts(value.1, value.0));
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag, None);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::String as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "string",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.string_values.push(value.to_string());
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Uint64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Uint64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "u64",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.u64_values.push(value);
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Int64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Int64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "i64",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.i64_values.push(value);
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Float64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Float64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "f64",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.f64_values.push(value);
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::String,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::String as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "string",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.string_values.push(value.to_string());
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Boolean,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Boolean as i32,
|
||||
TypeMismatchSnafu {
|
||||
column_name,
|
||||
expected: "boolean",
|
||||
actual: format!("{:?}", column.datatype)
|
||||
}
|
||||
);
|
||||
// It is safe to use unwrap here, because values has been initialized in mut_column()
|
||||
let values = column.values.as_mut().unwrap();
|
||||
values.bool_values.push(value);
|
||||
self.null_masks[idx].push(false);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn commit(&mut self) {
|
||||
let batch = &mut self.batch;
|
||||
batch.1 += 1;
|
||||
|
||||
for i in 0..batch.0.len() {
|
||||
let null_mask = &mut self.null_masks[i];
|
||||
if batch.1 as usize > null_mask.len() {
|
||||
null_mask.push(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn finish(mut self) -> (Vec<Column>, RowCount) {
|
||||
let null_masks = self.null_masks;
|
||||
for (i, null_mask) in null_masks.into_iter().enumerate() {
|
||||
let columns = &mut self.batch.0;
|
||||
columns[i].null_mask = null_mask.into_vec();
|
||||
}
|
||||
self.batch
|
||||
}
|
||||
|
||||
fn mut_column(
|
||||
&mut self,
|
||||
column_name: &str,
|
||||
datatype: ColumnDataType,
|
||||
semantic_type: SemanticType,
|
||||
datatype_extension: Option<ColumnDataTypeExtension>,
|
||||
) -> (usize, &mut Column) {
|
||||
let column_names = &mut self.column_name_index;
|
||||
let column_idx = match column_names.get(column_name) {
|
||||
Some(i) => *i,
|
||||
None => {
|
||||
let new_idx = column_names.len();
|
||||
let batch = &mut self.batch;
|
||||
let to_insert = self.lines;
|
||||
let mut null_mask = BitVec::with_capacity(to_insert);
|
||||
null_mask.extend(BitVec::repeat(true, batch.1 as usize));
|
||||
self.null_masks.push(null_mask);
|
||||
batch.0.push(Column {
|
||||
column_name: column_name.to_string(),
|
||||
semantic_type: semantic_type.into(),
|
||||
values: Some(values_with_capacity(datatype, to_insert)),
|
||||
datatype: datatype as i32,
|
||||
null_mask: Vec::default(),
|
||||
datatype_extension,
|
||||
});
|
||||
let _ = column_names.insert(column_name.to_string(), new_idx);
|
||||
new_idx
|
||||
}
|
||||
};
|
||||
(column_idx, &mut self.batch.0[column_idx])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_ms_ts(p: Precision, ts: i64) -> i64 {
|
||||
match p {
|
||||
Precision::Nanosecond => ts / 1_000_000,
|
||||
Precision::Microsecond => ts / 1000,
|
||||
Precision::Millisecond => ts,
|
||||
Precision::Second => ts * 1000,
|
||||
Precision::Minute => ts * 1000 * 60,
|
||||
Precision::Hour => ts * 1000 * 60 * 60,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Precision {
|
||||
Nanosecond,
|
||||
Microsecond,
|
||||
Millisecond,
|
||||
Second,
|
||||
Minute,
|
||||
Hour,
|
||||
}
|
||||
|
||||
impl Display for Precision {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Precision::Nanosecond => write!(f, "Precision::Nanosecond"),
|
||||
Precision::Microsecond => write!(f, "Precision::Microsecond"),
|
||||
Precision::Millisecond => write!(f, "Precision::Millisecond"),
|
||||
Precision::Second => write!(f, "Precision::Second"),
|
||||
Precision::Minute => write!(f, "Precision::Minute"),
|
||||
Precision::Hour => write!(f, "Precision::Hour"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Precision> for TimeUnit {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(precision: Precision) -> std::result::Result<Self, Self::Error> {
|
||||
Ok(match precision {
|
||||
Precision::Second => TimeUnit::Second,
|
||||
Precision::Millisecond => TimeUnit::Millisecond,
|
||||
Precision::Microsecond => TimeUnit::Microsecond,
|
||||
Precision::Nanosecond => TimeUnit::Nanosecond,
|
||||
_ => {
|
||||
return Err(Error::NotSupported {
|
||||
feat: format!("convert {precision} into TimeUnit"),
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_base::BitVec;
|
||||
|
||||
use super::LinesWriter;
|
||||
use crate::writer::{to_ms_ts, Precision};
|
||||
|
||||
#[test]
|
||||
fn test_lines_writer() {
|
||||
let mut writer = LinesWriter::with_lines(3);
|
||||
|
||||
writer.write_tag("host", "host1").unwrap();
|
||||
writer.write_f64("cpu", 0.5).unwrap();
|
||||
writer.write_f64("memory", 0.4).unwrap();
|
||||
writer.write_string("name", "name1").unwrap();
|
||||
writer
|
||||
.write_ts("ts", (101011000, Precision::Millisecond))
|
||||
.unwrap();
|
||||
writer.commit();
|
||||
|
||||
writer.write_tag("host", "host2").unwrap();
|
||||
writer
|
||||
.write_ts("ts", (102011001, Precision::Millisecond))
|
||||
.unwrap();
|
||||
writer.write_bool("enable_reboot", true).unwrap();
|
||||
writer.write_u64("year_of_service", 2).unwrap();
|
||||
writer.write_i64("temperature", 4).unwrap();
|
||||
writer.commit();
|
||||
|
||||
writer.write_tag("host", "host3").unwrap();
|
||||
writer.write_f64("cpu", 0.4).unwrap();
|
||||
writer.write_u64("cpu_core_num", 16).unwrap();
|
||||
writer
|
||||
.write_ts("ts", (103011002, Precision::Millisecond))
|
||||
.unwrap();
|
||||
writer.commit();
|
||||
|
||||
let insert_batch = writer.finish();
|
||||
assert_eq!(3, insert_batch.1);
|
||||
|
||||
let columns = insert_batch.0;
|
||||
assert_eq!(9, columns.len());
|
||||
|
||||
let column = &columns[0];
|
||||
assert_eq!("host", columns[0].column_name);
|
||||
assert_eq!(ColumnDataType::String as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Tag as i32, column.semantic_type);
|
||||
assert_eq!(
|
||||
vec!["host1", "host2", "host3"],
|
||||
column.values.as_ref().unwrap().string_values
|
||||
);
|
||||
verify_null_mask(&column.null_mask, vec![false, false, false]);
|
||||
|
||||
let column = &columns[1];
|
||||
assert_eq!("cpu", column.column_name);
|
||||
assert_eq!(ColumnDataType::Float64 as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![0.5, 0.4], column.values.as_ref().unwrap().f64_values);
|
||||
verify_null_mask(&column.null_mask, vec![false, true, false]);
|
||||
|
||||
let column = &columns[2];
|
||||
assert_eq!("memory", column.column_name);
|
||||
assert_eq!(ColumnDataType::Float64 as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![0.4], column.values.as_ref().unwrap().f64_values);
|
||||
verify_null_mask(&column.null_mask, vec![false, true, true]);
|
||||
|
||||
let column = &columns[3];
|
||||
assert_eq!("name", column.column_name);
|
||||
assert_eq!(ColumnDataType::String as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec!["name1"], column.values.as_ref().unwrap().string_values);
|
||||
verify_null_mask(&column.null_mask, vec![false, true, true]);
|
||||
|
||||
let column = &columns[4];
|
||||
assert_eq!("ts", column.column_name);
|
||||
assert_eq!(ColumnDataType::TimestampMillisecond as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Timestamp as i32, column.semantic_type);
|
||||
assert_eq!(
|
||||
vec![101011000, 102011001, 103011002],
|
||||
column.values.as_ref().unwrap().timestamp_millisecond_values
|
||||
);
|
||||
verify_null_mask(&column.null_mask, vec![false, false, false]);
|
||||
|
||||
let column = &columns[5];
|
||||
assert_eq!("enable_reboot", column.column_name);
|
||||
assert_eq!(ColumnDataType::Boolean as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![true], column.values.as_ref().unwrap().bool_values);
|
||||
verify_null_mask(&column.null_mask, vec![true, false, true]);
|
||||
|
||||
let column = &columns[6];
|
||||
assert_eq!("year_of_service", column.column_name);
|
||||
assert_eq!(ColumnDataType::Uint64 as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![2], column.values.as_ref().unwrap().u64_values);
|
||||
verify_null_mask(&column.null_mask, vec![true, false, true]);
|
||||
|
||||
let column = &columns[7];
|
||||
assert_eq!("temperature", column.column_name);
|
||||
assert_eq!(ColumnDataType::Int64 as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![4], column.values.as_ref().unwrap().i64_values);
|
||||
verify_null_mask(&column.null_mask, vec![true, false, true]);
|
||||
|
||||
let column = &columns[8];
|
||||
assert_eq!("cpu_core_num", column.column_name);
|
||||
assert_eq!(ColumnDataType::Uint64 as i32, column.datatype);
|
||||
assert_eq!(SemanticType::Field as i32, column.semantic_type);
|
||||
assert_eq!(vec![16], column.values.as_ref().unwrap().u64_values);
|
||||
verify_null_mask(&column.null_mask, vec![true, true, false]);
|
||||
}
|
||||
|
||||
fn verify_null_mask(data: &[u8], expected: Vec<bool>) {
|
||||
let bitvec = BitVec::from_slice(data);
|
||||
for (idx, b) in expected.iter().enumerate() {
|
||||
assert_eq!(b, bitvec.get(idx).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_ms() {
|
||||
assert_eq!(100, to_ms_ts(Precision::Nanosecond, 100110000));
|
||||
assert_eq!(100110, to_ms_ts(Precision::Microsecond, 100110000));
|
||||
assert_eq!(100110000, to_ms_ts(Precision::Millisecond, 100110000));
|
||||
assert_eq!(
|
||||
100110000 * 1000 * 60,
|
||||
to_ms_ts(Precision::Minute, 100110000)
|
||||
);
|
||||
assert_eq!(
|
||||
100110000 * 1000 * 60 * 60,
|
||||
to_ms_ts(Precision::Hour, 100110000)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -119,15 +119,17 @@ fn build_struct(
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
ScalarUDF {
|
||||
name: Self::name().to_string(),
|
||||
signature: Signature::new(
|
||||
// TODO(LFC): Use the new Datafusion UDF impl.
|
||||
#[allow(deprecated)]
|
||||
ScalarUDF::new(
|
||||
Self::name(),
|
||||
&Signature::new(
|
||||
TypeSignature::Exact(Self::input_type()),
|
||||
Volatility::Immutable,
|
||||
),
|
||||
return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
|
||||
fun: Arc::new(Self::calc),
|
||||
}
|
||||
&(Arc::new(|_: &_| Ok(Arc::new(Self::return_type()))) as _),
|
||||
&(Arc::new(Self::calc) as _),
|
||||
)
|
||||
}
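The struct-literal construction of `ScalarUDF` is replaced with the now-deprecated `ScalarUDF::new(name, signature, return_type, fun)` constructor, kept under `#[allow(deprecated)]` until the new DataFusion UDF implementation mentioned in the TODO is adopted; the `as _` casts coerce the return-type closure and `Self::calc` into the expected trait-object `Arc`s.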
|
||||
|
||||
fn input_type() -> Vec<DataType> {
|
||||
|
||||
@@ -18,6 +18,7 @@ use tokio::sync::RwLock;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::schema_name::SchemaNameKey;
|
||||
use crate::key::table_info::TableInfoKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
@@ -107,6 +108,10 @@ where
|
||||
let key: TableNameKey = (&table_name).into();
|
||||
self.invalidate_key(&key.as_raw_key()).await
|
||||
}
|
||||
CacheIdent::SchemaName(schema_name) => {
|
||||
let key: SchemaNameKey = (&schema_name).into();
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
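The new `CacheIdent::SchemaName` arm lets a peer evict a cached schema entry by name. A hedged sketch of the sending side, reusing only names from hunks in this diff (the invalidator handle, the context value, and the example catalog/schema strings are assumptions):

// Broadcast an invalidation for one schema's cached metadata.
cache_invalidator
    .invalidate(
        &ctx,
        vec![CacheIdent::SchemaName(SchemaName {
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
        })],
    )
    .await?;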
|
||||
|
||||
@@ -12,10 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
pub use common_base::AffectedRows;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
|
||||
@@ -26,7 +26,7 @@ use crate::peer::Peer;
|
||||
#[async_trait::async_trait]
|
||||
pub trait Datanode: Send + Sync {
|
||||
/// Handles DML and DDL requests.
|
||||
async fn handle(&self, request: RegionRequest) -> Result<HandleResponse>;
|
||||
async fn handle(&self, request: RegionRequest) -> Result<RegionResponse>;
|
||||
|
||||
/// Handles query requests
|
||||
async fn handle_query(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
|
||||
@@ -42,27 +42,3 @@ pub trait DatanodeManager: Send + Sync {
|
||||
}
|
||||
|
||||
pub type DatanodeManagerRef = Arc<dyn DatanodeManager>;
|
||||
|
||||
/// This result struct is derived from [RegionResponse]
|
||||
#[derive(Debug)]
|
||||
pub struct HandleResponse {
|
||||
pub affected_rows: AffectedRows,
|
||||
pub extension: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
impl HandleResponse {
|
||||
pub fn from_region_response(region_response: RegionResponse) -> Self {
|
||||
Self {
|
||||
affected_rows: region_response.affected_rows as _,
|
||||
extension: region_response.extension,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates one response without extension
|
||||
pub fn new(affected_rows: AffectedRows) -> Self {
|
||||
Self {
|
||||
affected_rows,
|
||||
extension: Default::default(),
|
||||
}
|
||||
}
|
||||
}
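With `HandleResponse` gone, handlers return `api::region::RegionResponse` directly; the mock datanode handlers later in this diff simply do `Ok(RegionResponse::new(0))`.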
|
||||
|
||||
@@ -51,7 +51,7 @@ impl AlterTableProcedure {
|
||||
AlterKind::RenameTable { new_table_name } => {
|
||||
new_info.name = new_table_name.to_string();
|
||||
}
|
||||
AlterKind::DropColumns { .. } => {}
|
||||
AlterKind::DropColumns { .. } | AlterKind::ChangeColumnTypes { .. } => {}
|
||||
}
|
||||
|
||||
Ok(new_info)
|
||||
|
||||
@@ -271,7 +271,7 @@ impl CreateTableProcedure {
|
||||
///
|
||||
/// Abort(not-retry):
|
||||
/// - Failed to create table metadata.
|
||||
async fn on_create_metadata(&self) -> Result<Status> {
|
||||
async fn on_create_metadata(&mut self) -> Result<Status> {
|
||||
let table_id = self.table_id();
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
|
||||
@@ -285,6 +285,7 @@ impl CreateTableProcedure {
|
||||
.await?;
|
||||
info!("Created table metadata for table {table_id}");
|
||||
|
||||
self.creator.opening_regions.clear();
|
||||
Ok(Status::done_with_output(table_id))
|
||||
}
|
||||
}
|
||||
@@ -385,7 +386,7 @@ impl TableCreator {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
|
||||
pub enum CreateTableState {
|
||||
/// Prepares to create the table
|
||||
Prepare,
|
||||
|
||||
@@ -76,6 +76,7 @@ impl DropDatabaseCursor {
|
||||
.await?;
|
||||
Ok((
|
||||
Box::new(DropDatabaseExecutor::new(
|
||||
table_id,
|
||||
table_id,
|
||||
TableName::new(&ctx.catalog, &ctx.schema, &table_name),
|
||||
table_route.region_routes,
|
||||
@@ -86,6 +87,7 @@ impl DropDatabaseCursor {
|
||||
}
|
||||
(DropTableTarget::Physical, TableRouteValue::Physical(table_route)) => Ok((
|
||||
Box::new(DropDatabaseExecutor::new(
|
||||
table_id,
|
||||
table_id,
|
||||
TableName::new(&ctx.catalog, &ctx.schema, &table_name),
|
||||
table_route.region_routes,
|
||||
@@ -163,7 +165,7 @@ mod tests {
|
||||
async fn test_next_without_logical_tables() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
create_physical_table(ddl_context.clone(), 0, "phy").await;
|
||||
create_physical_table(&ddl_context, 0, "phy").await;
|
||||
// It always starts from Logical
|
||||
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
|
||||
let mut ctx = DropDatabaseContext {
|
||||
@@ -197,7 +199,7 @@ mod tests {
|
||||
async fn test_next_with_logical_tables() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
|
||||
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
|
||||
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
|
||||
// It always starts from Logical
|
||||
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
|
||||
@@ -220,7 +222,7 @@ mod tests {
|
||||
.get_physical_table_route(physical_table_id)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(table_route.region_routes, executor.region_routes);
|
||||
assert_eq!(table_route.region_routes, executor.physical_region_routes);
|
||||
assert_eq!(executor.target, DropTableTarget::Logical);
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ use crate::ddl::drop_database::State;
|
||||
use crate::ddl::drop_table::executor::DropTableExecutor;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
use crate::rpc::router::{operating_leader_regions, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
@@ -33,8 +34,10 @@ use crate::table_name::TableName;
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct DropDatabaseExecutor {
|
||||
table_id: TableId,
|
||||
physical_table_id: TableId,
|
||||
table_name: TableName,
|
||||
pub(crate) region_routes: Vec<RegionRoute>,
|
||||
/// The physical table region routes.
|
||||
pub(crate) physical_region_routes: Vec<RegionRoute>,
|
||||
pub(crate) target: DropTableTarget,
|
||||
#[serde(skip)]
|
||||
dropping_regions: Vec<OperatingRegionGuard>,
|
||||
@@ -44,14 +47,16 @@ impl DropDatabaseExecutor {
|
||||
/// Returns a new [DropDatabaseExecutor].
|
||||
pub fn new(
|
||||
table_id: TableId,
|
||||
physical_table_id: TableId,
|
||||
table_name: TableName,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
physical_region_routes: Vec<RegionRoute>,
|
||||
target: DropTableTarget,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_name,
|
||||
table_id,
|
||||
region_routes,
|
||||
physical_table_id,
|
||||
table_name,
|
||||
physical_region_routes,
|
||||
target,
|
||||
dropping_regions: vec![],
|
||||
}
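The widened constructor explains the doubled argument in the cursor hunks above: a physical table passes the same id as both `table_id` and `physical_table_id`, a logical table passes the differing pair (see the `logical_table_id`/`physical_table_id` calls in the tests below), and the routes supplied are always the physical table's region routes.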
|
||||
@@ -60,7 +65,7 @@ impl DropDatabaseExecutor {
|
||||
|
||||
impl DropDatabaseExecutor {
|
||||
fn register_dropping_regions(&mut self, ddl_ctx: &DdlContext) -> Result<()> {
|
||||
let dropping_regions = operating_leader_regions(&self.region_routes);
|
||||
let dropping_regions = operating_leader_regions(&self.physical_region_routes);
|
||||
let mut dropping_region_guards = Vec::with_capacity(dropping_regions.len());
|
||||
for (region_id, datanode_id) in dropping_regions {
|
||||
let guard = ddl_ctx
|
||||
@@ -87,12 +92,18 @@ impl State for DropDatabaseExecutor {
|
||||
) -> Result<(Box<dyn State>, Status)> {
|
||||
self.register_dropping_regions(ddl_ctx)?;
|
||||
let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
|
||||
// Deletes metadata for table permanently.
|
||||
let table_route_value = TableRouteValue::new(
|
||||
self.table_id,
|
||||
self.physical_table_id,
|
||||
self.physical_region_routes.clone(),
|
||||
);
|
||||
executor
|
||||
.on_remove_metadata(ddl_ctx, &self.region_routes)
|
||||
.on_destroy_metadata(ddl_ctx, &table_route_value)
|
||||
.await?;
|
||||
executor.invalidate_table_cache(ddl_ctx).await?;
|
||||
executor
|
||||
.on_drop_regions(ddl_ctx, &self.region_routes)
|
||||
.on_drop_regions(ddl_ctx, &self.physical_region_routes)
|
||||
.await?;
|
||||
info!("Table: {}({}) is dropped", self.table_name, self.table_id);
|
||||
|
||||
@@ -111,18 +122,20 @@ impl State for DropDatabaseExecutor {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
|
||||
use crate::datanode_manager::HandleResponse;
|
||||
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
|
||||
use crate::ddl::drop_database::executor::DropDatabaseExecutor;
|
||||
use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
|
||||
use crate::ddl::test_util::{create_logical_table, create_physical_table};
|
||||
use crate::error::{self, Error, Result};
|
||||
use crate::key::datanode_table::DatanodeTableKey;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::region_distribution;
|
||||
use crate::table_name::TableName;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
|
||||
|
||||
@@ -131,8 +144,8 @@ mod tests {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for NaiveDatanodeHandler {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
|
||||
Ok(HandleResponse::new(0))
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
|
||||
async fn handle_query(
|
||||
@@ -148,7 +161,7 @@ mod tests {
|
||||
async fn test_next_with_physical_table() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
|
||||
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
|
||||
let (_, table_route) = ddl_context
|
||||
.table_metadata_manager
|
||||
.table_route_manager()
|
||||
@@ -157,6 +170,7 @@ mod tests {
|
||||
.unwrap();
|
||||
{
|
||||
let mut state = DropDatabaseExecutor::new(
|
||||
physical_table_id,
|
||||
physical_table_id,
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
|
||||
table_route.region_routes.clone(),
|
||||
@@ -181,9 +195,10 @@ mod tests {
|
||||
tables: None,
|
||||
};
|
||||
let mut state = DropDatabaseExecutor::new(
|
||||
physical_table_id,
|
||||
physical_table_id,
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
|
||||
table_route.region_routes,
|
||||
table_route.region_routes.clone(),
|
||||
DropTableTarget::Physical,
|
||||
);
|
||||
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
|
||||
@@ -196,7 +211,7 @@ mod tests {
|
||||
async fn test_next_logical_table() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
|
||||
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
|
||||
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
|
||||
let logical_table_id = physical_table_id + 1;
|
||||
let (_, table_route) = ddl_context
|
||||
@@ -207,6 +222,7 @@ mod tests {
|
||||
.unwrap();
|
||||
{
|
||||
let mut state = DropDatabaseExecutor::new(
|
||||
logical_table_id,
|
||||
physical_table_id,
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "metric"),
|
||||
table_route.region_routes.clone(),
|
||||
@@ -231,8 +247,9 @@ mod tests {
|
||||
tables: None,
|
||||
};
|
||||
let mut state = DropDatabaseExecutor::new(
|
||||
logical_table_id,
|
||||
physical_table_id,
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "metric"),
|
||||
table_route.region_routes,
|
||||
DropTableTarget::Logical,
|
||||
);
|
||||
@@ -240,6 +257,33 @@ mod tests {
|
||||
assert!(!status.need_persist());
|
||||
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
|
||||
assert_eq!(cursor.target, DropTableTarget::Logical);
|
||||
// Checks table info
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(physical_table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
// Checks table route
|
||||
let table_route = ddl_context
|
||||
.table_metadata_manager
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.get(physical_table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let region_routes = table_route.region_routes().unwrap();
|
||||
for datanode_id in region_distribution(region_routes).into_keys() {
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.datanode_table_manager()
|
||||
.get(&DatanodeTableKey::new(datanode_id, physical_table_id))
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -247,7 +291,7 @@ mod tests {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for RetryErrorDatanodeHandler {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
|
||||
Err(Error::RetryLater {
|
||||
source: BoxedError::new(
|
||||
error::UnexpectedSnafu {
|
||||
@@ -271,7 +315,7 @@ mod tests {
|
||||
async fn test_next_retryable_err() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
|
||||
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
|
||||
let (_, table_route) = ddl_context
|
||||
.table_metadata_manager
|
||||
.table_route_manager()
|
||||
@@ -279,6 +323,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
let mut state = DropDatabaseExecutor::new(
|
||||
physical_table_id,
|
||||
physical_table_id,
|
||||
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
|
||||
table_route.region_routes,
|
||||
|
||||
@@ -18,10 +18,12 @@ use common_procedure::Status;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::end::DropDatabaseEnd;
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::drop_database::{DropDatabaseContext, State};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::Result;
|
||||
use crate::key::schema_name::SchemaNameKey;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::schema_name::{SchemaName, SchemaNameKey};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct DropDatabaseRemoveMetadata;
|
||||
@@ -40,7 +42,53 @@ impl State for DropDatabaseRemoveMetadata {
|
||||
.delete(SchemaNameKey::new(&ctx.catalog, &ctx.schema))
|
||||
.await?;
|
||||
|
||||
return Ok((Box::new(DropDatabaseEnd), Status::done()));
|
||||
return Ok((Box::new(DropMetadataBroadcast), Status::executing(true)));
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct DropMetadataBroadcast;
|
||||
|
||||
impl DropMetadataBroadcast {
|
||||
/// Invalidates frontend caches
|
||||
async fn invalidate_schema_cache(
|
||||
&self,
|
||||
ddl_ctx: &DdlContext,
|
||||
db_ctx: &mut DropDatabaseContext,
|
||||
) -> Result<()> {
|
||||
let cache_invalidator = &ddl_ctx.cache_invalidator;
|
||||
let ctx = Context {
|
||||
subject: Some("Invalidate schema cache by dropping database".to_string()),
|
||||
};
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&ctx,
|
||||
vec![CacheIdent::SchemaName(SchemaName {
|
||||
catalog_name: db_ctx.catalog.clone(),
|
||||
schema_name: db_ctx.schema.clone(),
|
||||
})],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
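The drop-database flow therefore gains one hop: `DropDatabaseRemoveMetadata` now returns `Status::executing(true)` and hands off to `DropMetadataBroadcast`, which pushes the `CacheIdent::SchemaName` invalidation to frontends before finishing with `DropDatabaseEnd`.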
|
||||
|
||||
#[async_trait::async_trait]
|
||||
#[typetag::serde]
|
||||
impl State for DropMetadataBroadcast {
|
||||
async fn next(
|
||||
&mut self,
|
||||
ddl_ctx: &DdlContext,
|
||||
ctx: &mut DropDatabaseContext,
|
||||
) -> Result<(Box<dyn State>, Status)> {
|
||||
self.invalidate_schema_cache(ddl_ctx, ctx).await?;
|
||||
Ok((Box::new(DropDatabaseEnd), Status::done()))
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
@@ -53,7 +101,7 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::ddl::drop_database::end::DropDatabaseEnd;
|
||||
use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata;
|
||||
use crate::ddl::drop_database::metadata::{DropDatabaseRemoveMetadata, DropMetadataBroadcast};
|
||||
use crate::ddl::drop_database::{DropDatabaseContext, State};
|
||||
use crate::key::schema_name::SchemaNameKey;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
@@ -76,14 +124,23 @@ mod tests {
|
||||
tables: None,
|
||||
};
|
||||
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
|
||||
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
|
||||
assert!(status.is_done());
|
||||
state
|
||||
.as_any()
|
||||
.downcast_ref::<DropMetadataBroadcast>()
|
||||
.unwrap();
|
||||
assert!(!status.is_done());
|
||||
assert!(!ddl_context
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
.exists(SchemaNameKey::new("foo", "bar"))
|
||||
.await
|
||||
.unwrap());
|
||||
|
||||
let mut state = DropMetadataBroadcast;
|
||||
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
|
||||
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
|
||||
assert!(status.is_done());
|
||||
|
||||
// Schema not exists
|
||||
let mut state = DropDatabaseRemoveMetadata;
|
||||
let mut ctx = DropDatabaseContext {
|
||||
@@ -93,7 +150,10 @@ mod tests {
|
||||
tables: None,
|
||||
};
|
||||
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
|
||||
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
|
||||
assert!(status.is_done());
|
||||
state
|
||||
.as_any()
|
||||
.downcast_ref::<DropMetadataBroadcast>()
|
||||
.unwrap();
|
||||
assert!(!status.is_done());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,9 +18,11 @@ mod metadata;
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
|
||||
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure,
|
||||
Result as ProcedureResult, Status,
|
||||
};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing::warn;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use strum::AsRefStr;
|
||||
@@ -31,9 +33,7 @@ use self::executor::DropTableExecutor;
|
||||
use crate::ddl::utils::handle_retry_error;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
|
||||
use crate::metrics;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
@@ -46,47 +46,51 @@ pub struct DropTableProcedure {
|
||||
/// The serializable data.
|
||||
pub data: DropTableData,
|
||||
/// The guards of opening regions.
|
||||
pub dropping_regions: Vec<OperatingRegionGuard>,
|
||||
pub(crate) dropping_regions: Vec<OperatingRegionGuard>,
|
||||
/// The drop table executor.
|
||||
executor: DropTableExecutor,
|
||||
}
|
||||
|
||||
impl DropTableProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";
|
||||
|
||||
pub fn new(cluster_id: u64, task: DropTableTask, context: DdlContext) -> Self {
|
||||
let data = DropTableData::new(cluster_id, task);
|
||||
let executor = data.build_executor();
|
||||
Self {
|
||||
context,
|
||||
data: DropTableData::new(cluster_id, task),
|
||||
data,
|
||||
dropping_regions: vec![],
|
||||
executor,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
let data: DropTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
let executor = data.build_executor();
|
||||
Ok(Self {
|
||||
context,
|
||||
data,
|
||||
dropping_regions: vec![],
|
||||
executor,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn on_prepare<'a>(&mut self, executor: &DropTableExecutor) -> Result<Status> {
|
||||
if executor.on_prepare(&self.context).await?.stop() {
|
||||
pub(crate) async fn on_prepare<'a>(&mut self) -> Result<Status> {
|
||||
if self.executor.on_prepare(&self.context).await?.stop() {
|
||||
return Ok(Status::done());
|
||||
}
|
||||
self.fill_table_metadata().await?;
|
||||
self.data.state = DropTableState::RemoveMetadata;
|
||||
self.data.state = DropTableState::DeleteMetadata;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
/// Registers the dropping regions if they are not registered yet.
|
||||
fn register_dropping_regions(&mut self) -> Result<()> {
|
||||
// Safety: filled in `on_prepare`.
|
||||
let region_routes = self.data.region_routes().unwrap()?;
|
||||
let dropping_regions = operating_leader_regions(&self.data.physical_region_routes);
|
||||
|
||||
let dropping_regions = operating_leader_regions(region_routes);
|
||||
|
||||
if self.dropping_regions.len() == dropping_regions.len() {
|
||||
if !self.dropping_regions.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -109,7 +113,7 @@ impl DropTableProcedure {
|
||||
}
|
||||
|
||||
/// Removes the table metadata.
|
||||
async fn on_remove_metadata(&mut self, executor: &DropTableExecutor) -> Result<Status> {
|
||||
pub(crate) async fn on_delete_metadata(&mut self) -> Result<Status> {
|
||||
self.register_dropping_regions()?;
|
||||
// NOTE: If the meta server crashes after the `RemoveMetadata` step,
// the corresponding regions of this table on the Datanode will be closed automatically.
|
||||
@@ -117,12 +121,15 @@ impl DropTableProcedure {
|
||||
|
||||
// TODO(weny): Considers introducing a RegionStatus to indicate the region is dropping.
|
||||
let table_id = self.data.table_id();
|
||||
executor
|
||||
.on_remove_metadata(
|
||||
&self.context,
|
||||
// Safety: filled in `on_prepare`.
|
||||
self.data.region_routes().unwrap()?,
|
||||
)
|
||||
let table_route_value = &TableRouteValue::new(
|
||||
self.data.task.table_id,
|
||||
// Safety: checked
|
||||
self.data.physical_table_id.unwrap(),
|
||||
self.data.physical_region_routes.clone(),
|
||||
);
|
||||
// Deletes table metadata logically.
|
||||
self.executor
|
||||
.on_delete_metadata(&self.context, table_route_value)
|
||||
.await?;
|
||||
info!("Deleted table metadata for table {table_id}");
|
||||
self.data.state = DropTableState::InvalidateTableCache;
|
||||
@@ -130,30 +137,35 @@ impl DropTableProcedure {
|
||||
}
|
||||
|
||||
/// Broadcasts invalidate table cache instruction.
|
||||
async fn on_broadcast(&mut self, executor: &DropTableExecutor) -> Result<Status> {
|
||||
executor.invalidate_table_cache(&self.context).await?;
|
||||
async fn on_broadcast(&mut self) -> Result<Status> {
|
||||
self.executor.invalidate_table_cache(&self.context).await?;
|
||||
self.data.state = DropTableState::DatanodeDropRegions;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub async fn on_datanode_drop_regions(&self, executor: &DropTableExecutor) -> Result<Status> {
|
||||
executor
|
||||
.on_drop_regions(
|
||||
&self.context,
|
||||
// Safety: filled in `on_prepare`.
|
||||
self.data.region_routes().unwrap()?,
|
||||
)
|
||||
pub async fn on_datanode_drop_regions(&mut self) -> Result<Status> {
|
||||
self.executor
|
||||
.on_drop_regions(&self.context, &self.data.physical_region_routes)
|
||||
.await?;
|
||||
Ok(Status::done())
|
||||
self.data.state = DropTableState::DeleteTombstone;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub(crate) fn executor(&self) -> DropTableExecutor {
|
||||
DropTableExecutor::new(
|
||||
self.data.task.table_name(),
|
||||
self.data.table_id(),
|
||||
self.data.task.drop_if_exists,
|
||||
)
|
||||
/// Deletes metadata tombstone.
|
||||
async fn on_delete_metadata_tombstone(&mut self) -> Result<Status> {
|
||||
let table_route_value = &TableRouteValue::new(
|
||||
self.data.task.table_id,
|
||||
// Safety: checked
|
||||
self.data.physical_table_id.unwrap(),
|
||||
self.data.physical_region_routes.clone(),
|
||||
);
|
||||
self.executor
|
||||
.on_delete_metadata_tombstone(&self.context, table_route_value)
|
||||
.await?;
|
||||
|
||||
self.dropping_regions.clear();
|
||||
Ok(Status::done())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -164,17 +176,17 @@ impl Procedure for DropTableProcedure {
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
let executor = self.executor();
|
||||
let state = &self.data.state;
|
||||
let _timer = metrics::METRIC_META_PROCEDURE_DROP_TABLE
|
||||
.with_label_values(&[state.as_ref()])
|
||||
.start_timer();
|
||||
|
||||
match self.data.state {
|
||||
DropTableState::Prepare => self.on_prepare(&executor).await,
|
||||
DropTableState::RemoveMetadata => self.on_remove_metadata(&executor).await,
|
||||
DropTableState::InvalidateTableCache => self.on_broadcast(&executor).await,
|
||||
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions(&executor).await,
|
||||
DropTableState::Prepare => self.on_prepare().await,
|
||||
DropTableState::DeleteMetadata => self.on_delete_metadata().await,
|
||||
DropTableState::InvalidateTableCache => self.on_broadcast().await,
|
||||
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions().await,
|
||||
DropTableState::DeleteTombstone => self.on_delete_metadata_tombstone().await,
|
||||
}
|
||||
.map_err(handle_retry_error)
|
||||
}
|
||||
@@ -194,6 +206,28 @@ impl Procedure for DropTableProcedure {
|
||||
|
||||
LockKey::new(lock_key)
|
||||
}
|
||||
|
||||
fn rollback_supported(&self) -> bool {
|
||||
!matches!(self.data.state, DropTableState::Prepare)
|
||||
}
|
||||
|
||||
async fn rollback(&mut self, _: &ProcedureContext) -> ProcedureResult<()> {
|
||||
warn!(
|
||||
"Rolling back the drop table procedure, table: {}",
|
||||
self.data.table_id()
|
||||
);
|
||||
|
||||
let table_route_value = &TableRouteValue::new(
|
||||
self.data.task.table_id,
|
||||
// Safety: checked
|
||||
self.data.physical_table_id.unwrap(),
|
||||
self.data.physical_region_routes.clone(),
|
||||
);
|
||||
self.executor
|
||||
.on_restore_metadata(&self.context, table_route_value)
|
||||
.await
|
||||
.map_err(ProcedureError::external)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -201,8 +235,8 @@ pub struct DropTableData {
|
||||
pub state: DropTableState,
|
||||
pub cluster_id: u64,
|
||||
pub task: DropTableTask,
|
||||
pub table_route_value: Option<DeserializedValueWithBytes<TableRouteValue>>,
|
||||
pub table_info_value: Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
pub physical_region_routes: Vec<RegionRoute>,
|
||||
pub physical_table_id: Option<TableId>,
|
||||
}
|
||||
|
||||
impl DropTableData {
|
||||
@@ -211,8 +245,8 @@ impl DropTableData {
|
||||
state: DropTableState::Prepare,
|
||||
cluster_id,
|
||||
task,
|
||||
table_route_value: None,
|
||||
table_info_value: None,
|
||||
physical_region_routes: vec![],
|
||||
physical_table_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -220,24 +254,30 @@ impl DropTableData {
|
||||
self.task.table_ref()
|
||||
}
|
||||
|
||||
fn region_routes(&self) -> Option<Result<&Vec<RegionRoute>>> {
|
||||
self.table_route_value.as_ref().map(|v| v.region_routes())
|
||||
}
|
||||
|
||||
fn table_id(&self) -> TableId {
|
||||
self.task.table_id
|
||||
}
|
||||
|
||||
fn build_executor(&self) -> DropTableExecutor {
|
||||
DropTableExecutor::new(
|
||||
self.task.table_name(),
|
||||
self.task.table_id,
|
||||
self.task.drop_if_exists,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// The state of drop table.
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr, PartialEq)]
|
||||
pub enum DropTableState {
|
||||
/// Prepares to drop the table
|
||||
Prepare,
|
||||
/// Removes metadata
|
||||
RemoveMetadata,
|
||||
/// Deletes metadata logically
|
||||
DeleteMetadata,
|
||||
/// Invalidates Table Cache
|
||||
InvalidateTableCache,
|
||||
/// Drops regions on Datanode
|
||||
DatanodeDropRegions,
|
||||
/// Deletes metadata tombstone permanently
|
||||
DeleteTombstone,
|
||||
}
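Read together with the `execute` match above, the reworked procedure walks Prepare -> DeleteMetadata (logical delete) -> InvalidateTableCache -> DatanodeDropRegions -> DeleteTombstone (permanent delete), and `rollback_supported`/`rollback` can restore the logically deleted metadata from any state after Prepare.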
|
||||
|
||||
@@ -30,6 +30,7 @@ use crate::ddl::DdlContext;
|
||||
use crate::error::{self, Result};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
@@ -99,14 +100,47 @@ impl DropTableExecutor {
|
||||
Ok(Control::Continue(()))
|
||||
}
|
||||
|
||||
/// Removes the table metadata.
|
||||
pub async fn on_remove_metadata(
|
||||
/// Deletes the table metadata **logically**.
|
||||
pub async fn on_delete_metadata(
|
||||
&self,
|
||||
ctx: &DdlContext,
|
||||
region_routes: &[RegionRoute],
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
ctx.table_metadata_manager
|
||||
.delete_table_metadata(self.table_id, &self.table, region_routes)
|
||||
.delete_table_metadata(self.table_id, &self.table, table_route_value)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Deletes the table metadata tombstone **permanently**.
|
||||
pub async fn on_delete_metadata_tombstone(
|
||||
&self,
|
||||
ctx: &DdlContext,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
ctx.table_metadata_manager
|
||||
.delete_table_metadata_tombstone(self.table_id, &self.table, table_route_value)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Deletes the metadata of the table **permanently**.
|
||||
pub async fn on_destroy_metadata(
|
||||
&self,
|
||||
ctx: &DdlContext,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
ctx.table_metadata_manager
|
||||
.destroy_table_metadata(self.table_id, &self.table, table_route_value)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Restores the table metadata.
|
||||
pub async fn on_restore_metadata(
|
||||
&self,
|
||||
ctx: &DdlContext,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
ctx.table_metadata_manager
|
||||
.restore_table_metadata(self.table_id, &self.table, table_route_value)
|
||||
.await
|
||||
}
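The executor now separates four metadata operations: `on_delete_metadata` (logical delete, presumably parking the keys as a tombstone), `on_delete_metadata_tombstone` (purges that tombstone permanently), `on_destroy_metadata` (permanent delete in one step, used by the drop-database executor), and `on_restore_metadata` (undoes the logical delete during rollback).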
|
||||
|
||||
|
||||
@@ -12,35 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_catalog::format_full_table_name;
use snafu::OptionExt;

use crate::ddl::drop_table::DropTableProcedure;
use crate::error::{self, Result};
use crate::error::Result;

impl DropTableProcedure {
    /// Fetches the table info and table route.
    /// Fetches the table info and physical table route.
    pub(crate) async fn fill_table_metadata(&mut self) -> Result<()> {
        let task = &self.data.task;
        let table_info_value = self
            .context
            .table_metadata_manager
            .table_info_manager()
            .get(task.table_id)
            .await?
            .with_context(|| error::TableInfoNotFoundSnafu {
                table: format_full_table_name(&task.catalog, &task.schema, &task.table),
            })?;
        let (_, table_route_value) = self
        let (physical_table_id, physical_table_route_value) = self
            .context
            .table_metadata_manager
            .table_route_manager()
            .table_route_storage()
            .get_raw_physical_table_route(task.table_id)
            .get_physical_table_route(task.table_id)
            .await?;

        self.data.table_info_value = Some(table_info_value);
        self.data.table_route_value = Some(table_route_value);
        self.data.physical_region_routes = physical_table_route_value.region_routes;
        self.data.physical_table_id = Some(physical_table_id);

        Ok(())
    }
}

@@ -52,5 +52,9 @@ pub(crate) fn build_new_physical_table_info(
|
||||
columns.push(col.column_schema.clone());
|
||||
}
|
||||
|
||||
if let Some(time_index) = *time_index {
|
||||
raw_table_info.meta.schema.column_schemas[time_index].set_time_index();
|
||||
}
|
||||
|
||||
raw_table_info
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ pub async fn create_physical_table_metadata(
|
||||
}
|
||||
|
||||
pub async fn create_physical_table(
|
||||
ddl_context: DdlContext,
|
||||
ddl_context: &DdlContext,
|
||||
cluster_id: ClusterId,
|
||||
name: &str,
|
||||
) -> TableId {
|
||||
@@ -67,7 +67,7 @@ pub async fn create_physical_table(
|
||||
.unwrap();
|
||||
create_physical_table_task.set_table_id(table_id);
|
||||
create_physical_table_metadata(
|
||||
&ddl_context,
|
||||
ddl_context,
|
||||
create_physical_table_task.table_info.clone(),
|
||||
TableRouteValue::Physical(table_route),
|
||||
)
|
||||
@@ -81,7 +81,7 @@ pub async fn create_logical_table(
|
||||
cluster_id: ClusterId,
|
||||
physical_table_id: TableId,
|
||||
table_name: &str,
|
||||
) {
|
||||
) -> TableId {
|
||||
use std::assert_matches::assert_matches;
|
||||
|
||||
let tasks = vec![test_create_logical_table_task(table_name)];
|
||||
@@ -91,6 +91,14 @@ pub async fn create_logical_table(
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
let status = procedure.on_create_metadata().await.unwrap();
|
||||
assert_matches!(status, Status::Done { .. });
|
||||
|
||||
let Status::Done {
|
||||
output: Some(output),
|
||||
} = status
|
||||
else {
|
||||
panic!("Unexpected status: {:?}", status);
|
||||
};
|
||||
output.downcast_ref::<Vec<u32>>().unwrap()[0]
|
||||
}
|
||||
|
||||
pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
use common_error::ext::{BoxedError, ErrorExt, StackError};
|
||||
use common_error::status_code::StatusCode;
|
||||
@@ -20,14 +21,13 @@ use common_telemetry::debug;
|
||||
use snafu::{ResultExt, Snafu};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::datanode_manager::HandleResponse;
|
||||
use crate::error::{self, Error, Result};
|
||||
use crate::peer::Peer;
|
||||
use crate::test_util::MockDatanodeHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for () {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
@@ -45,10 +45,10 @@ pub struct DatanodeWatcher(pub mpsc::Sender<(Peer, RegionRequest)>);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for DatanodeWatcher {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
|
||||
self.0.send((peer.clone(), request)).await.unwrap();
|
||||
Ok(HandleResponse::new(0))
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
|
||||
async fn handle_query(
|
||||
@@ -65,7 +65,7 @@ pub struct RetryErrorDatanodeHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for RetryErrorDatanodeHandler {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
debug!("Returning retry later for request: {request:?}, peer: {peer:?}");
|
||||
Err(Error::RetryLater {
|
||||
source: BoxedError::new(
|
||||
@@ -91,7 +91,7 @@ pub struct UnexpectedErrorDatanodeHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for UnexpectedErrorDatanodeHandler {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "mock error",
|
||||
@@ -135,7 +135,7 @@ impl ErrorExt for MockRequestOutdatedError {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for RequestOutdatedErrorDatanodeHandler {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
|
||||
Err(BoxedError::new(MockRequestOutdatedError)).context(error::ExternalSnafu)
|
||||
}
|
||||
@@ -154,9 +154,9 @@ pub struct NaiveDatanodeHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for NaiveDatanodeHandler {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
|
||||
Ok(HandleResponse::new(0))
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
|
||||
async fn handle_query(
|
||||
|
||||
@@ -128,9 +128,9 @@ async fn test_on_prepare_different_physical_table() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
|
||||
let phy1_id = create_physical_table(ddl_context.clone(), cluster_id, "phy1").await;
|
||||
let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
|
||||
let phy2_id = create_physical_table(ddl_context.clone(), cluster_id, "phy2").await;
|
||||
let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
|
||||
|
||||
let tasks = vec![
|
||||
@@ -150,7 +150,7 @@ async fn test_on_prepare_logical_table_not_exists() {
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
|
||||
@@ -172,7 +172,7 @@ async fn test_on_prepare() {
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
@@ -196,7 +196,7 @@ async fn test_on_update_metadata() {
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
@@ -233,7 +233,7 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
|
||||
@@ -21,9 +21,12 @@ use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
|
||||
use common_procedure_test::MockContextProvider;
|
||||
use common_procedure_test::{
|
||||
execute_procedure_until, execute_procedure_until_done, MockContextProvider,
|
||||
};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::ddl::create_table::CreateTableProcedure;
|
||||
use crate::ddl::create_table::{CreateTableProcedure, CreateTableState};
|
||||
use crate::ddl::test_util::columns::TestColumnDefBuilder;
|
||||
use crate::ddl::test_util::create_table::{
|
||||
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
|
||||
@@ -33,8 +36,9 @@ use crate::ddl::test_util::datanode_handler::{
|
||||
};
|
||||
use crate::error::Error;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager};
|
||||
|
||||
fn test_create_table_task(name: &str) -> CreateTableTask {
|
||||
let create_table = TestCreateTableExprBuilder::default()
|
||||
@@ -244,3 +248,39 @@ async fn test_on_create_metadata() {
|
||||
let table_id = status.downcast_output_ref::<u32>().unwrap();
|
||||
assert_eq!(*table_id, 1024);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
|
||||
let cluster_id = 1;
|
||||
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
|
||||
|
||||
let task = test_create_table_task("foo");
|
||||
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
|
||||
execute_procedure_until(&mut procedure, |p| {
|
||||
p.creator.data.state == CreateTableState::CreateMetadata
|
||||
})
|
||||
.await;
|
||||
|
||||
// Ensure that after running to the state `CreateMetadata`(just past `DatanodeCreateRegions`),
|
||||
// the opening regions should be recorded:
|
||||
let guards = &procedure.creator.opening_regions;
|
||||
assert_eq!(guards.len(), 1);
|
||||
let (datanode_id, region_id) = (0, RegionId::new(procedure.table_id(), 0));
|
||||
assert_eq!(guards[0].info(), (datanode_id, region_id));
|
||||
assert!(ddl_context
|
||||
.memory_region_keeper
|
||||
.contains(datanode_id, region_id));
|
||||
|
||||
execute_procedure_until_done(&mut procedure).await;
|
||||
|
||||
// Ensure that when run to the end, the opening regions should be cleared:
|
||||
let guards = &procedure.creator.opening_regions;
|
||||
assert!(guards.is_empty());
|
||||
assert!(!ddl_context
|
||||
.memory_region_keeper
|
||||
.contains(datanode_id, region_id));
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ async fn test_drop_database_with_logical_tables() {
|
||||
.await
|
||||
.unwrap();
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
@@ -91,7 +91,7 @@ async fn test_drop_database_retryable_error() {
|
||||
.await
|
||||
.unwrap();
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
|
||||
@@ -19,17 +19,29 @@ use api::v1::region::{region_request, RegionRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_procedure::Procedure;
|
||||
use common_procedure_test::{
|
||||
execute_procedure_until, execute_procedure_until_done, new_test_procedure_context,
|
||||
};
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
|
||||
use crate::ddl::drop_table::{DropTableProcedure, DropTableState};
|
||||
use crate::ddl::test_util::create_table::test_create_table_task;
|
||||
use crate::ddl::test_util::datanode_handler::DatanodeWatcher;
|
||||
use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
|
||||
use crate::ddl::test_util::{
|
||||
create_logical_table, create_physical_table, create_physical_table_metadata,
|
||||
test_create_logical_table_task, test_create_physical_table_task,
|
||||
};
|
||||
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::ddl::DropTableTask;
|
||||
use crate::rpc::router::{Region, RegionRoute};
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_prepare_table_not_exists_err() {
|
||||
@@ -50,17 +62,9 @@ async fn test_on_prepare_table_not_exists_err() {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let task = DropTableTask {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: "bar".to_string(),
|
||||
table_id,
|
||||
drop_if_exists: false,
|
||||
};
|
||||
|
||||
let task = new_drop_table_task("bar", table_id, false);
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
|
||||
let executor = procedure.executor();
|
||||
let err = procedure.on_prepare(&executor).await.unwrap_err();
|
||||
let err = procedure.on_prepare().await.unwrap_err();
|
||||
assert_eq!(err.status_code(), StatusCode::TableNotFound);
|
||||
}
|
||||
|
||||
@@ -83,31 +87,15 @@ async fn test_on_prepare_table() {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let task = DropTableTask {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: "bar".to_string(),
|
||||
table_id,
|
||||
drop_if_exists: true,
|
||||
};
|
||||
|
||||
let task = new_drop_table_task("bar", table_id, true);
|
||||
// Drop if exists
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
let executor = procedure.executor();
|
||||
procedure.on_prepare(&executor).await.unwrap();
|
||||
|
||||
let task = DropTableTask {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: table_name.to_string(),
|
||||
table_id,
|
||||
drop_if_exists: false,
|
||||
};
|
||||
procedure.on_prepare().await.unwrap();
|
||||
|
||||
let task = new_drop_table_task(table_name, table_id, false);
|
||||
// Drop table
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
|
||||
let executor = procedure.executor();
|
||||
procedure.on_prepare(&executor).await.unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -153,18 +141,11 @@ async fn test_on_datanode_drop_regions() {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let task = DropTableTask {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: table_name.to_string(),
|
||||
table_id,
|
||||
drop_if_exists: false,
|
||||
};
|
||||
let task = new_drop_table_task(table_name, table_id, false);
|
||||
// Drop table
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
|
||||
let executor = procedure.executor();
|
||||
procedure.on_prepare(&executor).await.unwrap();
|
||||
procedure.on_datanode_drop_regions(&executor).await.unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.on_datanode_drop_regions().await.unwrap();
|
||||
|
||||
let check = |peer: Peer,
|
||||
request: RegionRequest,
|
||||
@@ -191,3 +172,127 @@ async fn test_on_datanode_drop_regions() {
|
||||
let (peer, request) = results.remove(0);
|
||||
check(peer, request, 3, RegionId::new(table_id, 3));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_rollback() {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend.clone());
|
||||
let cluster_id = 1;
|
||||
// Prepares physical table metadata.
|
||||
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
|
||||
let TableMetadata {
|
||||
table_id,
|
||||
table_route,
|
||||
..
|
||||
} = ddl_context
|
||||
.table_metadata_allocator
|
||||
.create(
|
||||
&TableMetadataAllocatorContext { cluster_id },
|
||||
&create_physical_table_task,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
create_physical_table_task.set_table_id(table_id);
|
||||
create_physical_table_metadata(
|
||||
&ddl_context,
|
||||
create_physical_table_task.table_info.clone(),
|
||||
TableRouteValue::Physical(table_route),
|
||||
)
|
||||
.await;
|
||||
// The create logical table procedure.
|
||||
let physical_table_id = table_id;
|
||||
// Creates the logical table metadata.
|
||||
let task = test_create_logical_table_task("foo");
|
||||
let mut procedure = CreateLogicalTablesProcedure::new(
|
||||
cluster_id,
|
||||
vec![task],
|
||||
physical_table_id,
|
||||
ddl_context.clone(),
|
||||
);
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let ctx = new_test_procedure_context();
|
||||
procedure.execute(&ctx).await.unwrap();
|
||||
// Triggers procedure to create table metadata
|
||||
let status = procedure.execute(&ctx).await.unwrap();
|
||||
let table_ids = status.downcast_output_ref::<Vec<u32>>().unwrap();
|
||||
assert_eq!(*table_ids, vec![1025]);
|
||||
|
||||
let expected_kvs = kv_backend.dump();
|
||||
// Drops the physical table
|
||||
{
|
||||
let task = new_drop_table_task("phy_table", physical_table_id, false);
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.on_delete_metadata().await.unwrap();
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
// Rollback again
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
let kvs = kv_backend.dump();
|
||||
assert_eq!(kvs, expected_kvs);
|
||||
}
|
||||
|
||||
// Drops the logical table
|
||||
let task = new_drop_table_task("foo", table_ids[0], false);
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.on_delete_metadata().await.unwrap();
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
// Rollback again
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
let kvs = kv_backend.dump();
|
||||
assert_eq!(kvs, expected_kvs);
|
||||
}
|
||||
|
||||
fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool) -> DropTableTask {
|
||||
DropTableTask {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table: table_name.to_string(),
|
||||
table_id,
|
||||
drop_if_exists,
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
|
||||
let cluster_id = 1;
|
||||
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
|
||||
|
||||
let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
|
||||
let logical_table_id =
|
||||
create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
|
||||
|
||||
let inner_test = |task: DropTableTask| async {
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
execute_procedure_until(&mut procedure, |p| {
|
||||
p.data.state == DropTableState::InvalidateTableCache
|
||||
})
|
||||
.await;
|
||||
|
||||
// Ensure that after running to the state `InvalidateTableCache`(just past `DeleteMetadata`),
|
||||
// the dropping regions should be recorded:
|
||||
let guards = &procedure.dropping_regions;
|
||||
assert_eq!(guards.len(), 1);
|
||||
let (datanode_id, region_id) = (0, RegionId::new(physical_table_id, 0));
|
||||
assert_eq!(guards[0].info(), (datanode_id, region_id));
|
||||
assert!(ddl_context
|
||||
.memory_region_keeper
|
||||
.contains(datanode_id, region_id));
|
||||
|
||||
execute_procedure_until_done(&mut procedure).await;
|
||||
|
||||
// Ensure that when run to the end, the dropping regions should be cleared:
|
||||
let guards = &procedure.dropping_regions;
|
||||
assert!(guards.is_empty());
|
||||
assert!(!ddl_context
|
||||
.memory_region_keeper
|
||||
.contains(datanode_id, region_id));
|
||||
};
|
||||
|
||||
inner_test(new_drop_table_task("s", logical_table_id, false)).await;
|
||||
inner_test(new_drop_table_task("t", physical_table_id, false)).await;
|
||||
}
|
||||
|
||||
@@ -258,7 +258,7 @@ pub enum Error {
        error: Utf8Error,
    },

    #[snafu(display("Table nod found, table: {}", table_name))]
    #[snafu(display("Table not found: '{}'", table_name))]
    TableNotFound {
        table_name: String,
        location: Location,
@@ -421,6 +421,9 @@ pub enum Error {
    #[snafu(display("Invalid role: {}", role))]
    InvalidRole { role: i32, location: Location },

    #[snafu(display("Failed to move values: {err_msg}"))]
    MoveValues { err_msg: String, location: Location },

    #[snafu(display("Failed to parse {} from utf8", name))]
    FromUtf8 {
        name: String,
@@ -440,7 +443,8 @@ impl ErrorExt for Error {
            | EtcdTxnOpResponse { .. }
            | EtcdFailed { .. }
            | EtcdTxnFailed { .. }
            | ConnectEtcd { .. } => StatusCode::Internal,
            | ConnectEtcd { .. }
            | MoveValues { .. } => StatusCode::Internal,

            SerdeJson { .. }
            | ParseOption { .. }

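The new `MoveValues` variant is routed to `StatusCode::Internal` alongside the etcd errors. Below is a stripped-down, std-only sketch of that mapping pattern, with simplified stand-ins for the real `snafu`-derived error type and status codes.

/// Simplified stand-ins for the real error variants and status codes.
#[allow(dead_code)]
#[derive(Debug)]
enum Error {
    EtcdFailed { err_msg: String },
    MoveValues { err_msg: String },
    TableNotFound { table_name: String },
}

#[derive(Debug, PartialEq)]
enum StatusCode {
    Internal,
    TableNotFound,
}

fn status_code(err: &Error) -> StatusCode {
    // Grouping several variants per arm mirrors the `match` in `ErrorExt::status_code`.
    match err {
        Error::EtcdFailed { .. } | Error::MoveValues { .. } => StatusCode::Internal,
        Error::TableNotFound { .. } => StatusCode::TableNotFound,
    }
}

fn main() {
    let err = Error::MoveValues { err_msg: "txn conflict".into() };
    assert_eq!(status_code(&err), StatusCode::Internal);
    let err = Error::TableNotFound { table_name: "foo".into() };
    assert_eq!(status_code(&err), StatusCode::TableNotFound);
}
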
@@ -21,6 +21,7 @@ use store_api::storage::{RegionId, RegionNumber};
|
||||
use strum::Display;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::key::schema_name::SchemaName;
|
||||
use crate::table_name::TableName;
|
||||
use crate::{ClusterId, DatanodeId};
|
||||
|
||||
@@ -156,6 +157,7 @@ pub struct UpgradeRegion {
|
||||
pub enum CacheIdent {
|
||||
TableId(TableId),
|
||||
TableName(TableName),
|
||||
SchemaName(SchemaName),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Display, PartialEq)]
|
||||
|
||||
@@ -56,9 +56,12 @@ pub mod table_region;
|
||||
pub mod table_route;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_utils;
|
||||
// TODO(weny): remove it.
|
||||
#[allow(dead_code)]
|
||||
mod tombstone;
|
||||
mod txn_helper;
|
||||
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
@@ -83,11 +86,15 @@ use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
|
||||
use self::datanode_table::RegionInfo;
|
||||
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
|
||||
use self::table_route::{TableRouteManager, TableRouteValue};
|
||||
use self::tombstone::TombstoneManager;
|
||||
use crate::ddl::utils::region_storage_path;
|
||||
use crate::error::{self, Result, SerdeJsonSnafu};
|
||||
use crate::kv_backend::txn::{Txn, TxnOpResponse};
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
use crate::table_name::TableName;
|
||||
use crate::DatanodeId;
|
||||
|
||||
@@ -97,7 +104,6 @@ pub const MAINTENANCE_KEY: &str = "maintenance";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";

pub const REMOVED_PREFIX: &str = "__removed";
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";

@@ -145,6 +151,33 @@ pub trait TableMetaKey {
    fn as_raw_key(&self) -> Vec<u8>;
}

pub(crate) trait TableMetaKeyGetTxnOp {
    fn build_get_op(
        &self,
    ) -> (
        TxnOp,
        impl for<'a> FnMut(&'a mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
    );
}

impl TableMetaKey for String {
    fn as_raw_key(&self) -> Vec<u8> {
        self.as_bytes().to_vec()
    }
}

impl TableMetaKeyGetTxnOp for String {
    fn build_get_op(
        &self,
    ) -> (
        TxnOp,
        impl for<'a> FnMut(&'a mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
    ) {
        let key = self.as_raw_key();
        (TxnOp::Get(key.clone()), TxnOpGetResponseSet::filter(key))
    }
}

pub trait TableMetaValue {
    fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
    where

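The `TableMetaKeyGetTxnOp` trait pairs each key's `TxnOp::Get` with a filter that later pulls the value back out of the batched response. Here is a self-contained sketch of that pattern with toy types; the shapes only loosely follow the real `TxnOp` and `TxnOpGetResponseSet`, and `Box<dyn FnMut>` stands in for the `impl FnMut` return used above.

use std::collections::HashMap;

/// Toy transaction op and response set, shaped loosely like the real ones.
#[allow(dead_code)]
enum TxnOp {
    Get(Vec<u8>),
}

struct TxnOpGetResponseSet(HashMap<Vec<u8>, Vec<u8>>);

impl TxnOpGetResponseSet {
    /// Returns a closure that extracts (and removes) the value stored for `key`.
    fn filter(key: Vec<u8>) -> impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>> {
        move |set| set.0.remove(&key)
    }
}

trait TableMetaKeyGetTxnOp {
    fn build_get_op(
        &self,
    ) -> (TxnOp, Box<dyn FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>>);
}

impl TableMetaKeyGetTxnOp for String {
    fn build_get_op(
        &self,
    ) -> (TxnOp, Box<dyn FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>>) {
        let key = self.as_bytes().to_vec();
        (TxnOp::Get(key.clone()), Box::new(TxnOpGetResponseSet::filter(key)))
    }
}

fn main() {
    // Pretend the backend already answered a batched txn of Get ops.
    let mut set = TxnOpGetResponseSet(
        [(b"__table_info/1024".to_vec(), b"{}".to_vec())].into_iter().collect(),
    );
    let (_op, mut decode) = "__table_info/1024".to_string().build_get_op();
    assert_eq!(decode(&mut set), Some(b"{}".to_vec()));
}
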
@@ -162,6 +195,7 @@ pub struct TableMetadataManager {
|
||||
catalog_manager: CatalogManager,
|
||||
schema_manager: SchemaManager,
|
||||
table_route_manager: TableRouteManager,
|
||||
tombstone_manager: TombstoneManager,
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
@@ -303,6 +337,7 @@ impl TableMetadataManager {
|
||||
catalog_manager: CatalogManager::new(kv_backend.clone()),
|
||||
schema_manager: SchemaManager::new(kv_backend.clone()),
|
||||
table_route_manager: TableRouteManager::new(kv_backend.clone()),
|
||||
tombstone_manager: TombstoneManager::new(kv_backend.clone()),
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
@@ -363,19 +398,16 @@ impl TableMetadataManager {
|
||||
Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
Option<DeserializedValueWithBytes<TableRouteValue>>,
|
||||
)> {
|
||||
let (get_table_route_txn, table_route_decoder) = self
|
||||
.table_route_manager
|
||||
.table_route_storage()
|
||||
.build_get_txn(table_id);
|
||||
let (get_table_info_txn, table_info_decoder) =
|
||||
self.table_info_manager.build_get_txn(table_id);
|
||||
|
||||
let txn = Txn::merge_all(vec![get_table_route_txn, get_table_info_txn]);
|
||||
let res = self.kv_backend.txn(txn).await?;
|
||||
|
||||
let table_info_value = table_info_decoder(&res.responses)?;
|
||||
let table_route_value = table_route_decoder(&res.responses)?;
|
||||
let table_info_key = TableInfoKey::new(table_id);
|
||||
let table_route_key = TableRouteKey::new(table_id);
|
||||
let (table_info_txn, table_info_filter) = table_info_key.build_get_op();
|
||||
let (table_route_txn, table_route_filter) = table_route_key.build_get_op();
|
||||
|
||||
let txn = Txn::new().and_then(vec![table_info_txn, table_route_txn]);
|
||||
let mut res = self.kv_backend.txn(txn).await?;
|
||||
let mut set = TxnOpGetResponseSet::from(&mut res.responses);
|
||||
let table_info_value = TxnOpGetResponseSet::decode_with(table_info_filter)(&mut set)?;
|
||||
let table_route_value = TxnOpGetResponseSet::decode_with(table_route_filter)(&mut set)?;
|
||||
Ok((table_info_value, table_route_value))
|
||||
}
|
||||
|
||||
@@ -434,17 +466,18 @@ impl TableMetadataManager {
|
||||
txn = txn.merge(create_datanode_table_txn);
|
||||
}
|
||||
|
||||
let r = self.kv_backend.txn(txn).await?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
// Checks whether metadata was already created.
|
||||
if !r.succeeded {
|
||||
let remote_table_info = on_create_table_info_failure(&r.responses)?
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_create_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_table_route = on_create_table_route_failure(&r.responses)?
|
||||
let remote_table_route = on_create_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the create table metadata",
|
||||
})?
|
||||
@@ -473,8 +506,8 @@ impl TableMetadataManager {
|
||||
let mut txns = Vec::with_capacity(3 * len);
|
||||
struct OnFailure<F1, R1, F2, R2>
|
||||
where
|
||||
F1: FnOnce(&Vec<TxnOpResponse>) -> R1,
|
||||
F2: FnOnce(&Vec<TxnOpResponse>) -> R2,
|
||||
F1: FnOnce(&mut TxnOpGetResponseSet) -> R1,
|
||||
F2: FnOnce(&mut TxnOpGetResponseSet) -> R2,
|
||||
{
|
||||
table_info_value: TableInfoValue,
|
||||
on_create_table_info_failure: F1,
|
||||
@@ -519,18 +552,19 @@ impl TableMetadataManager {
|
||||
}
|
||||
|
||||
let txn = Txn::merge_all(txns);
|
||||
let r = self.kv_backend.txn(txn).await?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
// Checks whether metadata was already created.
|
||||
if !r.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
for on_failure in on_failures {
|
||||
let remote_table_info = (on_failure.on_create_table_info_failure)(&r.responses)?
|
||||
let remote_table_info = (on_failure.on_create_table_info_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_table_route = (on_failure.on_create_table_route_failure)(&r.responses)?
|
||||
let remote_table_route = (on_failure.on_create_table_route_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the create table metadata",
|
||||
})?
|
||||
@@ -545,48 +579,91 @@ impl TableMetadataManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deletes metadata for table.
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn delete_table_metadata(
|
||||
fn table_metadata_keys(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<()> {
|
||||
// Deletes table name.
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<Vec<Vec<u8>>> {
|
||||
// Builds keys
|
||||
let datanode_ids = if table_route_value.is_physical() {
|
||||
region_distribution(table_route_value.region_routes()?)
|
||||
.into_keys()
|
||||
.collect()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let mut keys = Vec::with_capacity(3 + datanode_ids.len());
|
||||
let table_name = TableNameKey::new(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
);
|
||||
let table_info_key = TableInfoKey::new(table_id);
|
||||
let table_route_key = TableRouteKey::new(table_id);
|
||||
let datanode_table_keys = datanode_ids
|
||||
.into_iter()
|
||||
.map(|datanode_id| DatanodeTableKey::new(datanode_id, table_id))
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let delete_table_name_txn = self.table_name_manager().build_delete_txn(&table_name)?;
|
||||
keys.push(table_name.as_raw_key());
|
||||
keys.push(table_info_key.as_raw_key());
|
||||
keys.push(table_route_key.as_raw_key());
|
||||
for key in &datanode_table_keys {
|
||||
keys.push(key.as_raw_key());
|
||||
}
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
// Deletes table info.
|
||||
let delete_table_info_txn = self.table_info_manager().build_delete_txn(table_id)?;
|
||||
/// Deletes metadata for table **logically**.
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn delete_table_metadata(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.create(keys).await
|
||||
}
|
||||
|
||||
// Deletes datanode table key value pairs.
|
||||
let distribution = region_distribution(region_routes);
|
||||
let delete_datanode_txn = self
|
||||
.datanode_table_manager()
|
||||
.build_delete_txn(table_id, distribution)?;
|
||||
/// Deletes metadata tombstone for table **permanently**.
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn delete_table_metadata_tombstone(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.delete(keys).await
|
||||
}
|
||||
|
||||
// Deletes table route.
|
||||
let delete_table_route_txn = self
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
.build_delete_txn(table_id)?;
|
||||
|
||||
let txn = Txn::merge_all(vec![
|
||||
delete_table_name_txn,
|
||||
delete_table_info_txn,
|
||||
delete_datanode_txn,
|
||||
delete_table_route_txn,
|
||||
]);
|
||||
|
||||
// It's always successes.
|
||||
let _ = self.kv_backend.txn(txn).await?;
|
||||
/// Restores metadata for table.
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn restore_table_metadata(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.restore(keys).await
|
||||
}
|
||||
|
||||
/// Deletes metadata for table **permanently**.
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn destroy_table_metadata(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<()> {
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
let _ = self
|
||||
.kv_backend
|
||||
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -633,11 +710,12 @@ impl TableMetadataManager {
|
||||
|
||||
let txn = Txn::merge_all(vec![update_table_name_txn, update_table_info_txn]);
|
||||
|
||||
let r = self.kv_backend.txn(txn).await?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_info = on_update_table_info_failure(&r.responses)?
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_update_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the rename table metadata",
|
||||
})?
|
||||
@@ -665,11 +743,12 @@ impl TableMetadataManager {
|
||||
.table_info_manager()
|
||||
.build_update_txn(table_id, current_table_info_value, &new_table_info_value)?;
|
||||
|
||||
let r = self.kv_backend.txn(update_table_info_txn).await?;
|
||||
let mut r = self.kv_backend.txn(update_table_info_txn).await?;
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_info = on_update_table_info_failure(&r.responses)?
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_update_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the updating table info",
|
||||
})?
|
||||
@@ -693,7 +772,7 @@ impl TableMetadataManager {
|
||||
let mut txns = Vec::with_capacity(len);
|
||||
struct OnFailure<F, R>
|
||||
where
|
||||
F: FnOnce(&Vec<TxnOpResponse>) -> R,
|
||||
F: FnOnce(&mut TxnOpGetResponseSet) -> R,
|
||||
{
|
||||
table_info_value: TableInfoValue,
|
||||
on_update_table_info_failure: F,
|
||||
@@ -721,11 +800,12 @@ impl TableMetadataManager {
|
||||
}
|
||||
|
||||
let txn = Txn::merge_all(txns);
|
||||
let r = self.kv_backend.txn(txn).await?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
if !r.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
for on_failure in on_failures {
|
||||
let remote_table_info = (on_failure.on_update_table_info_failure)(&r.responses)?
|
||||
let remote_table_info = (on_failure.on_update_table_info_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the updating table info",
|
||||
})?
|
||||
@@ -772,11 +852,12 @@ impl TableMetadataManager {
|
||||
|
||||
let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);
|
||||
|
||||
let r = self.kv_backend.txn(txn).await?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_route = on_update_table_route_failure(&r.responses)?
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_route = on_update_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the updating table route",
|
||||
})?
|
||||
@@ -823,11 +904,12 @@ impl TableMetadataManager {
|
||||
.table_route_storage()
|
||||
.build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
|
||||
|
||||
let r = self.kv_backend.txn(update_table_route_txn).await?;
|
||||
let mut r = self.kv_backend.txn(update_table_route_txn).await?;
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_route = on_update_table_route_failure(&r.responses)?
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_route = on_update_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the updating leader region status",
|
||||
})?
|
||||
@@ -873,6 +955,38 @@ macro_rules! impl_table_meta_value {
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_table_meta_key_get_txn_op {
|
||||
($($key: ty), *) => {
|
||||
$(
|
||||
impl $crate::key::TableMetaKeyGetTxnOp for $key {
|
||||
/// Returns a [TxnOp] to retrieve the corresponding value
|
||||
/// and a filter to retrieve the value from the [TxnOpGetResponseSet]
|
||||
fn build_get_op(
|
||||
&self,
|
||||
) -> (
|
||||
TxnOp,
|
||||
impl for<'a> FnMut(
|
||||
&'a mut TxnOpGetResponseSet,
|
||||
) -> Option<Vec<u8>>,
|
||||
) {
|
||||
let raw_key = self.as_raw_key();
|
||||
(
|
||||
TxnOp::Get(raw_key.clone()),
|
||||
TxnOpGetResponseSet::filter(raw_key),
|
||||
)
|
||||
}
|
||||
}
|
||||
)*
|
||||
}
|
||||
}
|
||||
|
||||
impl_table_meta_key_get_txn_op! {
|
||||
TableNameKey<'_>,
|
||||
TableInfoKey,
|
||||
TableRouteKey,
|
||||
DatanodeTableKey
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! impl_optional_meta_value {
|
||||
($($val_ty: ty), *) => {
|
||||
@@ -907,6 +1021,7 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_time::util::current_time_millis;
|
||||
use futures::TryStreamExt;
|
||||
use store_api::storage::RegionId;
|
||||
@@ -914,6 +1029,7 @@ mod tests {
|
||||
|
||||
use super::datanode_table::DatanodeTableKey;
|
||||
use super::test_utils;
|
||||
use crate::ddl::test_util::create_table::test_create_table_task;
|
||||
use crate::ddl::utils::region_storage_path;
|
||||
use crate::error::Result;
|
||||
use crate::key::datanode_table::RegionInfo;
|
||||
@@ -1155,25 +1271,23 @@ mod tests {
|
||||
table_info.schema_name,
|
||||
table_info.name,
|
||||
);
|
||||
let table_route_value = &TableRouteValue::physical(region_routes.clone());
|
||||
// deletes metadata.
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(table_id, &table_name, region_routes)
|
||||
.delete_table_metadata(table_id, &table_name, table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// if metadata was already deleted, it should be ok.
|
||||
// Should be ignored.
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(table_id, &table_name, region_routes)
|
||||
.delete_table_metadata(table_id, &table_name, table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
|
||||
assert!(table_metadata_manager
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
@@ -1181,7 +1295,6 @@ mod tests {
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
|
||||
assert!(table_metadata_manager
|
||||
.datanode_table_manager()
|
||||
.tables(datanode_id)
|
||||
@@ -1196,7 +1309,6 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(table_info.is_none());
|
||||
|
||||
let table_route = table_metadata_manager
|
||||
.table_route_manager()
|
||||
.table_route_storage()
|
||||
@@ -1559,4 +1671,125 @@ mod tests {
|
||||
.await
|
||||
.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_destroy_table_metadata() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
|
||||
let table_id = 1025;
|
||||
let table_name = "foo";
|
||||
let task = test_create_table_task(table_name, table_id);
|
||||
let options = [(0, "test".to_string())].into();
|
||||
table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info,
|
||||
TableRouteValue::physical(vec![
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 1)),
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
follower_peers: vec![Peer::empty(5)],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 2)),
|
||||
leader_peer: Some(Peer::empty(2)),
|
||||
follower_peers: vec![Peer::empty(4)],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 3)),
|
||||
leader_peer: Some(Peer::empty(3)),
|
||||
follower_peers: vec![],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
options,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let table_name = TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name);
|
||||
let table_route_value = table_metadata_manager
|
||||
.table_route_manager
|
||||
.table_route_storage()
|
||||
.get_raw(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
table_metadata_manager
|
||||
.destroy_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(mem_kv.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_restore_table_metadata() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
|
||||
let table_id = 1025;
|
||||
let table_name = "foo";
|
||||
let task = test_create_table_task(table_name, table_id);
|
||||
let options = [(0, "test".to_string())].into();
|
||||
table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info,
|
||||
TableRouteValue::physical(vec![
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 1)),
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
follower_peers: vec![Peer::empty(5)],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 2)),
|
||||
leader_peer: Some(Peer::empty(2)),
|
||||
follower_peers: vec![Peer::empty(4)],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 3)),
|
||||
leader_peer: Some(Peer::empty(3)),
|
||||
follower_peers: vec![],
|
||||
leader_status: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
options,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let expected_result = mem_kv.dump();
|
||||
let table_route_value = table_metadata_manager
|
||||
.table_route_manager
|
||||
.table_route_storage()
|
||||
.get_raw(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let region_routes = table_route_value.region_routes().unwrap();
|
||||
let table_name = TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name);
|
||||
let table_route_value = TableRouteValue::physical(region_routes.clone());
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
table_metadata_manager
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
let kvs = mem_kv.dump();
|
||||
assert_eq!(kvs, expected_result);
|
||||
// Should be ignored.
|
||||
table_metadata_manager
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
let kvs = mem_kv.dump();
|
||||
assert_eq!(kvs, expected_result);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use futures::stream::BoxStream;
|
||||
use futures::StreamExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
@@ -84,11 +83,11 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
|
||||
}
|
||||
|
||||
/// Decoder `KeyValue` to ({catalog},())
|
||||
pub fn catalog_decoder(kv: KeyValue) -> Result<(String, ())> {
|
||||
pub fn catalog_decoder(kv: KeyValue) -> Result<String> {
|
||||
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
|
||||
let catalog_name = CatalogNameKey::try_from(str)?;
|
||||
|
||||
Ok((catalog_name.catalog.to_string(), ()))
|
||||
Ok(catalog_name.catalog.to_string())
|
||||
}
|
||||
|
||||
pub struct CatalogManager {
|
||||
@@ -134,7 +133,7 @@ impl CatalogManager {
|
||||
Arc::new(catalog_decoder),
|
||||
);
|
||||
|
||||
Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
|
||||
Box::pin(stream)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@ use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use futures::StreamExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::RegionNumber;
|
||||
@@ -55,6 +54,7 @@ pub struct RegionInfo {
|
||||
pub region_wal_options: HashMap<RegionNumber, String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||
pub struct DatanodeTableKey {
|
||||
pub datanode_id: DatanodeId,
|
||||
pub table_id: TableId,
|
||||
@@ -125,10 +125,8 @@ impl DatanodeTableValue {
|
||||
}
|
||||
|
||||
/// Decodes `KeyValue` to ((),`DatanodeTableValue`)
|
||||
pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<((), DatanodeTableValue)> {
|
||||
let value = DatanodeTableValue::try_from_raw_value(&kv.value)?;
|
||||
|
||||
Ok(((), value))
|
||||
pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<DatanodeTableValue> {
|
||||
DatanodeTableValue::try_from_raw_value(&kv.value)
|
||||
}
|
||||
|
||||
pub struct DatanodeTableManager {
|
||||
@@ -162,7 +160,7 @@ impl DatanodeTableManager {
|
||||
Arc::new(datanode_table_value_decoder),
|
||||
);
|
||||
|
||||
Box::pin(stream.map(|kv| kv.map(|kv| kv.1)))
|
||||
Box::pin(stream)
|
||||
}
|
||||
|
||||
/// Builds the create datanode table transactions. It only executes while the primary keys comparing successes.
|
||||
@@ -239,10 +237,14 @@ impl DatanodeTableManager {
|
||||
// FIXME(weny): add unit tests.
|
||||
let mut new_region_info = region_info.clone();
|
||||
if need_update_options {
|
||||
new_region_info.region_options = new_region_options.clone();
|
||||
new_region_info
|
||||
.region_options
|
||||
.clone_from(new_region_options);
|
||||
}
|
||||
if need_update_wal_options {
|
||||
new_region_info.region_wal_options = new_region_wal_options.clone();
|
||||
new_region_info
|
||||
.region_wal_options
|
||||
.clone_from(new_region_wal_options);
|
||||
}
|
||||
let val = DatanodeTableValue::new(table_id, regions, new_region_info)
|
||||
.try_as_raw_value()?;
|
||||
|
||||
@@ -19,7 +19,6 @@ use std::time::Duration;
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures::stream::BoxStream;
|
||||
use futures::StreamExt;
|
||||
use humantime_serde::re::humantime;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -103,11 +102,11 @@ impl TableMetaKey for SchemaNameKey<'_> {
|
||||
}
|
||||
|
||||
/// Decodes `KeyValue` to ({schema},())
|
||||
pub fn schema_decoder(kv: KeyValue) -> Result<(String, ())> {
|
||||
pub fn schema_decoder(kv: KeyValue) -> Result<String> {
|
||||
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
|
||||
let schema_name = SchemaNameKey::try_from(str)?;
|
||||
|
||||
Ok((schema_name.schema.to_string(), ()))
|
||||
Ok(schema_name.schema.to_string())
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
|
||||
@@ -193,7 +192,22 @@ impl SchemaManager {
|
||||
Arc::new(schema_decoder),
|
||||
);
|
||||
|
||||
Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
|
||||
Box::pin(stream)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
|
||||
pub struct SchemaName {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
}
|
||||
|
||||
impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
|
||||
fn from(value: &'a SchemaName) -> Self {
|
||||
Self {
|
||||
catalog: &value.catalog_name,
|
||||
schema: &value.schema_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -18,10 +18,12 @@ use serde::{Deserialize, Serialize};
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue, TABLE_INFO_KEY_PREFIX};
|
||||
use super::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::error::Result;
|
||||
use crate::key::TableMetaKey;
|
||||
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse};
|
||||
use crate::key::{
|
||||
txn_helper, DeserializedValueWithBytes, TableMetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX,
|
||||
};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
use crate::table_name::TableName;
|
||||
@@ -101,20 +103,6 @@ impl TableInfoManager {
|
||||
Self { kv_backend }
|
||||
}
|
||||
|
||||
pub(crate) fn build_get_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> (
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
) {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]);
|
||||
|
||||
(txn, txn_helper::build_txn_response_decoder_fn(raw_key))
|
||||
}
|
||||
|
||||
/// Builds a create table info transaction, it expected the `__table_info/{table_id}` wasn't occupied.
|
||||
pub(crate) fn build_create_txn(
|
||||
&self,
|
||||
@@ -122,7 +110,9 @@ impl TableInfoManager {
|
||||
table_info_value: &TableInfoValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
impl FnOnce(
|
||||
&mut TxnOpGetResponseSet,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
)> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -132,7 +122,10 @@ impl TableInfoManager {
|
||||
table_info_value.try_as_raw_value()?,
|
||||
);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
|
||||
))
|
||||
}
|
||||
|
||||
/// Builds a update table info transaction, it expected the remote value equals the `current_current_table_info_value`.
|
||||
@@ -144,7 +137,9 @@ impl TableInfoManager {
|
||||
new_table_info_value: &TableInfoValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
impl FnOnce(
|
||||
&mut TxnOpGetResponseSet,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
)> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -153,17 +148,10 @@ impl TableInfoManager {
|
||||
|
||||
let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);
|
||||
|
||||
Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
|
||||
}
|
||||
|
||||
/// Builds a delete table info transaction.
|
||||
pub(crate) fn build_delete_txn(&self, table_id: TableId) -> Result<Txn> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
|
||||
let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);
|
||||
|
||||
Ok(txn)
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
|
||||
))
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
|
||||
@@ -194,14 +194,6 @@ impl TableNameManager {
|
||||
Ok(txn)
|
||||
}
|
||||
|
||||
/// Builds a delete table name transaction. It only executes while the primary keys comparing successes.
|
||||
pub(crate) fn build_delete_txn(&self, key: &TableNameKey<'_>) -> Result<Txn> {
|
||||
let raw_key = key.as_raw_key();
|
||||
let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);
|
||||
|
||||
Ok(txn)
|
||||
}
|
||||
|
||||
pub async fn get(&self, key: TableNameKey<'_>) -> Result<Option<TableNameValue>> {
|
||||
let raw_key = key.as_raw_key();
|
||||
self.kv_backend
|
||||
|
||||
@@ -20,13 +20,16 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;

use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue};
use crate::error::{
    self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
    UnexpectedLogicalRouteTableSnafu,
};
use crate::key::{RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
    txn_helper, DeserializedValueWithBytes, RegionDistribution, TableMetaKey, TableMetaValue,
    TABLE_ROUTE_PREFIX,
};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::store::BatchGetRequest;
@@ -61,6 +64,27 @@ pub struct LogicalTableRouteValue {
}

impl TableRouteValue {
    /// Returns a [TableRouteValue::Physical] if `table_id` equals `physical_table_id`.
    /// Otherwise returns a [TableRouteValue::Logical].
    pub(crate) fn new(
        table_id: TableId,
        physical_table_id: TableId,
        region_routes: Vec<RegionRoute>,
    ) -> Self {
        if table_id == physical_table_id {
            TableRouteValue::physical(region_routes)
        } else {
            let region_routes = region_routes
                .into_iter()
                .map(|region| {
                    debug_assert_eq!(region.region.id.table_id(), physical_table_id);
                    RegionId::new(table_id, region.region.id.region_number())
                })
                .collect::<Vec<_>>();
            TableRouteValue::logical(physical_table_id, region_routes)
        }
    }

    pub fn physical(region_routes: Vec<RegionRoute>) -> Self {
        Self::Physical(PhysicalTableRouteValue::new(region_routes))
    }
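In other words, a logical table keeps the physical table's region numbers but re-keys every region under its own table id. A minimal illustration of that mapping (the standalone helper below is for explanation only, not part of the change):

```rust
use store_api::storage::RegionId;
use table::metadata::TableId;

/// Maps a physical region id to the corresponding logical region id:
/// same region number, different table id.
fn to_logical_region(logical_table_id: TableId, physical_region: RegionId) -> RegionId {
    RegionId::new(logical_table_id, physical_region.region_number())
}
```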
@@ -425,21 +449,6 @@ impl TableRouteStorage {
        Self { kv_backend }
    }

    /// Builds a get table route transaction(readonly).
    pub(crate) fn build_get_txn(
        &self,
        table_id: TableId,
    ) -> (
        Txn,
        impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
    ) {
        let key = TableRouteKey::new(table_id);
        let raw_key = key.as_raw_key();
        let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]);

        (txn, txn_helper::build_txn_response_decoder_fn(raw_key))
    }

    /// Builds a create table route transaction;
    /// it expects that `__table_route/{table_id}` is not occupied.
    pub fn build_create_txn(
@@ -448,7 +457,9 @@ impl TableRouteStorage {
        table_route_value: &TableRouteValue,
    ) -> Result<(
        Txn,
        impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
        impl FnOnce(
            &mut TxnOpGetResponseSet,
        ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
    )> {
        let key = TableRouteKey::new(table_id);
        let raw_key = key.as_raw_key();
@@ -458,7 +469,10 @@ impl TableRouteStorage {
            table_route_value.try_as_raw_value()?,
        );

        Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
        Ok((
            txn,
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
        ))
    }

    /// Builds an update table route transaction,
@@ -471,7 +485,9 @@ impl TableRouteStorage {
        new_table_route_value: &TableRouteValue,
    ) -> Result<(
        Txn,
        impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
        impl FnOnce(
            &mut TxnOpGetResponseSet,
        ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
    )> {
        let key = TableRouteKey::new(table_id);
        let raw_key = key.as_raw_key();
@@ -480,18 +496,10 @@ impl TableRouteStorage {

        let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value);

        Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key)))
    }

    /// Builds a delete table route transaction;
    /// it expects the remote value to equal the `table_route_value`.
    pub(crate) fn build_delete_txn(&self, table_id: TableId) -> Result<Txn> {
        let key = TableRouteKey::new(table_id);
        let raw_key = key.as_raw_key();

        let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);

        Ok(txn)
        Ok((
            txn,
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
        ))
    }

    /// Returns the [`TableRouteValue`].

src/common/meta/src/key/tombstone.rs (new file, 561 lines)
@@ -0,0 +1,561 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use snafu::ensure;

use crate::error::{self, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;

/// [TombstoneManager] provides the ability to:
/// - logically delete values
/// - restore the deleted values
pub(crate) struct TombstoneManager {
    kv_backend: KvBackendRef,
}

const TOMBSTONE_PREFIX: &str = "__tombstone/";

pub(crate) struct TombstoneKeyValue {
    pub(crate) origin_key: Vec<u8>,
    pub(crate) tombstone_key: Vec<u8>,
    pub(crate) value: Vec<u8>,
}

fn to_tombstone(key: &[u8]) -> Vec<u8> {
    [TOMBSTONE_PREFIX.as_bytes(), key].concat()
}

impl TombstoneManager {
    /// Returns [TombstoneManager].
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    /// Moves the value to `dest_key`.
    ///
    /// Puts `value` to `dest_key` if the value of `src_key` equals `value`.
    ///
    /// Otherwise retrieves the value of `src_key`.
    fn build_move_value_txn(
        &self,
        src_key: Vec<u8>,
        value: Vec<u8>,
        dest_key: Vec<u8>,
    ) -> (Txn, impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>) {
        let txn = Txn::new()
            .when(vec![Compare::with_value(
                src_key.clone(),
                CompareOp::Equal,
                value.clone(),
            )])
            .and_then(vec![
                TxnOp::Put(dest_key.clone(), value.clone()),
                TxnOp::Delete(src_key.clone()),
            ])
            .or_else(vec![TxnOp::Get(src_key.clone())]);

        (txn, TxnOpGetResponseSet::filter(src_key))
    }

    async fn move_values_inner(&self, keys: &[Vec<u8>], dest_keys: &[Vec<u8>]) -> Result<()> {
        ensure!(
            keys.len() == dest_keys.len(),
            error::UnexpectedSnafu {
                err_msg: "The length of keys does not match the length of dest_keys."
            }
        );
        // The key -> dest key mapping.
        let lookup_table = keys.iter().zip(dest_keys.iter()).collect::<HashMap<_, _>>();

        let resp = self
            .kv_backend
            .batch_get(BatchGetRequest::new().with_keys(keys.to_vec()))
            .await?;
        let mut results = resp
            .kvs
            .into_iter()
            .map(|kv| (kv.key, kv.value))
            .collect::<HashMap<_, _>>();

        const MAX_RETRIES: usize = 8;
        for _ in 0..MAX_RETRIES {
            let (txns, (keys, filters)): (Vec<_>, (Vec<_>, Vec<_>)) = results
                .iter()
                .map(|(key, value)| {
                    let (txn, filter) = self.build_move_value_txn(
                        key.clone(),
                        value.clone(),
                        lookup_table[&key].clone(),
                    );
                    (txn, (key.clone(), filter))
                })
                .unzip();
            let mut resp = self.kv_backend.txn(Txn::merge_all(txns)).await?;
            if resp.succeeded {
                return Ok(());
            }
            let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
            // Updates results.
            for (idx, mut filter) in filters.into_iter().enumerate() {
                if let Some(value) = filter(&mut set) {
                    results.insert(keys[idx].clone(), value);
                } else {
                    results.remove(&keys[idx]);
                }
            }
        }

        error::MoveValuesSnafu {
            err_msg: format!(
                "keys: {:?}",
                keys.iter().map(|key| String::from_utf8_lossy(key)),
            ),
        }
        .fail()
    }

    /// Moves values to `dest_keys`.
    async fn move_values(&self, keys: Vec<Vec<u8>>, dest_keys: Vec<Vec<u8>>) -> Result<()> {
        let chunk_size = self.kv_backend.max_txn_ops() / 2;
        if keys.len() > chunk_size {
            let keys_chunks = keys.chunks(chunk_size).collect::<Vec<_>>();
            let dest_keys_chunks = dest_keys.chunks(chunk_size).collect::<Vec<_>>();
            for (keys, dest_keys) in keys_chunks.into_iter().zip(dest_keys_chunks) {
                self.move_values_inner(keys, dest_keys).await?;
            }

            Ok(())
        } else {
            self.move_values_inner(&keys, &dest_keys).await
        }
    }

    /// Creates tombstones for keys.
    ///
    /// Performs the following:
    /// - deletes the origin values.
    /// - stores the tombstone values.
    pub(crate) async fn create(&self, keys: Vec<Vec<u8>>) -> Result<()> {
        let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
            .into_iter()
            .map(|key| {
                let tombstone_key = to_tombstone(&key);
                (key, tombstone_key)
            })
            .unzip();

        self.move_values(keys, dest_keys).await
    }

    /// Restores tombstones for keys.
    ///
    /// Performs the following:
    /// - restores the origin values.
    /// - deletes the tombstone values.
    pub(crate) async fn restore(&self, keys: Vec<Vec<u8>>) -> Result<()> {
        let (keys, dest_keys): (Vec<_>, Vec<_>) = keys
            .into_iter()
            .map(|key| {
                let tombstone_key = to_tombstone(&key);
                (tombstone_key, key)
            })
            .unzip();

        self.move_values(keys, dest_keys).await
    }

    /// Deletes tombstone values for the specified `keys`.
    pub(crate) async fn delete(&self, keys: Vec<Vec<u8>>) -> Result<()> {
        let operations = keys
            .iter()
            .map(|key| TxnOp::Delete(to_tombstone(key)))
            .collect::<Vec<_>>();

        let txn = Txn::new().and_then(operations);
        // Always succeeds.
        let _ = self.kv_backend.txn(txn).await?;
        Ok(())
    }
}
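Taken together, `create`/`restore`/`delete` implement logical deletion as an atomic compare-and-move under the `__tombstone/` prefix, retried with refreshed values when a compare fails. A condensed usage sketch, mirroring the tests that follow (error handling elided):

```rust
use std::sync::Arc;

use crate::kv_backend::memory::MemoryKvBackend;

let kv_backend = Arc::new(MemoryKvBackend::default());
let tombstone_manager = TombstoneManager::new(kv_backend.clone());

// Logical delete: the live values move under the "__tombstone/" prefix.
tombstone_manager
    .create(vec![b"bar".to_vec(), b"foo".to_vec()])
    .await?;

// Either bring the original keys back ...
tombstone_manager
    .restore(vec![b"bar".to_vec(), b"foo".to_vec()])
    .await?;

// ... or, after another `create`, make the deletion permanent by dropping the tombstones.
tombstone_manager
    .create(vec![b"bar".to_vec(), b"foo".to_vec()])
    .await?;
tombstone_manager
    .delete(vec![b"bar".to_vec(), b"foo".to_vec()])
    .await?;
```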
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::to_tombstone;
|
||||
use crate::error::Error;
|
||||
use crate::key::tombstone::TombstoneManager;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::KvBackend;
|
||||
use crate::rpc::store::PutRequest;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct MoveValue {
|
||||
key: Vec<u8>,
|
||||
dest_key: Vec<u8>,
|
||||
value: Vec<u8>,
|
||||
}
|
||||
|
||||
async fn check_moved_values(
|
||||
kv_backend: Arc<MemoryKvBackend<Error>>,
|
||||
move_values: &[MoveValue],
|
||||
) {
|
||||
for MoveValue {
|
||||
key,
|
||||
dest_key,
|
||||
value,
|
||||
} in move_values
|
||||
{
|
||||
assert!(kv_backend.get(key).await.unwrap().is_none());
|
||||
assert_eq!(
|
||||
&kv_backend.get(dest_key).await.unwrap().unwrap().value,
|
||||
value,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_tombstone() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("bar").with_value("baz"))
|
||||
.await
|
||||
.unwrap();
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("foo").with_value("hi"))
|
||||
.await
|
||||
.unwrap();
|
||||
tombstone_manager
|
||||
.create(vec![b"bar".to_vec(), b"foo".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!kv_backend.exists(b"bar").await.unwrap());
|
||||
assert!(!kv_backend.exists(b"foo").await.unwrap());
|
||||
assert_eq!(
|
||||
kv_backend
|
||||
.get(&to_tombstone(b"bar"))
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.value,
|
||||
b"baz"
|
||||
);
|
||||
assert_eq!(
|
||||
kv_backend
|
||||
.get(&to_tombstone(b"foo"))
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.value,
|
||||
b"hi"
|
||||
);
|
||||
assert_eq!(kv_backend.len(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_tombstone_with_non_exist_values() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("bar").with_value("baz"))
|
||||
.await
|
||||
.unwrap();
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("foo").with_value("hi"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tombstone_manager
|
||||
.create(vec![b"bar".to_vec(), b"baz".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(
|
||||
kv_backend.clone(),
|
||||
&[MoveValue {
|
||||
key: b"bar".to_vec(),
|
||||
dest_key: to_tombstone(b"bar"),
|
||||
value: b"baz".to_vec(),
|
||||
}],
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_restore_tombstone() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("bar").with_value("baz"))
|
||||
.await
|
||||
.unwrap();
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("foo").with_value("hi"))
|
||||
.await
|
||||
.unwrap();
|
||||
let expected_kvs = kv_backend.dump();
|
||||
tombstone_manager
|
||||
.create(vec![b"bar".to_vec(), b"foo".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
tombstone_manager
|
||||
.restore(vec![b"bar".to_vec(), b"foo".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(expected_kvs, kv_backend.dump());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_tombstone() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("bar").with_value("baz"))
|
||||
.await
|
||||
.unwrap();
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("foo").with_value("hi"))
|
||||
.await
|
||||
.unwrap();
|
||||
tombstone_manager
|
||||
.create(vec![b"bar".to_vec(), b"foo".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
tombstone_manager
|
||||
.delete(vec![b"bar".to_vec(), b"foo".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(kv_backend.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"baz".to_vec()),
|
||||
(b"foo".to_vec(), b"hi".to_vec()),
|
||||
(b"baz".to_vec(), b"hello".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
// Moves again
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values_with_non_exists_values() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"baz".to_vec()),
|
||||
(b"foo".to_vec(), b"hi".to_vec()),
|
||||
(b"baz".to_vec(), b"hello".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (mut keys, mut dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
keys.push(b"non-exists".to_vec());
|
||||
dest_keys.push(b"hi/non-exists".to_vec());
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
// Moves again
|
||||
tombstone_manager
|
||||
.move_values(keys.clone(), dest_keys.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values_changed() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"baz".to_vec()),
|
||||
(b"foo".to_vec(), b"hi".to_vec()),
|
||||
(b"baz".to_vec(), b"hello".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("baz").with_value("changed"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
tombstone_manager
|
||||
.move_values(keys, dest_keys)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_values_overwrite_dest_values() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let tombstone_manager = TombstoneManager::new(kv_backend.clone());
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"baz".to_vec()),
|
||||
(b"foo".to_vec(), b"hi".to_vec()),
|
||||
(b"baz".to_vec(), b"hello".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Prepares
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
tombstone_manager
|
||||
.move_values(keys, dest_keys)
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
|
||||
// Overwrites existing dest keys.
|
||||
let kvs = HashMap::from([
|
||||
(b"bar".to_vec(), b"new baz".to_vec()),
|
||||
(b"foo".to_vec(), b"new hi".to_vec()),
|
||||
(b"baz".to_vec(), b"new baz".to_vec()),
|
||||
]);
|
||||
for (key, value) in &kvs {
|
||||
kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(key.clone())
|
||||
.with_value(value.clone()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let move_values = kvs
|
||||
.iter()
|
||||
.map(|(key, value)| MoveValue {
|
||||
key: key.clone(),
|
||||
dest_key: to_tombstone(key),
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let (keys, dest_keys): (Vec<_>, Vec<_>) = move_values
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.dest_key))
|
||||
.unzip();
|
||||
tombstone_manager
|
||||
.move_values(keys, dest_keys)
|
||||
.await
|
||||
.unwrap();
|
||||
check_moved_values(kv_backend.clone(), &move_values).await;
|
||||
}
|
||||
}
|
||||
@@ -18,27 +18,53 @@ use serde::Serialize;
use crate::error::Result;
use crate::key::{DeserializedValueWithBytes, TableMetaValue};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::rpc::KeyValue;

pub(crate) fn build_txn_response_decoder_fn<T>(
    raw_key: Vec<u8>,
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<T>>>
where
    T: Serialize + DeserializeOwned + TableMetaValue,
{
    move |txn_res: &Vec<TxnOpResponse>| {
        txn_res
            .iter()
            .filter_map(|resp| {
                if let TxnOpResponse::ResponseGet(r) = resp {
                    Some(r)
                } else {
                    None
                }
/// The response set of [TxnOpResponse::ResponseGet]
pub struct TxnOpGetResponseSet(Vec<KeyValue>);

impl TxnOpGetResponseSet {
    /// Returns a filter to consume a [KeyValue] where the key equals `key`.
    pub(crate) fn filter(key: Vec<u8>) -> impl FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>> {
        move |set| {
            let pos = set.0.iter().position(|kv| kv.key == key);
            match pos {
                Some(pos) => Some(set.0.remove(pos).value),
                None => None,
            }
        }
    }

    /// Returns a decoder to decode bytes to `DeserializedValueWithBytes<T>`.
    pub(crate) fn decode_with<F, T>(
        mut f: F,
    ) -> impl FnMut(&mut TxnOpGetResponseSet) -> Result<Option<DeserializedValueWithBytes<T>>>
    where
        F: FnMut(&mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
        T: Serialize + DeserializeOwned + TableMetaValue,
    {
        move |set| {
            f(set)
                .map(|value| DeserializedValueWithBytes::from_inner_slice(&value))
                .transpose()
        }
    }
}

impl From<&mut Vec<TxnOpResponse>> for TxnOpGetResponseSet {
    fn from(value: &mut Vec<TxnOpResponse>) -> Self {
        let value = value
            .extract_if(|resp| matches!(resp, TxnOpResponse::ResponseGet(_)))
            .flat_map(|resp| {
                // Safety: checked
                let TxnOpResponse::ResponseGet(r) = resp else {
                    unreachable!()
                };

                r.kvs
            })
            .flat_map(|r| &r.kvs)
            .find(|kv| kv.key == raw_key)
            .map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
            .transpose()
            .collect::<Vec<_>>();
        TxnOpGetResponseSet(value)
    }
}

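The two helpers are designed to compose: `filter` pulls the raw bytes for a single key out of the drained `ResponseGet` set, and `decode_with` turns them into a typed value. A sketch of the full round trip (the `TableInfoValue` type parameter and the `kv_backend`/`txn` values are assumptions for illustration):

```rust
// Build the decoder up front, from a filter bound to the key of interest.
let filter = TxnOpGetResponseSet::filter(raw_key.clone());
let mut decoder = TxnOpGetResponseSet::decode_with(filter);

let mut resp = kv_backend.txn(txn).await?;
// Drain every `TxnOpResponse::ResponseGet` into one set ...
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
// ... then extract and deserialize the value stored under `raw_key`.
let remote: Option<DeserializedValueWithBytes<TableInfoValue>> = decoder(&mut set)?;
```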
@@ -70,6 +70,25 @@ impl<T> MemoryKvBackend<T> {
        let mut kvs = self.kvs.write().unwrap();
        kvs.clear();
    }

    #[cfg(test)]
    /// Returns true if the `kvs` is empty.
    pub fn is_empty(&self) -> bool {
        self.kvs.read().unwrap().is_empty()
    }

    #[cfg(test)]
    /// Returns the `kvs`.
    pub fn dump(&self) -> BTreeMap<Vec<u8>, Vec<u8>> {
        let kvs = self.kvs.read().unwrap();
        kvs.clone()
    }

    #[cfg(test)]
    /// Returns the length of `kvs`.
    pub fn len(&self) -> usize {
        self.kvs.read().unwrap().len()
    }
}

#[async_trait]

@@ -16,6 +16,7 @@
#![feature(btree_extract_if)]
#![feature(async_closure)]
#![feature(let_chains)]
#![feature(extract_if)]

pub mod cache_invalidator;
pub mod cluster;

@@ -28,13 +28,13 @@ use crate::rpc::store::{RangeRequest, RangeResponse};
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::util::get_next_prefix_key;
|
||||
|
||||
pub type KeyValueDecoderFn<K, V> = dyn Fn(KeyValue) -> Result<(K, V)> + Send + Sync;
|
||||
pub type KeyValueDecoderFn<T> = dyn Fn(KeyValue) -> Result<T> + Send + Sync;
|
||||
|
||||
enum PaginationStreamState<K, V> {
|
||||
enum PaginationStreamState<T> {
|
||||
/// At the start of reading.
|
||||
Init,
|
||||
/// Decoding key value pairs.
|
||||
Decoding(SimpleKeyValueDecoder<K, V>),
|
||||
Decoding(SimpleKeyValueDecoder<T>),
|
||||
/// Retrieving data from backend.
|
||||
Reading(BoxFuture<'static, Result<(PaginationStreamFactory, Option<RangeResponse>)>>),
|
||||
/// Error
|
||||
@@ -77,7 +77,7 @@ struct PaginationStreamFactory {
|
||||
}
|
||||
|
||||
impl PaginationStreamFactory {
|
||||
pub fn new(
|
||||
fn new(
|
||||
kv: &KvBackendRef,
|
||||
key: Vec<u8>,
|
||||
range_end: Vec<u8>,
|
||||
@@ -137,7 +137,7 @@ impl PaginationStreamFactory {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
|
||||
async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
|
||||
if self.more {
|
||||
let resp = self
|
||||
.adaptive_range(RangeRequest {
|
||||
@@ -174,18 +174,19 @@ impl PaginationStreamFactory {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PaginationStream<K, V> {
|
||||
state: PaginationStreamState<K, V>,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
|
||||
pub struct PaginationStream<T> {
|
||||
state: PaginationStreamState<T>,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
factory: Option<PaginationStreamFactory>,
|
||||
}
|
||||
|
||||
impl<K, V> PaginationStream<K, V> {
|
||||
impl<T> PaginationStream<T> {
|
||||
/// Returns a new [PaginationStream].
|
||||
pub fn new(
|
||||
kv: KvBackendRef,
|
||||
req: RangeRequest,
|
||||
page_size: usize,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: PaginationStreamState::Init,
|
||||
@@ -202,13 +203,13 @@ impl<K, V> PaginationStream<K, V> {
|
||||
}
|
||||
}
|
||||
|
||||
struct SimpleKeyValueDecoder<K, V> {
|
||||
struct SimpleKeyValueDecoder<T> {
|
||||
kv: VecDeque<KeyValue>,
|
||||
decoder: Arc<KeyValueDecoderFn<K, V>>,
|
||||
decoder: Arc<KeyValueDecoderFn<T>>,
|
||||
}
|
||||
|
||||
impl<K, V> Iterator for SimpleKeyValueDecoder<K, V> {
|
||||
type Item = Result<(K, V)>;
|
||||
impl<T> Iterator for SimpleKeyValueDecoder<T> {
|
||||
type Item = Result<T>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if let Some(kv) = self.kv.pop_front() {
|
||||
@@ -219,8 +220,8 @@ impl<K, V> Iterator for SimpleKeyValueDecoder<K, V> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Stream for PaginationStream<K, V> {
|
||||
type Item = Result<(K, V)>;
|
||||
impl<T> Stream for PaginationStream<T> {
|
||||
type Item = Result<T>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
loop {
|
||||
|
||||
@@ -97,7 +97,6 @@ impl Display for RangeRequest {
|
||||
}
|
||||
|
||||
impl RangeRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
key: vec![],
|
||||
@@ -114,7 +113,6 @@ impl RangeRequest {
|
||||
|
||||
/// key is the first key for the range, If range_end is not given, the
|
||||
/// request only looks up key.
|
||||
#[inline]
|
||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self
|
||||
@@ -129,7 +127,6 @@ impl RangeRequest {
|
||||
/// then the range request gets all keys prefixed with key.
|
||||
/// If both key and range_end are '\0', then the range request returns all
|
||||
/// keys.
|
||||
#[inline]
|
||||
pub fn with_range(mut self, key: impl Into<Vec<u8>>, range_end: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self.range_end = range_end.into();
|
||||
@@ -138,7 +135,6 @@ impl RangeRequest {
|
||||
|
||||
/// Gets all keys prefixed with key.
|
||||
/// range_end is the key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
|
||||
#[inline]
|
||||
pub fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self.range_end = util::get_prefix_end_key(&self.key);
|
||||
@@ -147,14 +143,12 @@ impl RangeRequest {
|
||||
|
||||
/// limit is a limit on the number of keys returned for the request. When
|
||||
/// limit is set to 0, it is treated as no limit.
|
||||
#[inline]
|
||||
pub fn with_limit(mut self, limit: i64) -> Self {
|
||||
self.limit = limit;
|
||||
self
|
||||
}
|
||||
|
||||
/// keys_only when set returns only the keys and not the values.
|
||||
#[inline]
|
||||
pub fn with_keys_only(mut self) -> Self {
|
||||
self.keys_only = true;
|
||||
self
|
||||
@@ -204,7 +198,6 @@ impl RangeResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_kvs(&mut self) -> Vec<KeyValue> {
|
||||
self.kvs.drain(..).collect()
|
||||
}
|
||||
@@ -244,7 +237,6 @@ impl From<PbPutRequest> for PutRequest {
|
||||
}
|
||||
|
||||
impl PutRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
key: vec![],
|
||||
@@ -254,7 +246,6 @@ impl PutRequest {
|
||||
}
|
||||
|
||||
/// key is the key, in bytes, to put into the key-value store.
|
||||
#[inline]
|
||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self
|
||||
@@ -262,7 +253,6 @@ impl PutRequest {
|
||||
|
||||
/// value is the value, in bytes, to associate with the key in the
|
||||
/// key-value store.
|
||||
#[inline]
|
||||
pub fn with_value(mut self, value: impl Into<Vec<u8>>) -> Self {
|
||||
self.value = value.into();
|
||||
self
|
||||
@@ -270,7 +260,6 @@ impl PutRequest {
|
||||
|
||||
/// If prev_kv is set, gets the previous key-value pair before changing it.
|
||||
/// The previous key-value pair will be returned in the put response.
|
||||
#[inline]
|
||||
pub fn with_prev_kv(mut self) -> Self {
|
||||
self.prev_kv = true;
|
||||
self
|
||||
@@ -330,18 +319,15 @@ impl Default for BatchGetRequest {
|
||||
}
|
||||
|
||||
impl BatchGetRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self { keys: vec![] }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn with_keys(mut self, keys: Vec<Vec<u8>>) -> Self {
|
||||
self.keys = keys;
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.keys.push(key.into());
|
||||
self
|
||||
@@ -416,7 +402,6 @@ impl From<PbBatchPutRequest> for BatchPutRequest {
|
||||
}
|
||||
|
||||
impl BatchPutRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
kvs: vec![],
|
||||
@@ -424,7 +409,6 @@ impl BatchPutRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_kv(mut self, key: impl Into<Vec<u8>>, value: impl Into<Vec<u8>>) -> Self {
|
||||
self.kvs.push(KeyValue {
|
||||
key: key.into(),
|
||||
@@ -435,7 +419,6 @@ impl BatchPutRequest {
|
||||
|
||||
/// If prev_kv is set, gets the previous key-value pair before changing it.
|
||||
/// The previous key-value pair will be returned in the put response.
|
||||
#[inline]
|
||||
pub fn with_prev_kv(mut self) -> Self {
|
||||
self.prev_kv = true;
|
||||
self
|
||||
@@ -467,7 +450,6 @@ impl BatchPutResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
||||
self.prev_kvs.drain(..).collect()
|
||||
}
|
||||
@@ -501,7 +483,6 @@ impl From<PbBatchDeleteRequest> for BatchDeleteRequest {
|
||||
}
|
||||
|
||||
impl BatchDeleteRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
keys: vec![],
|
||||
@@ -509,7 +490,12 @@ impl BatchDeleteRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Sets `keys`.
|
||||
pub fn with_keys(mut self, keys: Vec<Vec<u8>>) -> Self {
|
||||
self.keys = keys;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.keys.push(key.into());
|
||||
self
|
||||
@@ -517,7 +503,6 @@ impl BatchDeleteRequest {
|
||||
|
||||
/// If prev_kv is set, gets the previous key-value pair before deleting it.
|
||||
/// The previous key-value pair will be returned in the batch delete response.
|
||||
#[inline]
|
||||
pub fn with_prev_kv(mut self) -> Self {
|
||||
self.prev_kv = true;
|
||||
self
|
||||
@@ -582,7 +567,6 @@ impl From<PbCompareAndPutRequest> for CompareAndPutRequest {
|
||||
}
|
||||
|
||||
impl CompareAndPutRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
key: vec![],
|
||||
@@ -592,14 +576,12 @@ impl CompareAndPutRequest {
|
||||
}
|
||||
|
||||
/// key is the key, in bytes, to put into the key-value store.
|
||||
#[inline]
|
||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self
|
||||
}
|
||||
|
||||
/// expect is the previous value, in bytes
|
||||
#[inline]
|
||||
pub fn with_expect(mut self, expect: impl Into<Vec<u8>>) -> Self {
|
||||
self.expect = expect.into();
|
||||
self
|
||||
@@ -607,7 +589,6 @@ impl CompareAndPutRequest {
|
||||
|
||||
/// value is the value, in bytes, to associate with the key in the
|
||||
/// key-value store.
|
||||
#[inline]
|
||||
pub fn with_value(mut self, value: impl Into<Vec<u8>>) -> Self {
|
||||
self.value = value.into();
|
||||
self
|
||||
@@ -649,12 +630,10 @@ impl CompareAndPutResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn is_success(&self) -> bool {
|
||||
self.success
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_prev_kv(&mut self) -> Option<KeyValue> {
|
||||
self.prev_kv.take()
|
||||
}
|
||||
@@ -703,7 +682,6 @@ impl From<PbDeleteRangeRequest> for DeleteRangeRequest {
|
||||
}
|
||||
|
||||
impl DeleteRangeRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
key: vec![],
|
||||
@@ -719,7 +697,6 @@ impl DeleteRangeRequest {
|
||||
|
||||
/// key is the first key to delete in the range. If range_end is not given,
|
||||
/// the range is defined to contain only the key argument.
|
||||
#[inline]
|
||||
pub fn with_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self
|
||||
@@ -735,7 +712,6 @@ impl DeleteRangeRequest {
|
||||
/// the keys with the prefix (the given key).
|
||||
/// If range_end is '\0', the range is all keys greater than or equal to the
|
||||
/// key argument.
|
||||
#[inline]
|
||||
pub fn with_range(mut self, key: impl Into<Vec<u8>>, range_end: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self.range_end = range_end.into();
|
||||
@@ -744,7 +720,6 @@ impl DeleteRangeRequest {
|
||||
|
||||
/// Deletes all keys prefixed with key.
|
||||
/// range_end is one bit larger than the given key.
|
||||
#[inline]
|
||||
pub fn with_prefix(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.key = key.into();
|
||||
self.range_end = util::get_prefix_end_key(&self.key);
|
||||
@@ -753,7 +728,6 @@ impl DeleteRangeRequest {
|
||||
|
||||
/// If prev_kv is set, gets the previous key-value pairs before deleting it.
|
||||
/// The previous key-value pairs will be returned in the delete response.
|
||||
#[inline]
|
||||
pub fn with_prev_kv(mut self) -> Self {
|
||||
self.prev_kv = true;
|
||||
self
|
||||
@@ -788,12 +762,10 @@ impl DeleteRangeResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn deleted(&self) -> i64 {
|
||||
self.deleted
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
||||
self.prev_kvs.drain(..).collect()
|
||||
}
|
||||
|
||||
@@ -172,9 +172,7 @@ impl Inner {
|
||||
|
||||
if !res.success {
|
||||
if let Some(kv) = res.prev_kv {
|
||||
expect = kv.value.clone();
|
||||
|
||||
let v: [u8; 8] = match kv.value.try_into() {
|
||||
let v: [u8; 8] = match kv.value.clone().try_into() {
|
||||
Ok(a) => a,
|
||||
Err(v) => {
|
||||
return error::UnexpectedSequenceValueSnafu {
|
||||
@@ -184,13 +182,12 @@ impl Inner {
|
||||
}
|
||||
};
|
||||
let v = u64::from_le_bytes(v);
|
||||
|
||||
// If the existing value is smaller than the initial, we should start from the initial.
|
||||
start = v.max(self.initial);
|
||||
expect = kv.value;
|
||||
} else {
|
||||
expect = vec![];
|
||||
|
||||
start = self.initial;
|
||||
expect = vec![];
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ use common_procedure::store::util::multiple_value_stream;
|
||||
use common_procedure::Result as ProcedureResult;
|
||||
use futures::future::try_join_all;
|
||||
use futures::StreamExt;
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -79,17 +80,21 @@ fn decode_kv(kv: KeyValue) -> Result<(String, Vec<u8>)> {
|
||||
Ok((key, value))
|
||||
}
|
||||
|
||||
enum SplitValue<'a> {
|
||||
Single(&'a [u8]),
|
||||
Multiple(Vec<&'a [u8]>),
|
||||
enum SplitValue {
|
||||
Single(Vec<u8>),
|
||||
Multiple(Vec<Vec<u8>>),
|
||||
}
|
||||
|
||||
fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
|
||||
fn split_value(value: Vec<u8>, max_value_size: Option<usize>) -> SplitValue {
|
||||
if let Some(max_value_size) = max_value_size {
|
||||
if value.len() <= max_value_size {
|
||||
SplitValue::Single(value)
|
||||
} else {
|
||||
SplitValue::Multiple(value.chunks(max_value_size).collect::<Vec<_>>())
|
||||
let mut values = vec![];
|
||||
for chunk in value.into_iter().chunks(max_value_size).into_iter() {
|
||||
values.push(chunk.collect());
|
||||
}
|
||||
SplitValue::Multiple(values)
|
||||
}
|
||||
} else {
|
||||
SplitValue::Single(value)
|
||||
@@ -99,10 +104,10 @@ fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
|
||||
#[async_trait]
|
||||
impl StateStore for KvStateStore {
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> ProcedureResult<()> {
|
||||
let split = split_value(&value, self.max_value_size);
|
||||
let split = split_value(value, self.max_value_size);
|
||||
let key = with_prefix(key);
|
||||
match split {
|
||||
SplitValue::Single(_) => {
|
||||
SplitValue::Single(value) => {
|
||||
self.kv_backend
|
||||
.put(
|
||||
PutRequest::new()
|
||||
|
||||
@@ -14,19 +14,19 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
pub use common_base::AffectedRows;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
|
||||
use crate::cache_invalidator::DummyCacheInvalidator;
|
||||
use crate::datanode_manager::{
|
||||
Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef, HandleResponse,
|
||||
};
|
||||
use crate::datanode_manager::{Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef};
|
||||
use crate::ddl::table_meta::TableMetadataAllocator;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::Result;
|
||||
use crate::key::TableMetadataManager;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::peer::Peer;
|
||||
use crate::region_keeper::MemoryRegionKeeper;
|
||||
use crate::sequence::SequenceBuilder;
|
||||
@@ -34,7 +34,7 @@ use crate::wal_options_allocator::WalOptionsAllocator;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait MockDatanodeHandler: Sync + Send + Clone {
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse>;
|
||||
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<RegionResponse>;
|
||||
|
||||
async fn handle_query(
|
||||
&self,
|
||||
@@ -64,7 +64,7 @@ struct MockDatanode<T> {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<T: MockDatanodeHandler> Datanode for MockDatanode<T> {
|
||||
async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
|
||||
async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
|
||||
self.handler.handle(&self.peer, request).await
|
||||
}
|
||||
|
||||
@@ -86,6 +86,14 @@ impl<T: MockDatanodeHandler + 'static> DatanodeManager for MockDatanodeManager<T
|
||||
/// Returns a test purpose [DdlContext].
|
||||
pub fn new_ddl_context(datanode_manager: DatanodeManagerRef) -> DdlContext {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
new_ddl_context_with_kv_backend(datanode_manager, kv_backend)
|
||||
}
|
||||
|
||||
/// Returns a test purpose [DdlContext] with a specified [KvBackendRef].
|
||||
pub fn new_ddl_context_with_kv_backend(
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> DdlContext {
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
|
||||
|
||||
DdlContext {
|
||||
|
||||
@@ -163,7 +163,7 @@ mod tests {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
|
||||
let mut topic_manager = KafkaTopicManager::new(config.clone(), kv_backend);
|
||||
// Replaces the default topic pool with the constructed topics.
|
||||
topic_manager.topic_pool = topics.clone();
|
||||
topic_manager.topic_pool.clone_from(&topics);
|
||||
// Replaces the default selector with a round-robin selector without shuffling.
|
||||
topic_manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
|
||||
|
||||
|
||||
@@ -291,7 +291,7 @@ mod tests {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
|
||||
let mut manager = TopicManager::new(config.clone(), kv_backend);
|
||||
// Replaces the default topic pool with the constructed topics.
|
||||
manager.topic_pool = topics.clone();
|
||||
manager.topic_pool.clone_from(&topics);
|
||||
// Replaces the default selector with a round-robin selector without shuffling.
|
||||
manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
|
||||
manager.start().await.unwrap();
|
||||
|
||||
@@ -114,3 +114,29 @@ pub async fn execute_until_suspended_or_done(
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
pub fn new_test_procedure_context() -> Context {
|
||||
Context {
|
||||
procedure_id: ProcedureId::random(),
|
||||
provider: Arc::new(MockContextProvider::default()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn execute_procedure_until<P: Procedure>(procedure: &mut P, until: impl Fn(&P) -> bool) {
|
||||
let mut reached = false;
|
||||
let context = new_test_procedure_context();
|
||||
while !matches!(
|
||||
procedure.execute(&context).await.unwrap(),
|
||||
Status::Done { .. }
|
||||
) {
|
||||
if until(procedure) {
|
||||
reached = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert!(
|
||||
reached,
|
||||
"procedure '{}' did not reach the expected state",
|
||||
procedure.type_name()
|
||||
);
|
||||
}
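The helper above drives a procedure step by step until a caller-supplied predicate holds (or the procedure finishes, which fails the assertion). A hedged usage sketch; the concrete procedure constructor and the `is_prepared` predicate are placeholders, not APIs from this change:

```rust
// Hypothetical usage: run the procedure until it has reached its "prepared" state.
let mut procedure = CreateTableProcedure::new(/* ... test inputs, elided ... */);
execute_procedure_until(&mut procedure, |p| {
    // The predicate inspects whatever state the concrete procedure exposes.
    p.is_prepared() // placeholder accessor
})
.await;
```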
|
||||
|
||||
@@ -19,19 +19,19 @@ use std::sync::{Arc, Mutex};
|
||||
use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
|
||||
|
||||
pub enum OwnedKeyRwLockGuard {
|
||||
Read(OwnedRwLockReadGuard<()>),
|
||||
Write(OwnedRwLockWriteGuard<()>),
|
||||
Read { _guard: OwnedRwLockReadGuard<()> },
|
||||
Write { _guard: OwnedRwLockWriteGuard<()> },
|
||||
}
|
||||
|
||||
impl From<OwnedRwLockReadGuard<()>> for OwnedKeyRwLockGuard {
|
||||
fn from(guard: OwnedRwLockReadGuard<()>) -> Self {
|
||||
OwnedKeyRwLockGuard::Read(guard)
|
||||
OwnedKeyRwLockGuard::Read { _guard: guard }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OwnedRwLockWriteGuard<()>> for OwnedKeyRwLockGuard {
|
||||
fn from(guard: OwnedRwLockWriteGuard<()>) -> Self {
|
||||
OwnedKeyRwLockGuard::Write(guard)
|
||||
OwnedKeyRwLockGuard::Write { _guard: guard }
|
||||
}
|
||||
}
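Switching to struct variants with a `_guard` field keeps the guard alive purely for its `Drop` behavior while signalling that it is never read. A minimal illustration with a plain tokio lock rather than the key-scoped lock (the `Arc<RwLock<()>>` setup is an assumption for the sketch):

```rust
use std::sync::Arc;

use tokio::sync::RwLock;

let lock = Arc::new(RwLock::new(()));
// The owned guard converts into the enum via the `From` impls above.
let guard: OwnedKeyRwLockGuard = lock.clone().read_owned().await.into();
// ... critical section ...
drop(guard); // releases the read lock
```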
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, IntoVectorSnafu, Result};
|
||||
use crate::error::{self, GeneralDataFusionSnafu, IntoVectorSnafu, Result};
|
||||
use crate::prelude::ScalarValue;
|
||||
|
||||
/// Represents the result from an expression
|
||||
@@ -43,7 +43,9 @@ impl ColumnarValue {
|
||||
Ok(match self {
|
||||
ColumnarValue::Vector(v) => v,
|
||||
ColumnarValue::Scalar(s) => {
|
||||
let v = s.to_array_of_size(num_rows);
|
||||
let v = s
|
||||
.to_array_of_size(num_rows)
|
||||
.context(GeneralDataFusionSnafu)?;
|
||||
let data_type = v.data_type().clone();
|
||||
Helper::try_into_vector(v).context(IntoVectorSnafu { data_type })?
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::greptime_proto::v1::add_column_location::LocationType;
|
||||
@@ -94,8 +94,8 @@ impl Debug for OutputData {
|
||||
OutputData::RecordBatches(recordbatches) => {
|
||||
write!(f, "OutputData::RecordBatches({recordbatches:?})")
|
||||
}
|
||||
OutputData::Stream(_) => {
|
||||
write!(f, "OutputData::Stream(<stream>)")
|
||||
OutputData::Stream(s) => {
|
||||
write!(f, "OutputData::Stream(<{}>)", s.name())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -126,6 +126,17 @@ pub enum AddColumnLocation {
|
||||
After { column_name: String },
|
||||
}
|
||||
|
||||
impl Display for AddColumnLocation {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
AddColumnLocation::First => write!(f, r#"FIRST"#),
|
||||
AddColumnLocation::After { column_name } => {
|
||||
write!(f, r#"AFTER {column_name}"#)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
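The new `Display` impl renders the location as the SQL fragment used in `ALTER TABLE ... ADD COLUMN`; the expected output follows directly from the match arms above:

```rust
assert_eq!(AddColumnLocation::First.to_string(), "FIRST");
assert_eq!(
    AddColumnLocation::After {
        column_name: "ts".to_string()
    }
    .to_string(),
    "AFTER ts"
);
```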
|
||||
|
||||
impl From<&AddColumnLocation> for Location {
|
||||
fn from(value: &AddColumnLocation) -> Self {
|
||||
match value {
|
||||
|
||||
@@ -72,6 +72,7 @@ pub fn create_aggregate_function(
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion_common::DFSchema;
|
||||
use datafusion_expr::{
|
||||
ColumnarValue as DfColumnarValue, ScalarUDF as DfScalarUDF,
|
||||
TypeSignature as DfTypeSignature,
|
||||
@@ -135,15 +136,17 @@ mod tests {
|
||||
|
||||
// test into_df_udf
|
||||
let df_udf: DfScalarUDF = udf.into();
|
||||
assert_eq!("and", df_udf.name);
|
||||
assert_eq!("and", df_udf.name());
|
||||
|
||||
let types = vec![DataType::Boolean, DataType::Boolean];
|
||||
assert!(
|
||||
matches!(&df_udf.signature.type_signature, DfTypeSignature::Exact(ts) if ts.clone() == types)
|
||||
matches!(&df_udf.signature().type_signature, DfTypeSignature::Exact(ts) if ts.clone() == types)
|
||||
);
|
||||
assert_eq!(
|
||||
Arc::new(DataType::Boolean),
|
||||
(df_udf.return_type)(&[]).unwrap()
|
||||
DataType::Boolean,
|
||||
df_udf
|
||||
.return_type_from_exprs(&[], &DFSchema::empty(), &[])
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let args = vec![
|
||||
@@ -152,7 +155,7 @@ mod tests {
|
||||
];
|
||||
|
||||
// call the function
|
||||
let result = (df_udf.fun)(&args).unwrap();
|
||||
let result = (df_udf.fun())(&args).unwrap();
|
||||
|
||||
match result {
|
||||
DfColumnarValue::Array(arr) => {
|
||||
|
||||
@@ -126,7 +126,7 @@ impl DfAccumulatorAdaptor {
|
||||
}
|
||||
|
||||
impl DfAccumulator for DfAccumulatorAdaptor {
|
||||
fn state(&self) -> DfResult<Vec<ScalarValue>> {
|
||||
fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
|
||||
let state_values = self.accumulator.state()?;
|
||||
let state_types = self.creator.state_types()?;
|
||||
if state_values.len() != state_types.len() {
|
||||
@@ -161,7 +161,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn evaluate(&self) -> DfResult<ScalarValue> {
|
||||
fn evaluate(&mut self) -> DfResult<ScalarValue> {
|
||||
let value = self.accumulator.evaluate()?;
|
||||
let output_type = self.creator.output_type()?;
|
||||
let scalar_value = value
|
||||
|
||||
@@ -44,9 +44,7 @@ pub fn build_filter_from_timestamp(
|
||||
ts_col_name: &str,
|
||||
time_range: Option<&TimestampRange>,
|
||||
) -> Option<Expr> {
|
||||
let Some(time_range) = time_range else {
|
||||
return None;
|
||||
};
|
||||
let time_range = time_range?;
|
||||
let ts_col_expr = DfExpr::Column(Column {
|
||||
relation: None,
|
||||
name: ts_col_name.to_string(),
|
||||
@@ -94,10 +92,10 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_from_df_expr() {
|
||||
let df_expr = DfExpr::Wildcard;
|
||||
let df_expr = DfExpr::Wildcard { qualifier: None };
|
||||
|
||||
let expr: Expr = df_expr.into();
|
||||
|
||||
assert_eq!(DfExpr::Wildcard, *expr.df_expr());
|
||||
assert_eq!(DfExpr::Wildcard { qualifier: None }, *expr.df_expr());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,15 +16,18 @@
|
||||
//!
|
||||
//! Modified from DataFusion.
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::{self, Debug, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion::arrow::datatypes::Field;
|
||||
use datafusion_common::Result;
|
||||
use datafusion_expr::function::AccumulatorArgs;
|
||||
use datafusion_expr::{
|
||||
AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf,
|
||||
StateTypeFunction as DfStateTypeFunction,
|
||||
Accumulator, AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf, AggregateUDFImpl,
|
||||
};
|
||||
use datatypes::arrow::datatypes::DataType as ArrowDataType;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::data_type::DataType;
|
||||
|
||||
use crate::function::{
|
||||
to_df_return_type, AccumulatorFunctionImpl, ReturnTypeFunction, StateTypeFunction,
|
||||
@@ -90,13 +93,72 @@ impl AggregateFunction {
|
||||
|
||||
impl From<AggregateFunction> for DfAggregateUdf {
|
||||
fn from(udaf: AggregateFunction) -> Self {
|
||||
DfAggregateUdf::new(
|
||||
&udaf.name,
|
||||
&udaf.signature.into(),
|
||||
&to_df_return_type(udaf.return_type),
|
||||
&to_df_accumulator_func(udaf.accumulator, udaf.creator.clone()),
|
||||
&to_df_state_type(udaf.state_type),
|
||||
)
|
||||
struct DfUdafAdapter {
|
||||
name: String,
|
||||
signature: datafusion_expr::Signature,
|
||||
return_type_func: datafusion_expr::ReturnTypeFunction,
|
||||
accumulator: AccumulatorFactoryFunction,
|
||||
creator: AggregateFunctionCreatorRef,
|
||||
}
|
||||
|
||||
impl Debug for DfUdafAdapter {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("DfUdafAdapter")
|
||||
.field("name", &self.name)
|
||||
.field("signature", &self.signature)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl AggregateUDFImpl for DfUdafAdapter {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn signature(&self) -> &datafusion_expr::Signature {
|
||||
&self.signature
|
||||
}
|
||||
|
||||
fn return_type(&self, arg_types: &[ArrowDataType]) -> Result<ArrowDataType> {
|
||||
(self.return_type_func)(arg_types).map(|x| x.as_ref().clone())
|
||||
}
|
||||
|
||||
fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
|
||||
(self.accumulator)(acc_args)
|
||||
}
|
||||
|
||||
fn state_fields(
|
||||
&self,
|
||||
name: &str,
|
||||
_value_type: ArrowDataType,
|
||||
_ordering_fields: Vec<Field>,
|
||||
) -> Result<Vec<Field>> {
|
||||
self.creator
|
||||
.state_types()
|
||||
.map(|x| {
|
||||
(0..x.len())
|
||||
.zip(x)
|
||||
.map(|(i, t)| {
|
||||
Field::new(format!("{}_{}", name, i), t.as_arrow_type(), true)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
DfUdafAdapter {
|
||||
name: udaf.name,
|
||||
signature: udaf.signature.into(),
|
||||
return_type_func: to_df_return_type(udaf.return_type),
|
||||
accumulator: to_df_accumulator_func(udaf.accumulator, udaf.creator.clone()),
|
||||
creator: udaf.creator,
|
||||
}
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,19 +172,3 @@ fn to_df_accumulator_func(
|
||||
Ok(Box::new(DfAccumulatorAdaptor::new(accumulator, creator)) as _)
|
||||
})
|
||||
}
|
||||
|
||||
fn to_df_state_type(func: StateTypeFunction) -> DfStateTypeFunction {
|
||||
let df_func = move |data_type: &ArrowDataType| {
|
||||
// DataFusion DataType -> ConcreteDataType
|
||||
let concrete_data_type = ConcreteDataType::from_arrow_type(data_type);
|
||||
|
||||
// evaluate ConcreteDataType
|
||||
let eval_result = (func)(&concrete_data_type);
|
||||
|
||||
// ConcreteDataType -> DataFusion DataType
|
||||
eval_result
|
||||
.map(|ts| Arc::new(ts.iter().map(|t| t.as_arrow_type()).collect()))
|
||||
.map_err(|e| e.into())
|
||||
};
|
||||
Arc::new(df_func)
|
||||
}
|
||||
|
||||
@@ -70,6 +70,8 @@ impl ScalarUdf {
|
||||
|
||||
impl From<ScalarUdf> for DfScalarUDF {
|
||||
fn from(udf: ScalarUdf) -> Self {
|
||||
// TODO(LFC): remove deprecated
|
||||
#[allow(deprecated)]
|
||||
DfScalarUDF::new(
|
||||
&udf.name,
|
||||
&udf.signature.into(),
|
||||
|
||||
@@ -21,10 +21,9 @@ use common_recordbatch::{DfSendableRecordBatchStream, SendableRecordBatchStream}
|
||||
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
|
||||
use datafusion::error::Result as DfResult;
|
||||
pub use datafusion::execution::context::{SessionContext, TaskContext};
|
||||
use datafusion::physical_plan::expressions::PhysicalSortExpr;
|
||||
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
pub use datafusion::physical_plan::Partitioning;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType, Statistics};
use datafusion::physical_plan::{DisplayAs, DisplayFormatType, PlanProperties};
use datatypes::schema::SchemaRef;
use snafu::ResultExt;

@@ -47,13 +46,9 @@ pub trait PhysicalPlan: Debug + Send + Sync {
    /// Get the schema for this physical plan
    fn schema(&self) -> SchemaRef;

    /// Specifies the output partitioning scheme of this plan
    fn output_partitioning(&self) -> Partitioning;

    /// returns `Some(keys)` that describes how the output was sorted.
    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
        None
    }
    /// Return properties of the output of the [PhysicalPlan], such as output
    /// ordering(s), partitioning information etc.
    fn properties(&self) -> &PlanProperties;

    /// Get a list of child physical plans that provide the input for this plan. The returned list
    /// will be empty for leaf nodes, will contain a single value for unary nodes, or two
@@ -107,8 +102,8 @@ impl PhysicalPlan for PhysicalPlanAdapter {
        self.schema.clone()
    }

    fn output_partitioning(&self) -> Partitioning {
        self.df_plan.output_partitioning()
    fn properties(&self) -> &PlanProperties {
        self.df_plan.properties()
    }

    fn children(&self) -> Vec<PhysicalPlanRef> {
@@ -170,14 +165,6 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
        self.0.schema().arrow_schema().clone()
    }

    fn output_partitioning(&self) -> Partitioning {
        self.0.output_partitioning()
    }

    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
        self.0.output_ordering()
    }

    fn children(&self) -> Vec<Arc<dyn DfPhysicalPlan>> {
        self.0
            .children()
@@ -213,13 +200,13 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
        Ok(Box::pin(DfRecordBatchStreamAdapter::new(stream)))
    }

    fn statistics(&self) -> Statistics {
        Statistics::default()
    }

    fn metrics(&self) -> Option<MetricsSet> {
        self.0.metrics()
    }

    fn properties(&self) -> &PlanProperties {
        self.0.properties()
    }
}

impl DisplayAs for DfPhysicalPlanAdapter {
@@ -232,10 +219,12 @@ impl DisplayAs for DfPhysicalPlanAdapter {
mod test {
    use async_trait::async_trait;
    use common_recordbatch::{RecordBatch, RecordBatches};
    use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
    use datafusion::datasource::{DefaultTableSource, TableProvider as DfTableProvider, TableType};
    use datafusion::execution::context::{SessionContext, SessionState};
    use datafusion::physical_plan::collect;
    use datafusion::physical_expr::EquivalenceProperties;
    use datafusion::physical_plan::empty::EmptyExec;
    use datafusion::physical_plan::{collect, ExecutionMode};
    use datafusion_expr::logical_plan::builder::LogicalPlanBuilder;
    use datafusion_expr::{Expr, TableSource};
    use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
@@ -272,10 +261,13 @@ mod test {
        _filters: &[Expr],
        _limit: Option<usize>,
    ) -> DfResult<Arc<dyn DfPhysicalPlan>> {
        let schema = Schema::try_from(self.schema()).unwrap();
        let my_plan = Arc::new(MyExecutionPlan {
            schema: Arc::new(schema),
        });
        let schema = Arc::new(Schema::try_from(self.schema()).unwrap());
        let properties = PlanProperties::new(
            EquivalenceProperties::new(schema.arrow_schema().clone()),
            Partitioning::UnknownPartitioning(1),
            ExecutionMode::Bounded,
        );
        let my_plan = Arc::new(MyExecutionPlan { schema, properties });
        let df_plan = DfPhysicalPlanAdapter(my_plan);
        Ok(Arc::new(df_plan))
    }
@@ -289,9 +281,10 @@ mod test {
    }
}

#[derive(Debug)]
#[derive(Debug, Clone)]
struct MyExecutionPlan {
    schema: SchemaRef,
    properties: PlanProperties,
}

impl PhysicalPlan for MyExecutionPlan {
@@ -303,8 +296,8 @@ mod test {
        self.schema.clone()
    }

    fn output_partitioning(&self) -> Partitioning {
        Partitioning::UnknownPartitioning(1)
    fn properties(&self) -> &PlanProperties {
        &self.properties
    }

    fn children(&self) -> Vec<PhysicalPlanRef> {
@@ -312,7 +305,7 @@ mod test {
    }

    fn with_new_children(&self, _children: Vec<PhysicalPlanRef>) -> Result<PhysicalPlanRef> {
        unimplemented!()
        Ok(Arc::new(self.clone()))
    }

    fn execute(
@@ -381,7 +374,7 @@ mod test {

        let plan = PhysicalPlanAdapter::new(
            Arc::new(Schema::try_from(df_schema.clone()).unwrap()),
            Arc::new(EmptyExec::new(true, df_schema.clone())),
            Arc::new(EmptyExec::new(df_schema.clone())),
        );
        let _ = plan.df_plan.as_any().downcast_ref::<EmptyExec>().unwrap();
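
The test above illustrates the general migration recipe for this DataFusion upgrade: instead of answering output_partitioning() and output_ordering() separately, a plan now builds a PlanProperties value up front and returns it by reference from properties(). Below is a minimal sketch of that recipe for a hypothetical single-partition leaf plan; the type name is illustrative, but the constructors are exactly the ones imported in this diff.

use datafusion::physical_expr::EquivalenceProperties;
use datafusion::physical_plan::{ExecutionMode, Partitioning, PlanProperties};
use datatypes::schema::SchemaRef;

// Hypothetical leaf plan: PlanProperties is computed once in the constructor
// and handed out by reference, matching the new PhysicalPlan::properties() contract.
struct SinglePartitionPlan {
    schema: SchemaRef,
    properties: PlanProperties,
}

impl SinglePartitionPlan {
    fn new(schema: SchemaRef) -> Self {
        let properties = PlanProperties::new(
            EquivalenceProperties::new(schema.arrow_schema().clone()),
            Partitioning::UnknownPartitioning(1),
            ExecutionMode::Bounded,
        );
        Self { schema, properties }
    }

    fn properties(&self) -> &PlanProperties {
        &self.properties
    }
}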

@@ -31,6 +31,8 @@ pub enum TypeSignature {
    // A function such as `array` is `VariadicEqual`
    // The first argument decides the type used for coercion
    VariadicEqual,
    /// One or more arguments with arbitrary types
    VariadicAny,
    /// fixed number of arguments of an arbitrary but equal type out of a list of valid types
    // A function of one argument of f64 is `Uniform(1, vec![ConcreteDataType::Float64])`
    // A function of one argument of f64 or f32 is `Uniform(1, vec![ConcreteDataType::Float32, ConcreteDataType::Float64])`
@@ -79,6 +81,15 @@ impl Signature {
            volatility,
        }
    }

    /// variadic_any - Creates a variadic signature that represents an arbitrary number of arguments of any type.
    pub fn variadic_any(volatility: Volatility) -> Self {
        Self {
            type_signature: TypeSignature::VariadicAny,
            volatility,
        }
    }

    /// uniform - Creates a function with a fixed number of arguments of the same type, which must be from valid_types.
    pub fn uniform(
        arg_count: usize,
@@ -131,6 +142,7 @@ impl From<TypeSignature> for DfTypeSignature {
            TypeSignature::OneOf(ts) => {
                DfTypeSignature::OneOf(ts.into_iter().map(Into::into).collect())
            }
            TypeSignature::VariadicAny => DfTypeSignature::VariadicAny,
        }
    }
}
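
A short usage sketch for the new constructor; the import path and the Volatility::Immutable variant are assumptions based on how these types are re-exported elsewhere in the codebase, not part of this diff. The From impl above then lowers the variant one-to-one onto DfTypeSignature::VariadicAny when the signature is handed to DataFusion.

// Hedged sketch, assuming the prelude re-exports used by function implementations.
use common_query::prelude::{Signature, Volatility};

fn any_arity_signature() -> Signature {
    // Accept any number of arguments of any type, e.g. for a variadic UDF.
    Signature::variadic_any(Volatility::Immutable)
}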

@@ -11,6 +11,7 @@ workspace = true
arc-swap = "1.6"
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datatypes.workspace = true

@@ -103,7 +103,7 @@ where
                "Trying to cast a RecordBatch into an incompatible schema. RecordBatch: {}, Target: {}",
                projected_column.schema(),
                projected_schema,
            ))));
            )), None));
        }

        let mut columns = Vec::with_capacity(projected_schema.fields.len());
@@ -218,6 +218,10 @@ impl RecordBatchStreamAdapter {
}

impl RecordBatchStream for RecordBatchStreamAdapter {
    fn name(&self) -> &str {
        "RecordBatchStreamAdapter"
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

@@ -18,6 +18,7 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion_common::ScalarValue;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};

@@ -69,8 +70,9 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to init Recordbatch stream"))]
    InitRecordbatchStream {
    #[snafu(display("Failed to convert {v:?} to Arrow scalar"))]
    ToArrowScalar {
        v: ScalarValue,
        #[snafu(source)]
        error: datafusion_common::DataFusionError,
        location: Location,
@@ -128,7 +130,7 @@ impl ErrorExt for Error {
            | Error::CreateRecordBatches { .. }
            | Error::PollStream { .. }
            | Error::Format { .. }
            | Error::InitRecordbatchStream { .. }
            | Error::ToArrowScalar { .. }
            | Error::ColumnNotExists { .. }
            | Error::ProjectArrowRecordBatch { .. }
            | Error::ArrowCompute { .. } => StatusCode::Internal,
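
Replacing InitRecordbatchStream with ToArrowScalar implies that call sites now attach the offending ScalarValue to a failed DataFusion conversion. The following is a hedged sketch of such a call site, assuming the usual snafu context-selector naming (ToArrowScalarSnafu) with an implicitly generated location, and using a placeholder for the actual conversion call; none of this appears in the diff itself.

use datafusion_common::{DataFusionError, ScalarValue};
use snafu::ResultExt;

// Placeholder for whatever DataFusion conversion can fail with a DataFusionError.
fn to_arrow_scalar(_v: &ScalarValue) -> std::result::Result<(), DataFusionError> {
    Ok(())
}

fn convert(v: &ScalarValue) -> Result<(), Error> {
    // On failure, the offending value is captured in the new ToArrowScalar variant.
    to_arrow_scalar(v).context(ToArrowScalarSnafu { v: v.clone() })?;
    Ok(())
}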

Some files were not shown because too many files have changed in this diff.