Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-04 12:22:55 +00:00)

Compare commits: 0.15.0-nig...flow/admin (75 commits)
Commits (SHA1):
8ca3d72e3f, 80c5af0ecf, 7afb77fd35, 0b9af77fe9, cbafb6e00b,
744a754246, 9cd4a2c525, 180920327b, ee4f830be6, 69975f1f71,
38cac301f2, 083c22b90a, fdd164c0fa, 078afb2bd6, 477e4cc344,
078d83cec2, 7705d84d83, 0d81400bb4, 1d7ae66e75, af6cf999c1,
54869a1329, 3104d49434, b4d00fb499, 4ae6df607b, 183e1dc031,
886c2dba76, 4e615e8906, 9afc61f778, d22084e90c, 5e9b5d981f,
b01fce95a0, 9fbcf9b7e7, dc3591655e, aca7ad82b1, 10fa6d8736,
92422dafca, 53752e4f6c, 40bfa98d4b, 49986b03d6, 493440a802,
77e2fee755, b85429c0f1, 3d942f6763, 3901863432, 27e339f628,
cf2712e6f4, 4b71e493f7, bf496e05cc, 513ca951ee, 791f530a78,
1de6d8c619, a4d0420727, fc6300a2ba, f55af5838c, 5a0da5b6bb,
d5f0006864, ede82331b2, 56e696bd55, bc0cdf62ba, eaf7b4b9dd,
7ae0e150e5, 43c30b55ae, 153e80450a, 1624dc41c5, 300262562b,
b2377d4b87, 8d36ffb4e1, 955ad644f7, c2e3c3d398, 400229c384,
cd9b6990bf, a56e6e04c2, d324439014, 038acda7cd, a0d89c9ed1
.github/CODEOWNERS (2 changed lines)
```diff
@@ -4,7 +4,7 @@
 * @GreptimeTeam/db-approver
 
-## [Module] Databse Engine
+## [Module] Database Engine
 /src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
 /src/query @evenyag
```
```diff
@@ -52,7 +52,7 @@ runs:
     uses: ./.github/actions/build-greptime-binary
     with:
       base-image: ubuntu
-      features: servers/dashboard,pg_kvbackend,mysql_kvbackend
+      features: servers/dashboard
       cargo-profile: ${{ inputs.cargo-profile }}
       artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
       version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
    if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
    with:
      base-image: centos
-     features: servers/dashboard,pg_kvbackend,mysql_kvbackend
+     features: servers/dashboard
      cargo-profile: ${{ inputs.cargo-profile }}
      artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
      version: ${{ inputs.version }}
```
```diff
@@ -22,7 +22,6 @@ datanode:
     [wal]
     provider = "kafka"
     broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-    linger = "2ms"
     overwrite_entry_start_id = true
 frontend:
   configData: |-
```
.github/scripts/create-version.sh (5 changed lines)
```diff
@@ -8,7 +8,7 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-  # Read from envrionment variables.
+  # Read from environment variables.
   if [ -z "$GITHUB_EVENT_NAME" ]; then
     echo "GITHUB_EVENT_NAME is empty" >&2
     exit 1
@@ -16,7 +16,8 @@ function create_version() {
 
   if [ -z "$NEXT_RELEASE_VERSION" ]; then
     echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
-    export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
+    # NOTE: Need a `v` prefix for the version string.
+    export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
   fi
 
   if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
```
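For reference, a minimal sketch of what the changed extraction pipeline produces, assuming the workspace Cargo.toml pins `version = "0.15.0"`:

```bash
#!/usr/bin/env bash
# Take the first `version = "..."` line from Cargo.toml, split on the
# double quotes, and keep the second field (the bare version number).
version=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
echo "$version"     # -> 0.15.0

# The fixed line prepends the `v` prefix that release tags expect:
echo "v${version}"  # -> v0.15.0
```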
.github/scripts/deploy-greptimedb.sh (2 changed lines)
```diff
@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 
-# Ceate a cluster with 1 control-plane node and 5 workers.
+# Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
   cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster
```
```diff
@@ -4,7 +4,7 @@ DEV_BUILDER_IMAGE_TAG=$1
 
 update_dev_builder_version() {
   if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
     echo "Error: Should specify the dev-builder image tag"
     exit 1
   fi
@@ -17,7 +17,7 @@ update_dev_builder_version() {
   git checkout -b $BRANCH_NAME
 
   # Update the dev-builder image tag in the Makefile.
-  gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
 
   # Commit the changes.
   git add Makefile
```
.github/scripts/upload-artifacts-to-s3.sh (2 changed lines)
```diff
@@ -41,7 +41,7 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
   if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-    # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+    # If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
     if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
       echo "Updating latest-version.txt"
       echo "$VERSION" > latest-version.txt
```
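A quick sketch of which tags the official-release check above accepts; the sample tags are illustrative:

```bash
#!/usr/bin/env bash
# Only plain `vMAJOR.MINOR.PATCH` tags count as official releases;
# nightly builds and un-prefixed versions fall through to the else branch.
for v in v1.0.0 v0.15.0 v0.15.0-nightly-20250520 0.15.0; do
  if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$v: official release, would update latest-version.txt"
  else
    echo "$v: skipped"
  fi
done
```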
.github/workflows/dev-build.yml (9 changed lines)
```diff
@@ -55,6 +55,11 @@ on:
       description: Build and push images to DockerHub and ACR
       required: false
       default: true
+    upload_artifacts_to_s3:
+      type: boolean
+      description: Whether upload artifacts to s3
+      required: false
+      default: false
     cargo_profile:
       type: choice
       description: The cargo profile to use in building GreptimeDB.
@@ -238,7 +243,7 @@ jobs:
       version: ${{ needs.allocate-runners.outputs.version }}
       push-latest-tag: false # Don't push the latest tag to registry.
       dev-mode: true # Only build the standard images.
 
     - name: Echo Docker image tag to step summary
       run: |
         echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
@@ -281,7 +286,7 @@ jobs:
       aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
       aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
       aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-      upload-to-s3: false
+      upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
       dev-mode: true # Only build the standard images(exclude centos images).
       push-latest-tag: false # Don't push the latest tag to registry.
       update-version-info: false # Don't update the version info in S3.
```
.github/workflows/develop.yml (18 changed lines)
```diff
@@ -195,6 +195,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: [ "unstable_fuzz_create_table_standalone" ]
     steps:
@@ -222,12 +223,12 @@ jobs:
       run: |
         sudo apt update && sudo apt install -y libfuzzer-14-dev
         cargo install cargo-fuzz cargo-gc-bin --force
-    - name: Download pre-built binariy
+    - name: Download pre-built binary
       uses: actions/download-artifact@v4
       with:
         name: bin
         path: .
-    - name: Unzip bianry
+    - name: Unzip binary
       run: |
         tar -xvf ./bin.tar.gz
         rm ./bin.tar.gz
@@ -275,7 +276,7 @@ jobs:
     - name: Install cargo-gc-bin
       shell: bash
       run: cargo install cargo-gc-bin --force
-    - name: Build greptime bianry
+    - name: Build greptime binary
       shell: bash
       # `cargo gc` will invoke `cargo build` with specified args
       run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
@@ -299,6 +300,7 @@ jobs:
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
       mode:
@@ -328,9 +330,9 @@ jobs:
       name: Setup Minio
       uses: ./.github/actions/setup-minio
     - if: matrix.mode.kafka
-      name: Setup Kafka cluser
+      name: Setup Kafka cluster
       uses: ./.github/actions/setup-kafka-cluster
-    - name: Setup Etcd cluser
+    - name: Setup Etcd cluster
       uses: ./.github/actions/setup-etcd-cluster
     # Prepares for fuzz tests
     - uses: arduino/setup-protoc@v3
@@ -431,6 +433,7 @@ jobs:
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
       mode:
@@ -475,9 +478,9 @@ jobs:
       name: Setup Minio
       uses: ./.github/actions/setup-minio
     - if: matrix.mode.kafka
-      name: Setup Kafka cluser
+      name: Setup Kafka cluster
       uses: ./.github/actions/setup-kafka-cluster
-    - name: Setup Etcd cluser
+    - name: Setup Etcd cluster
       uses: ./.github/actions/setup-etcd-cluster
     # Prepares for fuzz tests
     - uses: arduino/setup-protoc@v3
@@ -578,6 +581,7 @@ jobs:
     needs: build
     runs-on: ${{ matrix.os }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ ubuntu-latest ]
       mode:
```
.github/workflows/nightly-ci.yml (4 changed lines)
```diff
@@ -124,9 +124,7 @@ jobs:
       fetch-depth: 0
       persist-credentials: false
   - uses: cachix/install-nix-action@v31
-    with:
-      nix_path: nixpkgs=channel:nixos-24.11
-  - run: nix develop --command cargo build --bin greptime
+  - run: nix develop --command cargo check --bin greptime
     env:
       CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
```
.github/workflows/release.yml (16 changed lines)
```diff
@@ -88,7 +88,7 @@ env:
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
   DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
 
-  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
+  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
 
 jobs:
@@ -124,7 +124,7 @@ jobs:
       # The create-version will create a global variable named 'version' in the global workflows.
       # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
-      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
+      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
       # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
       - name: Create version
         id: create-version
@@ -388,7 +388,7 @@ jobs:
   ### Stop runners ###
   # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
-  # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
+  # Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
@@ -444,7 +444,7 @@ jobs:
   bump-doc-version:
     name: Bump doc version
     if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [publish-github-release]
+    needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
     permissions:
@@ -466,8 +466,8 @@ jobs:
   bump-website-version:
     name: Bump website version
-    if: ${{ github.event_name == 'push' }}
-    needs: [publish-github-release]
+    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+    needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
     permissions:
@@ -490,7 +490,7 @@ jobs:
   bump-helm-charts-version:
     name: Bump helm charts version
     if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
-    needs: [publish-github-release]
+    needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     permissions:
       contents: write
@@ -511,7 +511,7 @@ jobs:
   bump-homebrew-greptime-version:
     name: Bump homebrew greptime version
     if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
-    needs: [publish-github-release]
+    needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     permissions:
       contents: write
```
.github/workflows/semantic-pull-request.yml (3 changed lines)
```diff
@@ -14,6 +14,9 @@ concurrency:
 jobs:
   check:
     runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write # Add permissions to modify PRs
+      issues: write
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
```
.gitignore (3 changed lines)
```diff
@@ -58,3 +58,6 @@ tests-fuzz/corpus/
 
 ## default data home
 greptimedb_data
+
+# github
+!/.github
```
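A side note on the new entry: `!/.github` is a negation pattern, re-including the `.github` directory in case a broader ignore rule would otherwise hide it. A quick way to see which rule decides a path (the broad `.*` rule in the comment is hypothetical, only for illustration):

```bash
#!/usr/bin/env bash
# Hypothetical .gitignore excerpt:
#   .*          # a broad rule that would also hide .github
#   !/.github   # negation: keep the CI configuration tracked
# `git check-ignore -v` prints the rule that wins for a given path:
git check-ignore -v .github/workflows/develop.yml || echo "not ignored"
```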
```diff
@@ -108,7 +108,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
 The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
 
 - [GreptimeDB Community Slack](https://greptime.com/slack)
-- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
+- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
 
 Also, see some extra GreptimeDB content:
```
Cargo.lock (88 changed lines, generated)
```diff
@@ -1852,8 +1852,9 @@ dependencies = [
  "futures",
  "humantime",
  "meta-client",
+ "meta-srv",
  "nu-ansi-term",
- "opendal 0.51.2",
+ "object-store",
  "query",
  "rand 0.9.0",
  "reqwest",
@@ -1889,6 +1890,7 @@
  "common-query",
  "common-recordbatch",
  "common-telemetry",
+ "datatypes",
  "enum_dispatch",
  "futures",
  "futures-util",
@@ -2223,6 +2225,7 @@ version = "0.15.0"
 dependencies = [
  "api",
  "arrow-flight",
+ "bytes",
  "common-base",
  "common-error",
  "common-macro",
@@ -2320,6 +2323,7 @@
  "common-query",
  "common-recordbatch",
  "common-telemetry",
+ "common-test-util",
  "common-time",
  "common-wal",
  "common-workload",
@@ -2330,6 +2334,7 @@
  "deadpool-postgres",
  "derive_builder 0.20.1",
  "etcd-client",
+ "flexbuffers",
  "futures",
  "futures-util",
  "hex",
@@ -2338,6 +2343,7 @@
  "itertools 0.14.0",
  "lazy_static",
  "moka",
+ "object-store",
  "prometheus",
  "prost 0.13.5",
  "rand 0.9.0",
@@ -2536,6 +2542,7 @@ name = "common-test-util"
 version = "0.15.0"
 dependencies = [
  "client",
+ "common-grpc",
  "common-query",
  "common-recordbatch",
  "once_cell",
@@ -4258,6 +4265,19 @@
  "miniz_oxide",
 ]
 
+[[package]]
+name = "flexbuffers"
+version = "25.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "935627e7bc8f083035d9faad09ffaed9128f73fb1f74a8798f115749c43378e8"
+dependencies = [
+ "bitflags 1.3.2",
+ "byteorder",
+ "num_enum 0.5.11",
+ "serde",
+ "serde_derive",
+]
+
 [[package]]
 name = "float-cmp"
 version = "0.10.0"
@@ -4352,6 +4372,7 @@
  "session",
  "smallvec",
  "snafu 0.8.5",
+ "sql",
  "store-api",
  "strum 0.27.1",
  "substrait 0.15.0",
@@ -4855,7 +4876,7 @@
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=7668a882d57ca6a2333146e0574b8f0c9d5008ae#7668a882d57ca6a2333146e0574b8f0c9d5008ae"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2dca1dc67862d7b410838aef81232274c019b3f6#2dca1dc67862d7b410838aef81232274c019b3f6"
 dependencies = [
  "prost 0.13.5",
  "serde",
@@ -6955,6 +6976,7 @@
  "common-decimal",
  "common-error",
  "common-function",
+ "common-grpc",
  "common-macro",
  "common-meta",
  "common-query",
@@ -7572,13 +7594,34 @@
  "libc",
 ]
 
+[[package]]
+name = "num_enum"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
+dependencies = [
+ "num_enum_derive 0.5.11",
+]
+
 [[package]]
 name = "num_enum"
 version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
 dependencies = [
- "num_enum_derive",
+ "num_enum_derive 0.7.3",
 ]
 
+[[package]]
+name = "num_enum_derive"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
+dependencies = [
+ "proc-macro-crate 1.3.1",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
@@ -7632,7 +7675,7 @@
  "lazy_static",
  "md5",
  "moka",
- "opendal 0.52.0",
+ "opendal",
  "prometheus",
  "tokio",
  "uuid",
@@ -7671,7 +7714,7 @@
  "futures",
  "futures-util",
  "object_store",
- "opendal 0.52.0",
+ "opendal",
  "pin-project",
  "tokio",
 ]
@@ -7697,35 +7740,6 @@ version = "11.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
 
-[[package]]
-name = "opendal"
-version = "0.51.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b1063ea459fa9e94584115743b06330f437902dd1d9f692b863ef1875a20548"
-dependencies = [
- "anyhow",
- "async-trait",
- "backon",
- "base64 0.22.1",
- "bytes",
- "chrono",
- "crc32c",
- "futures",
- "getrandom 0.2.15",
- "http 1.1.0",
- "log",
- "md-5",
- "once_cell",
- "percent-encoding",
- "quick-xml 0.36.2",
- "reqsign",
- "reqwest",
- "serde",
- "serde_json",
- "tokio",
- "uuid",
-]
-
 [[package]]
 name = "opendal"
 version = "0.52.0"
@@ -8077,7 +8091,7 @@
  "arrow 53.4.1",
  "arrow-ipc 53.4.1",
  "lazy_static",
- "num_enum",
+ "num_enum 0.7.3",
  "opentelemetry-proto 0.27.0",
  "paste",
  "prost 0.13.5",
@@ -8414,9 +8428,9 @@
 [[package]]
 name = "pgwire"
-version = "0.29.0"
+version = "0.30.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4e6fcdc2ae2173ef8ee1005b6e46453d45195ac3d97caac0db7ecf64ab4aa85"
+checksum = "ec79ee18e6cafde8698885646780b967ecc905120798b8359dd0da64f9688e89"
 dependencies = [
  "async-trait",
  "bytes",
```
```diff
@@ -79,6 +79,7 @@ clippy.implicit_clone = "warn"
 clippy.result_large_err = "allow"
 clippy.large_enum_variant = "allow"
 clippy.doc_overindented_list_items = "allow"
+clippy.uninlined_format_args = "allow"
 rust.unknown_lints = "deny"
 rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
@@ -131,7 +132,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7668a882d57ca6a2333146e0574b8f0c9d5008ae" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2dca1dc67862d7b410838aef81232274c019b3f6" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
```
Makefile (2 changed lines)
```diff
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2025-04-15-1a517ec8-20250428023155
+DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
```
README.md (11 changed lines)
````diff
@@ -121,7 +121,7 @@ docker pull greptime/greptimedb
 
 ```shell
 docker run -p 127.0.0.1:4000-4003:4000-4003 \
-  -v "$(pwd)/greptimedb:/greptimedb_data" \
+  -v "$(pwd)/greptimedb_data:/greptimedb_data" \
   --name greptime --rm \
   greptime/greptimedb:latest standalone start \
   --http-addr 0.0.0.0:4000 \
@@ -129,7 +129,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
   --mysql-addr 0.0.0.0:4002 \
   --postgres-addr 0.0.0.0:4003
 ```
 Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
 [Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
 
 **Troubleshooting:**
@@ -167,7 +167,7 @@ cargo run -- standalone start
 
 ## Project Status
 
 > **Status:** Beta.
 > **GA (v1.0):** Targeted for mid 2025.
 
 - Being used in production by early adopters
@@ -197,8 +197,8 @@ GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/license
 
 ## Commercial Support
 
 Running GreptimeDB in your organization?
 We offer enterprise add-ons, services, training, and consulting.
 [Contact us](https://greptime.com/contactus) for details.
 
 ## Contributing
@@ -215,4 +215,3 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
 - [Apache Parquet™](https://parquet.apache.org/) (file storage)
 - [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
 - [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
-- [etcd](https://etcd.io/) (meta service)
````
```diff
@@ -27,6 +27,7 @@
 | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
 | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
 | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
+| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
@@ -154,6 +155,7 @@
 | `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
 | `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
 | `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
+| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
 | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
 | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
 | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -225,6 +227,7 @@
 | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
 | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
 | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
+| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
@@ -328,6 +331,10 @@
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
 | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
+| `http` | -- | -- | The HTTP server options. |
+| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
 | `procedure` | -- | -- | Procedure storage options. |
 | `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
 | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -494,6 +501,7 @@
 | `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
 | `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
 | `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
+| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
 | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
 | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
 | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
```
```diff
@@ -499,6 +499,9 @@ content_cache_size = "128MiB"
 ## Page size for inverted index content cache.
 content_cache_page_size = "64KiB"
 
+## Cache size for index result.
+result_cache_size = "128MiB"
+
 ## The options for inverted index in Mito engine.
 [region_engine.mito.inverted_index]
```
```diff
@@ -37,6 +37,12 @@ enable_cors = true
 ## Customize allowed origins for HTTP CORS.
 ## @toml2docs:none-default
 cors_allowed_origins = ["https://example.com"]
+## Whether to enable validation for Prometheus remote write requests.
+## Available options:
+## - strict: deny invalid UTF-8 strings (default).
+## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
+## - unchecked: do not valid strings.
+prom_validation_mode = "strict"
 
 ## The gRPC server options.
 [grpc]
```
```diff
@@ -67,6 +67,17 @@ node_max_idle_time = "24hours"
 ## The number of threads to execute the runtime for global write operations.
 #+ compact_rt_size = 4
 
+## The HTTP server options.
+[http]
+## The address to bind the HTTP server.
+addr = "127.0.0.1:4000"
+## HTTP request timeout. Set to 0 to disable timeout.
+timeout = "0s"
+## HTTP request body limit.
+## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
+## Set to 0 to disable limit.
+body_limit = "64MB"
+
 ## Procedure storage options.
 [procedure]
```
```diff
@@ -43,6 +43,13 @@ enable_cors = true
 ## @toml2docs:none-default
 cors_allowed_origins = ["https://example.com"]
 
+## Whether to enable validation for Prometheus remote write requests.
+## Available options:
+## - strict: deny invalid UTF-8 strings (default).
+## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
+## - unchecked: do not valid strings.
+prom_validation_mode = "strict"
+
 ## The gRPC server options.
 [grpc]
 ## The address to bind the gRPC server.
@@ -590,6 +597,9 @@ content_cache_size = "128MiB"
 ## Page size for inverted index content cache.
 content_cache_page_size = "64KiB"
 
+## Cache size for index result.
+result_cache_size = "128MiB"
+
 ## The options for inverted index in Mito engine.
 [region_engine.mito.inverted_index]
```
```diff
@@ -1,6 +1,6 @@
 # Profile memory usage of GreptimeDB
 
-This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](docs/how-to/memory-profile-scripts).
+This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](./memory-profile-scripts/scripts).
 
 ## Prerequisites
 ### jemalloc
```
flake.lock (8 changed lines, generated)
```diff
@@ -41,16 +41,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1745487689,
-        "narHash": "sha256-FQoi3R0NjQeBAsEOo49b5tbDPcJSMWc3QhhaIi9eddw=",
+        "lastModified": 1748162331,
+        "narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "5630cf13cceac06cefe9fc607e8dfa8fb342dde3",
+        "rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-24.11",
+        "ref": "nixos-25.05",
         "repo": "nixpkgs",
         "type": "github"
       }
```
```diff
@@ -2,7 +2,7 @@
   description = "Development environment flake";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
     fenix = {
       url = "github:nix-community/fenix";
       inputs.nixpkgs.follows = "nixpkgs";
@@ -21,7 +21,7 @@
       lib = nixpkgs.lib;
       rustToolchain = fenix.packages.${system}.fromToolchainName {
         name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
-        sha256 = "sha256-arzEYlWLGGYeOhECHpBxQd2joZ4rPKV3qLNnZ+eql6A=";
+        sha256 = "sha256-tJJr8oqX3YD+ohhPK7jlt/7kvKBnBqJVjYtoFr520d4=";
       };
     in
     {
@@ -51,6 +51,7 @@
       ];
 
       LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
+      NIX_HARDENING_ENABLE = "";
     };
   });
 }
```
File diff suppressed because it is too large.
```diff
@@ -46,6 +46,7 @@
 | Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
 | Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
 | Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
+| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
 # Mito Engine
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -59,7 +60,7 @@
 | Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
 | Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
 | Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
-| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
+| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
 | Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
 | WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
 | Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -67,6 +68,9 @@
 | WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
 | Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
+| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
+| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
+| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
 # OpenDAL
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
```
```diff
@@ -371,6 +371,21 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
+    - title: 'Frontend Handle Bulk Insert Elapsed Time '
+      type: timeseries
+      description: Per-stage time for frontend to handle bulk insert requests
+      unit: s
+      queries:
+        - expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+        - expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
 - title: Mito Engine
   panels:
     - title: Request OPS per Instance
@@ -472,7 +487,7 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{ instance }}]-[{{pod}}]'
-    - title: Compaction P99 per Instance by Stage
+    - title: Compaction Elapsed Time per Instance by Stage
       type: timeseries
       description: Compaction latency by stage
       unit: s
@@ -482,6 +497,11 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
+        - expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
     - title: Compaction P99 per Instance
       type: timeseries
      description: Compaction P99 per Instance.
@@ -562,6 +582,51 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]'
+    - title: Compaction Input/Output Bytes
+      type: timeseries
+      description: Compaction oinput output bytes
+      unit: bytes
+      queries:
+        - expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-input'
+        - expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-output'
+    - title: Region Worker Handle Bulk Insert Requests
+      type: timeseries
+      description: Per-stage elapsed time for region worker to handle bulk insert region requests.
+      unit: s
+      queries:
+        - expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
+        - expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+    - title: Region Worker Convert Requests
+      type: timeseries
+      description: Per-stage elapsed time for region worker to decode requests.
+      unit: s
+      queries:
+        - expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
+        - expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
 - title: OpenDAL
   panels:
     - title: QPS per Instance
```
File diff suppressed because it is too large.
```diff
@@ -46,6 +46,7 @@
 | Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
 | Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
 | Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
+| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
 # Mito Engine
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -59,7 +60,7 @@
 | Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
 | Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
 | Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
-| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
+| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
 | Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
 | WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
 | Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -67,6 +68,9 @@
 | WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
 | Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
+| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
+| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
+| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
 # OpenDAL
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
```
```diff
@@ -371,6 +371,21 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
+    - title: 'Frontend Handle Bulk Insert Elapsed Time '
+      type: timeseries
+      description: Per-stage time for frontend to handle bulk insert requests
+      unit: s
+      queries:
+        - expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+        - expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
 - title: Mito Engine
   panels:
     - title: Request OPS per Instance
@@ -472,7 +487,7 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{ instance }}]-[{{pod}}]'
-    - title: Compaction P99 per Instance by Stage
+    - title: Compaction Elapsed Time per Instance by Stage
      type: timeseries
      description: Compaction latency by stage
      unit: s
@@ -482,6 +497,11 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
+        - expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
     - title: Compaction P99 per Instance
       type: timeseries
       description: Compaction P99 per Instance.
@@ -562,6 +582,51 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]'
+    - title: Compaction Input/Output Bytes
+      type: timeseries
+      description: Compaction oinput output bytes
+      unit: bytes
+      queries:
+        - expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-input'
+        - expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-output'
+    - title: Region Worker Handle Bulk Insert Requests
+      type: timeseries
+      description: Per-stage elapsed time for region worker to handle bulk insert region requests.
+      unit: s
+      queries:
+        - expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
+        - expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
+    - title: Region Worker Convert Requests
+      type: timeseries
+      description: Per-stage elapsed time for region worker to decode requests.
+      unit: s
+      queries:
+        - expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
+        - expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
+          datasource:
+            type: prometheus
+            uid: ${metrics}
+          legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
 - title: OpenDAL
   panels:
     - title: QPS per Instance
```
@@ -6,7 +6,7 @@ DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35

remove_instance_filters() {
  # Remove the instance filters for the standalone dashboards.
  sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//; s/instance=~\\"$frontend\\",//; s/instance=~\\"$frontend\\"//; s/instance=~\\"$metasrv\\",//; s/instance=~\\"$metasrv\\"//; s/instance=~\\"$flownode\\",//; s/instance=~\\"$flownode\\"//;' $CLUSTER_DASHBOARD_DIR/dashboard.json > $STANDALONE_DASHBOARD_DIR/dashboard.json
  sed -E 's/instance=~\\"(\$datanode|\$frontend|\$metasrv|\$flownode)\\",?//g' "$CLUSTER_DASHBOARD_DIR/dashboard.json" > "$STANDALONE_DASHBOARD_DIR/dashboard.json"
}
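The replacement collapses eight sed substitutions into one alternation with an optional trailing comma, and quotes the paths. A minimal Rust sketch of the same rewrite, assuming the `regex` crate (illustrative only, not part of this change):

use regex::Regex;

// Strip instance=~\"$<role>\" filters (plus an optional trailing comma)
// from a cluster dashboard JSON string, mirroring the sed expression above.
fn strip_instance_filters(dashboard_json: &str) -> String {
    let re = Regex::new(r#"instance=~\\"\$(datanode|frontend|metasrv|flownode)\\",?"#).unwrap();
    re.replace_all(dashboard_json, "").into_owned()
}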

generate_intermediate_dashboards_and_docs() {

@@ -26,6 +26,13 @@ excludes = [
    "src/common/base/src/secrets.rs",
    "src/servers/src/repeated_field.rs",
    "src/servers/src/http/test_helpers.rs",
    # enterprise
    "src/common/meta/src/rpc/ddl/trigger.rs",
    "src/operator/src/expr_helper/trigger.rs",
    "src/sql/src/statements/create/trigger.rs",
    "src/sql/src/statements/show/trigger.rs",
    "src/sql/src/parsers/create_parser/trigger.rs",
    "src/sql/src/parsers/show_parser/trigger.rs",
]

[properties]

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2025-04-15"
channel = "nightly-2025-05-19"

@@ -5,8 +5,12 @@ edition.workspace = true
license.workspace = true

[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend"]
default = [
    "pg_kvbackend",
    "mysql_kvbackend",
]
pg_kvbackend = ["common-meta/pg_kvbackend", "meta-srv/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
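The `pg_kvbackend`/`mysql_kvbackend` features now forward to `meta-srv` as well and are enabled by default. A hedged sketch of how such a forwarded feature is typically consumed in code (names are illustrative, not from this diff):

// Compiled only when the crate is built with --features pg_kvbackend;
// the cfg matches the feature name declared above.
#[cfg(feature = "pg_kvbackend")]
fn postgres_backend_enabled() -> bool {
    true
}

#[cfg(not(feature = "pg_kvbackend"))]
fn postgres_backend_enabled() -> bool {
    false
}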

[lints]
workspace = true
@@ -43,11 +47,9 @@ etcd-client.workspace = true
futures.workspace = true
humantime.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
nu-ansi-term = "0.46"
opendal = { version = "0.51.1", features = [
    "services-fs",
    "services-s3",
] }
object-store.workspace = true
query.workspace = true
rand.workspace = true
reqwest.workspace = true

@@ -17,6 +17,7 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use object_store::Error as ObjectStoreError;
use snafu::{Location, Snafu};

#[derive(Snafu)]
@@ -225,7 +226,7 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: opendal::Error,
        error: ObjectStoreError,
    },
    #[snafu(display("S3 config needs to be set"))]
    S3ConfigNotSet {
@@ -237,6 +238,12 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },
    #[snafu(display("KV backend not set: {}", backend))]
    KvBackendNotSet {
        backend: String,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -273,8 +280,9 @@ impl ErrorExt for Error {

            Error::Other { source, .. } => source.status_code(),
            Error::OpenDal { .. } => StatusCode::Internal,
            Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
            Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
            Error::S3ConfigNotSet { .. }
            | Error::OutputDirNotSet { .. }
            | Error::KvBackendNotSet { .. } => StatusCode::InvalidArguments,

            Error::BuildRuntime { source, .. } => source.status_code(),

@@ -21,8 +21,8 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use opendal::layers::LoggingLayer;
use opendal::{services, Operator};
use object_store::layers::LoggingLayer;
use object_store::{services, ObjectStore};
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
@@ -470,7 +470,7 @@ impl Export {
        Ok(())
    }

    async fn build_operator(&self) -> Result<Operator> {
    async fn build_operator(&self) -> Result<ObjectStore> {
        if self.s3 {
            self.build_s3_operator().await
        } else {
@@ -479,11 +479,11 @@ impl Export {
    }

    /// Build the operator, preferring the local file system.
    async fn build_prefer_fs_operator(&self) -> Result<Operator> {
    async fn build_prefer_fs_operator(&self) -> Result<ObjectStore> {
        // If running in S3 mode and s3_ddl_local_dir is set, use it as the root.
        if self.s3 && self.s3_ddl_local_dir.is_some() {
            let root = self.s3_ddl_local_dir.as_ref().unwrap().clone();
            let op = Operator::new(services::Fs::default().root(&root))
            let op = ObjectStore::new(services::Fs::default().root(&root))
                .context(OpenDalSnafu)?
                .layer(LoggingLayer::default())
                .finish();
@@ -495,7 +495,7 @@ impl Export {
        }
    }

    async fn build_s3_operator(&self) -> Result<Operator> {
    async fn build_s3_operator(&self) -> Result<ObjectStore> {
        let mut builder = services::S3::default().bucket(
            self.s3_bucket
                .as_ref()
@@ -522,20 +522,20 @@ impl Export {
            builder = builder.secret_access_key(secret_key);
        }

        let op = Operator::new(builder)
        let op = ObjectStore::new(builder)
            .context(OpenDalSnafu)?
            .layer(LoggingLayer::default())
            .finish();
        Ok(op)
    }

    async fn build_fs_operator(&self) -> Result<Operator> {
    async fn build_fs_operator(&self) -> Result<ObjectStore> {
        let root = self
            .output_dir
            .as_ref()
            .context(OutputDirNotSetSnafu)?
            .clone();
        let op = Operator::new(services::Fs::default().root(&root))
        let op = ObjectStore::new(services::Fs::default().root(&root))
            .context(OpenDalSnafu)?
            .layer(LoggingLayer::default())
            .finish();
@@ -642,11 +642,14 @@ impl Export {

    async fn write_to_storage(
        &self,
        op: &Operator,
        op: &ObjectStore,
        file_path: &str,
        content: Vec<u8>,
    ) -> Result<()> {
        op.write(file_path, content).await.context(OpenDalSnafu)
        op.write(file_path, content)
            .await
            .context(OpenDalSnafu)
            .map(|_| ())
    }
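The trailing `.map(|_| ())` is what keeps the signature at `Result<()>`: opendal 0.51 returns the write's metadata from `write` instead of unit. A minimal sketch of the pattern, assuming the `object_store` re-exports used above (the helper name is illustrative):

use object_store::ObjectStore;

// Newer opendal returns Metadata from `write`; callers that only care
// about success discard it to keep a `Result<()>`-shaped signature.
async fn write_discarding_metadata(
    op: &ObjectStore,
    path: &str,
    bytes: Vec<u8>,
) -> std::result::Result<(), object_store::Error> {
    op.write(path, bytes).await.map(|_| ())
}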

    fn get_storage_params(&self, schema: &str) -> (String, String) {

@@ -17,6 +17,7 @@ mod database;
pub mod error;
mod export;
mod import;
mod meta_snapshot;

use async_trait::async_trait;
use clap::Parser;
@@ -27,6 +28,7 @@ use error::Result;
pub use crate::bench::BenchTableMetadataCommand;
pub use crate::export::ExportCommand;
pub use crate::import::ImportCommand;
pub use crate::meta_snapshot::{MetaRestoreCommand, MetaSnapshotCommand};

#[async_trait]
pub trait Tool: Send + Sync {

329
src/cli/src/meta_snapshot.rs
Normal file
@@ -0,0 +1,329 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use async_trait::async_trait;
use clap::Parser;
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::snapshot::MetadataSnapshotManager;
use meta_srv::bootstrap::create_etcd_client;
use meta_srv::metasrv::BackendImpl;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
use snafu::ResultExt;

use crate::error::{KvBackendNotSetSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
use crate::Tool;

#[derive(Debug, Default, Parser)]
struct MetaConnection {
    /// The endpoint of the store: one of etcd, pg or mysql.
    #[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
    store_addrs: Vec<String>,
    /// The database backend.
    #[clap(long, value_enum)]
    backend: Option<BackendImpl>,
    #[clap(long, default_value = "")]
    store_key_prefix: String,
    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    #[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
    meta_table_name: String,
    #[clap(long, default_value = "128")]
    max_txn_ops: usize,
}

impl MetaConnection {
    pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
        let max_txn_ops = self.max_txn_ops;
        let store_addrs = &self.store_addrs;
        if store_addrs.is_empty() {
            KvBackendNotSetSnafu { backend: "all" }
                .fail()
                .map_err(BoxedError::new)
        } else {
            let kvbackend = match self.backend {
                Some(BackendImpl::EtcdStore) => {
                    let etcd_client = create_etcd_client(store_addrs)
                        .await
                        .map_err(BoxedError::new)?;
                    Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
                }
                #[cfg(feature = "pg_kvbackend")]
                Some(BackendImpl::PostgresStore) => {
                    let table_name = &self.meta_table_name;
                    let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
                        .await
                        .map_err(BoxedError::new)?;
                    Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
                        pool,
                        table_name,
                        max_txn_ops,
                    )
                    .await
                    .map_err(BoxedError::new)?)
                }
                #[cfg(feature = "mysql_kvbackend")]
                Some(BackendImpl::MysqlStore) => {
                    let table_name = &self.meta_table_name;
                    let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
                        .await
                        .map_err(BoxedError::new)?;
                    Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
                        pool,
                        table_name,
                        max_txn_ops,
                    )
                    .await
                    .map_err(BoxedError::new)?)
                }
                _ => KvBackendNotSetSnafu { backend: "all" }
                    .fail()
                    .map_err(BoxedError::new),
            };
            if self.store_key_prefix.is_empty() {
                kvbackend
            } else {
                let chroot_kvbackend =
                    ChrootKvBackend::new(self.store_key_prefix.as_bytes().to_vec(), kvbackend?);
                Ok(Arc::new(chroot_kvbackend))
            }
        }
    }
}

// TODO(qtang): Abstract a generic s3 config for export import meta snapshot restore
#[derive(Debug, Default, Parser)]
struct S3Config {
    /// Whether to use S3 as the output directory. Defaults to false.
    #[clap(long, default_value = "false")]
    s3: bool,
    /// The s3 bucket name.
    #[clap(long)]
    s3_bucket: Option<String>,
    /// The s3 region.
    #[clap(long)]
    s3_region: Option<String>,
    /// The s3 access key.
    #[clap(long)]
    s3_access_key: Option<SecretString>,
    /// The s3 secret key.
    #[clap(long)]
    s3_secret_key: Option<SecretString>,
    /// The s3 endpoint. If not set, the default endpoint for the region is used.
    #[clap(long)]
    s3_endpoint: Option<String>,
}

impl S3Config {
    pub fn build(&self, root: &str) -> Result<Option<ObjectStore>, BoxedError> {
        if !self.s3 {
            Ok(None)
        } else {
            if self.s3_region.is_none()
                || self.s3_access_key.is_none()
                || self.s3_secret_key.is_none()
                || self.s3_bucket.is_none()
            {
                return S3ConfigNotSetSnafu.fail().map_err(BoxedError::new);
            }
            // Safety: unwrap is safe because we have checked the options above.
            let mut config = S3::default()
                .bucket(self.s3_bucket.as_ref().unwrap())
                .region(self.s3_region.as_ref().unwrap())
                .access_key_id(self.s3_access_key.as_ref().unwrap().expose_secret())
                .secret_access_key(self.s3_secret_key.as_ref().unwrap().expose_secret());

            if !root.is_empty() && root != "." {
                config = config.root(root);
            }

            if let Some(endpoint) = &self.s3_endpoint {
                config = config.endpoint(endpoint);
            }
            Ok(Some(
                ObjectStore::new(config)
                    .context(OpenDalSnafu)
                    .map_err(BoxedError::new)?
                    .finish(),
            ))
        }
    }
}
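One design consequence worth noting: `build` returns `Ok(None)` rather than an error when `--s3` is off, which is what lets the commands below fall back to the local-filesystem store. A hedged sketch (assumes the types above; `demo` is illustrative):

fn demo() -> Result<(), common_error::ext::BoxedError> {
    // With `--s3` left at its default of false, no object store is built
    // and the caller switches to create_local_file_object_store().
    let cfg = S3Config::default();
    assert!(cfg.build("")?.is_none());
    Ok(())
}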

/// Export metadata snapshot tool.
/// This tool is used to export a metadata snapshot from etcd, pg or mysql.
/// It will dump the metadata snapshot to a local file or an S3 bucket.
/// The snapshot file will be in binary format.
#[derive(Debug, Default, Parser)]
pub struct MetaSnapshotCommand {
    /// The connection to the metadata store.
    #[clap(flatten)]
    connection: MetaConnection,
    /// The s3 config.
    #[clap(flatten)]
    s3_config: S3Config,
    /// The name of the target snapshot file. The file extension is added automatically.
    #[clap(long, default_value = "metadata_snapshot")]
    file_name: String,
    /// The directory to store the snapshot file.
    /// If the target output is an S3 bucket, this is the root directory in the bucket.
    /// If the target output is a local file, this is the local directory.
    #[clap(long, default_value = "")]
    output_dir: String,
}

fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError> {
    let root = if root.is_empty() { "." } else { root };
    let object_store = ObjectStore::new(Fs::default().root(root))
        .context(OpenDalSnafu)
        .map_err(BoxedError::new)?
        .finish();
    Ok(object_store)
}

impl MetaSnapshotCommand {
    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
        let kvbackend = self.connection.build().await?;
        let output_dir = &self.output_dir;
        let object_store = self.s3_config.build(output_dir).map_err(BoxedError::new)?;
        if let Some(store) = object_store {
            let tool = MetaSnapshotTool {
                inner: MetadataSnapshotManager::new(kvbackend, store),
                target_file: self.file_name.clone(),
            };
            Ok(Box::new(tool))
        } else {
            let object_store = create_local_file_object_store(output_dir)?;
            let tool = MetaSnapshotTool {
                inner: MetadataSnapshotManager::new(kvbackend, object_store),
                target_file: self.file_name.clone(),
            };
            Ok(Box::new(tool))
        }
    }
}

pub struct MetaSnapshotTool {
    inner: MetadataSnapshotManager,
    target_file: String,
}

#[async_trait]
impl Tool for MetaSnapshotTool {
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        self.inner
            .dump("", &self.target_file)
            .await
            .map_err(BoxedError::new)?;
        Ok(())
    }
}

/// Restore metadata snapshot tool.
/// This tool restores a metadata snapshot into etcd, pg or mysql.
/// It will read the metadata snapshot from a local file or an S3 bucket.
#[derive(Debug, Default, Parser)]
pub struct MetaRestoreCommand {
    /// The connection to the metadata store.
    #[clap(flatten)]
    connection: MetaConnection,
    /// The s3 config.
    #[clap(flatten)]
    s3_config: S3Config,
    /// The name of the target snapshot file.
    #[clap(long, default_value = "metadata_snapshot.metadata.fb")]
    file_name: String,
    /// The directory to read the snapshot file from.
    #[clap(long, default_value = ".")]
    input_dir: String,
    #[clap(long, default_value = "false")]
    force: bool,
}

impl MetaRestoreCommand {
    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
        let kvbackend = self.connection.build().await?;
        let input_dir = &self.input_dir;
        let object_store = self.s3_config.build(input_dir).map_err(BoxedError::new)?;
        if let Some(store) = object_store {
            let tool = MetaRestoreTool::new(
                MetadataSnapshotManager::new(kvbackend, store),
                self.file_name.clone(),
                self.force,
            );
            Ok(Box::new(tool))
        } else {
            let object_store = create_local_file_object_store(input_dir)?;
            let tool = MetaRestoreTool::new(
                MetadataSnapshotManager::new(kvbackend, object_store),
                self.file_name.clone(),
                self.force,
            );
            Ok(Box::new(tool))
        }
    }
}

pub struct MetaRestoreTool {
    inner: MetadataSnapshotManager,
    source_file: String,
    force: bool,
}

impl MetaRestoreTool {
    pub fn new(inner: MetadataSnapshotManager, source_file: String, force: bool) -> Self {
        Self {
            inner,
            source_file,
            force,
        }
    }
}

#[async_trait]
impl Tool for MetaRestoreTool {
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        let clean = self
            .inner
            .check_target_source_clean()
            .await
            .map_err(BoxedError::new)?;
        if clean {
            common_telemetry::info!(
                "The target source is clean, we will restore the metadata snapshot."
            );
            self.inner
                .restore(&self.source_file)
                .await
                .map_err(BoxedError::new)?;
            Ok(())
        } else if !self.force {
            common_telemetry::warn!(
                "The target source is not clean. If you want to restore the metadata snapshot forcefully, please use the --force option."
            );
            Ok(())
        } else {
            common_telemetry::info!("The target source is not clean, we will restore the metadata snapshot with --force.");
            self.inner
                .restore(&self.source_file)
                .await
                .map_err(BoxedError::new)?;
            Ok(())
        }
    }
}
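A hedged sketch of how the snapshot command might be driven end to end; the argument vector and the `etcd-store` value-enum spelling are assumptions, not taken from this diff:

use clap::Parser;

async fn run_snapshot() -> std::result::Result<(), common_error::ext::BoxedError> {
    // Parse flags exactly as the derives above define them (binary name is a placeholder).
    let cmd = MetaSnapshotCommand::parse_from([
        "meta-snapshot",
        "--backend", "etcd-store",
        "--store-addrs", "127.0.0.1:2379",
        "--file-name", "metadata_snapshot",
    ]);
    let tool = cmd.build().await?;
    tool.do_work().await
}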
@@ -25,6 +25,7 @@ common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
datatypes.workspace = true
enum_dispatch = "0.3"
futures.workspace = true
futures-util.workspace = true

@@ -14,6 +14,7 @@

use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
@@ -35,7 +36,7 @@ use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper};
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
@@ -49,7 +50,7 @@ use crate::error::{
    ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
    InvalidTonicMetadataValueSnafu, ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};
use crate::{error, from_grpc_response, Client, Result};

type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;

@@ -337,20 +338,30 @@ impl Database {
            );
            Ok(Output::new_with_affected_rows(rows))
        }
        FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
        FlightMessage::RecordBatch(_) | FlightMessage::Metrics(_) => {
            IllegalFlightMessagesSnafu {
                reason: "The first flight message cannot be a RecordBatch or Metrics message",
            }
            .fail()
        }
        FlightMessage::Schema(schema) => {
            let schema = Arc::new(
                datatypes::schema::Schema::try_from(schema)
                    .context(error::ConvertSchemaSnafu)?,
            );
            let schema_cloned = schema.clone();
            let stream = Box::pin(stream!({
                while let Some(flight_message) = flight_message_stream.next().await {
                    let flight_message = flight_message
                        .map_err(BoxedError::new)
                        .context(ExternalSnafu)?;
                    match flight_message {
                        FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
                        FlightMessage::RecordBatch(arrow_batch) => {
                            yield RecordBatch::try_from_df_record_batch(
                                schema_cloned.clone(),
                                arrow_batch,
                            )
                        }
                        FlightMessage::Metrics(_) => {}
                        FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
                            yield IllegalFlightMessagesSnafu {reason: format!("A Schema message must be succeeded exclusively by a set of RecordBatch messages, flight_message: {:?}", flight_message)}

@@ -117,6 +117,13 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to convert Schema"))]
    ConvertSchema {
        #[snafu(implicit)]
        location: Location,
        source: datatypes::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -137,6 +144,7 @@ impl ErrorExt for Error {
            | Error::CreateTlsChannel { source, .. } => source.status_code(),
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
            Error::InvalidTonicMetadataValue { .. } => StatusCode::InvalidArguments,
            Error::ConvertSchema { source, .. } => source.status_code(),
        }
    }

@@ -28,7 +28,7 @@ use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_query::request::QueryRequest;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
@@ -55,6 +55,7 @@ impl Datanode for RegionRequester {
        if err.should_retry() {
            meta_error::Error::RetryLater {
                source: BoxedError::new(err),
                clean_poisons: false,
            }
        } else {
            meta_error::Error::External {
@@ -146,6 +147,10 @@ impl RegionRequester {

        let tracing_context = TracingContext::from_current_span();

        let schema = Arc::new(
            datatypes::schema::Schema::try_from(schema).context(error::ConvertSchemaSnafu)?,
        );
        let schema_cloned = schema.clone();
        let stream = Box::pin(stream!({
            let _span = tracing_context.attach(common_telemetry::tracing::info_span!(
                "poll_flight_data_stream"
@@ -156,7 +161,12 @@ impl RegionRequester {
                .context(ExternalSnafu)?;

            match flight_message {
                FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
                FlightMessage::RecordBatch(record_batch) => {
                    yield RecordBatch::try_from_df_record_batch(
                        schema_cloned.clone(),
                        record_batch,
                    )
                }
                FlightMessage::Metrics(s) => {
                    let m = serde_json::from_str(&s).ok().map(Arc::new);
                    metrics_ref.swap(m);

@@ -10,7 +10,13 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["servers/pprof", "servers/mem-prof"]
default = [
    "servers/pprof",
    "servers/mem-prof",
    "meta-srv/pg_kvbackend",
    "meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]

[lints]

@@ -35,6 +35,8 @@ use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRe
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
#[cfg(feature = "enterprise")]
use common_meta::ddl_manager::TriggerDdlManagerRef;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
@@ -69,6 +71,7 @@ use frontend::service_config::{
};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
@@ -153,6 +156,7 @@ pub struct StandaloneOptions {
    pub init_regions_parallelism: usize,
    pub max_in_flight_write_bytes: Option<ReadableSize>,
    pub slow_query: Option<SlowQueryOptions>,
    pub query: QueryOptions,
}

impl Default for StandaloneOptions {
@@ -185,6 +189,7 @@ impl Default for StandaloneOptions {
            init_regions_parallelism: 16,
            max_in_flight_write_bytes: None,
            slow_query: Some(SlowQueryOptions::default()),
            query: QueryOptions::default(),
        }
    }
}
@@ -240,6 +245,7 @@ impl StandaloneOptions {
            grpc: cloned_opts.grpc,
            init_regions_in_background: cloned_opts.init_regions_in_background,
            init_regions_parallelism: cloned_opts.init_regions_parallelism,
            query: cloned_opts.query,
            ..Default::default()
        }
    }
@@ -579,6 +585,8 @@ impl StartCommand {
            flow_id_sequence,
        ));

        #[cfg(feature = "enterprise")]
        let trigger_ddl_manager: Option<TriggerDdlManagerRef> = plugins.get();
        let ddl_task_executor = Self::create_ddl_task_executor(
            procedure_manager.clone(),
            node_manager.clone(),
@@ -587,6 +595,8 @@ impl StartCommand {
            table_meta_allocator,
            flow_metadata_manager,
            flow_meta_allocator,
            #[cfg(feature = "enterprise")]
            trigger_ddl_manager,
        )
        .await?;

@@ -651,6 +661,7 @@ impl StartCommand {
        })
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn create_ddl_task_executor(
        procedure_manager: ProcedureManagerRef,
        node_manager: NodeManagerRef,
@@ -659,6 +670,7 @@ impl StartCommand {
        table_metadata_allocator: TableMetadataAllocatorRef,
        flow_metadata_manager: FlowMetadataManagerRef,
        flow_metadata_allocator: FlowMetadataAllocatorRef,
        #[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
    ) -> Result<ProcedureExecutorRef> {
        let procedure_executor: ProcedureExecutorRef = Arc::new(
            DdlManager::try_new(
@@ -675,6 +687,8 @@ impl StartCommand {
            },
            procedure_manager,
            true,
            #[cfg(feature = "enterprise")]
            trigger_ddl_manager,
        )
        .context(error::InitDdlManagerSnafu)?,
        );

90
src/common/function/src/adjust_flow.rs
Normal file
@@ -0,0 +1,90 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_macro::admin_fn;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
use snafu::ensure;
use store_api::storage::ConcreteDataType;

use crate::handlers::FlowServiceHandlerRef;
use crate::helper::parse_catalog_flow;

fn adjust_signature() -> Signature {
    Signature::exact(
        vec![
            ConcreteDataType::string_datatype(), // flow name
            ConcreteDataType::uint64_datatype(), // min_run_interval in seconds
            ConcreteDataType::uint64_datatype(), // max filter number per query
        ],
        Volatility::Immutable,
    )
}

#[admin_fn(
    name = AdjustFlowFunction,
    display_name = adjust_flow,
    sig_fn = adjust_signature,
    ret = uint64
)]
pub(crate) async fn adjust_flow(
    flow_service_handler: &FlowServiceHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    ensure!(
        params.len() == 3,
        InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of the args is not correct, expect 3, have: {}",
                params.len()
            ),
        }
    );

    let (flow_name, min_run_interval, max_filter_num) = match (params[0], params[1], params[2]) {
        (
            ValueRef::String(flow_name),
            ValueRef::UInt64(min_run_interval),
            ValueRef::UInt64(max_filter_num),
        ) => (flow_name, min_run_interval, max_filter_num),
        _ => {
            return UnsupportedInputDataTypeSnafu {
                function: "adjust_flow",
                datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
            }
            .fail();
        }
    };

    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;

    let res = flow_service_handler
        .adjust(
            &catalog_name,
            &flow_name,
            min_run_interval,
            max_filter_num as usize,
            query_ctx.clone(),
        )
        .await?;
    let affected_rows = res.affected_rows;

    Ok(Value::from(affected_rows))
}
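The match above is the whole argument contract of the new admin function. A tiny sketch of the tuple it accepts (names and values are illustrative only):

use datatypes::value::ValueRef;

// The three positional arguments adjust_flow validates, in order.
fn example_params() -> [ValueRef<'static>; 3] {
    [
        ValueRef::String("greptime.my_flow"), // <catalog>.<flow-name> or a bare <flow-name>
        ValueRef::UInt64(30),                 // min_run_interval in seconds
        ValueRef::UInt64(64),                 // max filter number per query
    ]
}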
@@ -26,6 +26,7 @@ use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;
use remove_region_follower::RemoveRegionFollowerFunction;

use crate::adjust_flow::AdjustFlowFunction;
use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;

@@ -43,5 +44,6 @@ impl AdminFunction {
        registry.register_async(Arc::new(FlushTableFunction));
        registry.register_async(Arc::new(CompactTableFunction));
        registry.register_async(Arc::new(FlushFlowFunction));
        registry.register_async(Arc::new(AdjustFlowFunction));
    }
}

@@ -163,7 +163,7 @@ impl DfAccumulator for UddSketchState {
                }
            }
            // Meaning it is instantiated as `uddsketch_merge`.
            DataType::Binary => self.merge_batch(&[array.clone()])?,
            DataType::Binary => self.merge_batch(std::slice::from_ref(array))?,
            _ => {
                return not_impl_err!(
                    "UDDSketch functions do not support data type: {}",

@@ -12,21 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
    ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
    UnsupportedInputDataTypeSnafu,
    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
use snafu::{ensure, ResultExt};
use sql::parser::ParserContext;
use snafu::ensure;
use store_api::storage::ConcreteDataType;

use crate::handlers::FlowServiceHandlerRef;
use crate::helper::parse_catalog_flow;

fn flush_signature() -> Signature {
    Signature::uniform(
@@ -47,20 +45,6 @@ pub(crate) async fn flush_flow(
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;

    let res = flow_service_handler
        .flush(&catalog_name, &flow_name, query_ctx.clone())
        .await?;
    let affected_rows = res.affected_rows;

    Ok(Value::from(affected_rows))
}

fn parse_flush_flow(
    params: &[ValueRef<'_>],
    query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
    ensure!(
        params.len() == 1,
        InvalidFuncArgsSnafu {
@@ -70,7 +54,6 @@ fn parse_flush_flow(
            ),
        }
    );

    let ValueRef::String(flow_name) = params[0] else {
        return UnsupportedInputDataTypeSnafu {
            function: "flush_flow",
@@ -78,27 +61,14 @@ fn parse_flush_flow(
        }
        .fail();
    };
    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
        .map_err(BoxedError::new)
        .context(ExecuteSnafu)?;
    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;

    let (catalog_name, flow_name) = match &obj_name.0[..] {
        [flow_name] => (
            query_ctx.current_catalog().to_string(),
            flow_name.value.clone(),
        ),
        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
        _ => {
            return InvalidFuncArgsSnafu {
                err_msg: format!(
                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
                    obj_name
                ),
            }
            .fail()
        }
    };
    Ok((catalog_name, flow_name))
    let res = flow_service_handler
        .flush(&catalog_name, &flow_name, query_ctx.clone())
        .await?;
    let affected_rows = res.affected_rows;

    Ok(Value::from(affected_rows))
}

#[cfg(test)]
@@ -154,10 +124,7 @@ mod test {
        ("catalog.flow_name", ("catalog", "flow_name")),
    ];
    for (input, expected) in testcases.iter() {
        let args = vec![*input];
        let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();

        let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
        let result = parse_catalog_flow(input, &QueryContext::arc()).unwrap();
        assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
    }
}

@@ -87,6 +87,15 @@ pub trait FlowServiceHandler: Send + Sync {
        flow: &str,
        ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse>;

    async fn adjust(
        &self,
        catalog: &str,
        flow: &str,
        min_run_interval_secs: u64,
        max_filter_num_per_query: usize,
        ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse>;
}

pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

@@ -12,12 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::{InvalidInputTypeSnafu, Result};
use common_error::ext::BoxedError;
use common_query::error::{ExecuteSnafu, InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::parser::ParserContext;

/// Create a function signature with oneof signatures of interleaving two arguments.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -43,3 +46,30 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
    })
    .map(|v| v.as_u64())
}

pub fn parse_catalog_flow(
    flow_name: &str,
    query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
        .map_err(BoxedError::new)
        .context(ExecuteSnafu)?;

    let (catalog_name, flow_name) = match &obj_name.0[..] {
        [flow_name] => (
            query_ctx.current_catalog().to_string(),
            flow_name.value.clone(),
        ),
        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
        _ => {
            return InvalidFuncArgsSnafu {
                err_msg: format!(
                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
                    obj_name
                ),
            }
            .fail()
        }
    };
    Ok((catalog_name, flow_name))
}
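With the parsing hoisted into this shared helper, flush_flow and adjust_flow use one name-resolution path. A hedged usage sketch, mirroring the test shown earlier (assumes a default QueryContext):

use session::context::QueryContext;

fn demo() {
    // A bare name resolves against the context's current catalog;
    // "catalog.flow" splits into its two parts.
    let (catalog, flow) = parse_catalog_flow("my_flow", &QueryContext::arc()).unwrap();
    assert_eq!(flow, "my_flow");
    let _ = catalog; // falls back to the current catalog name
}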

@@ -15,6 +15,7 @@
#![feature(let_chains)]
#![feature(try_blocks)]

mod adjust_flow;
mod admin;
mod flush_flow;
mod macros;

@@ -468,8 +468,8 @@ mod tests {
        let empty_values = vec![""];
        let empty_input = Arc::new(StringVector::from_slice(&empty_values)) as VectorRef;

        let ipv4_result = ipv4_func.eval(&ctx, &[empty_input.clone()]);
        let ipv6_result = ipv6_func.eval(&ctx, &[empty_input.clone()]);
        let ipv4_result = ipv4_func.eval(&ctx, std::slice::from_ref(&empty_input));
        let ipv6_result = ipv6_func.eval(&ctx, std::slice::from_ref(&empty_input));

        assert!(ipv4_result.is_err());
        assert!(ipv6_result.is_err());
@@ -478,7 +478,7 @@ mod tests {
        let invalid_values = vec!["not an ip", "192.168.1.256", "zzzz::ffff"];
        let invalid_input = Arc::new(StringVector::from_slice(&invalid_values)) as VectorRef;

        let ipv4_result = ipv4_func.eval(&ctx, &[invalid_input.clone()]);
        let ipv4_result = ipv4_func.eval(&ctx, std::slice::from_ref(&invalid_input));

        assert!(ipv4_result.is_err());
    }
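These call sites switch from cloning the VectorRef into a temporary array to borrowing it: `std::slice::from_ref` yields a one-element slice without a clone or an allocation. A self-contained sketch:

// std::slice::from_ref turns &T into &[T]: borrowing, no clone, no allocation.
fn demo() {
    let value = String::from("192.168.1.1");
    let as_slice: &[String] = std::slice::from_ref(&value);
    assert_eq!(as_slice.len(), 1);
    assert_eq!(as_slice[0], value);
}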

@@ -294,7 +294,7 @@ mod tests {
        let input = Arc::new(StringVector::from_slice(&values)) as VectorRef;

        // Convert IPv6 addresses to binary
        let binary_result = to_num.eval(&ctx, &[input.clone()]).unwrap();
        let binary_result = to_num.eval(&ctx, std::slice::from_ref(&input)).unwrap();

        // Convert binary to hex string representation (for ipv6_num_to_string)
        let mut hex_strings = Vec::new();

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod clamp;
pub mod clamp;
mod modulo;
mod pow;
mod rate;
@@ -20,7 +20,7 @@ mod rate;
use std::fmt;
use std::sync::Arc;

pub use clamp::ClampFunction;
pub use clamp::{ClampFunction, ClampMaxFunction, ClampMinFunction};
use common_query::error::{GeneralDataFusionSnafu, Result};
use common_query::prelude::Signature;
use datafusion::error::DataFusionError;
@@ -44,6 +44,8 @@ impl MathFunction {
        registry.register(Arc::new(RateFunction));
        registry.register(Arc::new(RangeFunction));
        registry.register(Arc::new(ClampFunction));
        registry.register(Arc::new(ClampMinFunction));
        registry.register(Arc::new(ClampMaxFunction));
    }
}

@@ -155,6 +155,182 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
    Ok(Arc::new(PrimitiveVector::<T>::from(result)))
}

#[derive(Clone, Debug, Default)]
pub struct ClampMinFunction;

const CLAMP_MIN_NAME: &str = "clamp_min";

impl Function for ClampMinFunction {
    fn name(&self) -> &str {
        CLAMP_MIN_NAME
    }

    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(input_types[0].clone())
    }

    fn signature(&self) -> Signature {
        // input, min
        Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly 2, have: {}",
                    columns.len()
                ),
            }
        );
        ensure!(
            columns[0].data_type().is_numeric(),
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The first arg's type is not numeric, have: {}",
                    columns[0].data_type()
                ),
            }
        );
        ensure!(
            columns[0].data_type() == columns[1].data_type(),
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "Arguments don't have identical types: {}, {}",
                    columns[0].data_type(),
                    columns[1].data_type()
                ),
            }
        );
        ensure!(
            columns[1].len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The second arg (min) should be scalar, have: {:?}",
                    columns[1]
                ),
            }
        );

        with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
            let input_array = columns[0].to_arrow_array();
            let input = input_array
                .as_any()
                .downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
                .unwrap();

            let min = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
                .with_context(|| {
                    InvalidFuncArgsSnafu {
                        err_msg: "The second arg (min) should not be none",
                    }
                })?;
            // For clamp_min, max is effectively infinity, so we don't use it in the clamp_impl logic.
            // We pass a default/dummy value for max.
            let max_dummy = <$S as LogicalPrimitiveType>::Native::default();

            clamp_impl::<$S, true, false>(input, min, max_dummy)
        },{
            unreachable!()
        })
    }
}

impl Display for ClampMinFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", CLAMP_MIN_NAME.to_ascii_uppercase())
    }
}

#[derive(Clone, Debug, Default)]
pub struct ClampMaxFunction;

const CLAMP_MAX_NAME: &str = "clamp_max";

impl Function for ClampMaxFunction {
    fn name(&self) -> &str {
        CLAMP_MAX_NAME
    }

    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(input_types[0].clone())
    }

    fn signature(&self) -> Signature {
        // input, max
        Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly 2, have: {}",
                    columns.len()
                ),
            }
        );
        ensure!(
            columns[0].data_type().is_numeric(),
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The first arg's type is not numeric, have: {}",
                    columns[0].data_type()
                ),
            }
        );
        ensure!(
            columns[0].data_type() == columns[1].data_type(),
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "Arguments don't have identical types: {}, {}",
                    columns[0].data_type(),
                    columns[1].data_type()
                ),
            }
        );
        ensure!(
            columns[1].len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The second arg (max) should be scalar, have: {:?}",
                    columns[1]
                ),
            }
        );

        with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
            let input_array = columns[0].to_arrow_array();
            let input = input_array
                .as_any()
                .downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
                .unwrap();

            let max = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
                .with_context(|| {
                    InvalidFuncArgsSnafu {
                        err_msg: "The second arg (max) should not be none",
                    }
                })?;
            // For clamp_max, min is effectively -infinity, so we don't use it in the clamp_impl logic.
            // We pass a default/dummy value for min.
            let min_dummy = <$S as LogicalPrimitiveType>::Native::default();

            clamp_impl::<$S, false, true>(input, min_dummy, max)
        },{
            unreachable!()
        })
    }
}

impl Display for ClampMaxFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", CLAMP_MAX_NAME.to_ascii_uppercase())
    }
}
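A distilled sketch of the dispatch that `clamp_impl::<$S, true, false>` and `::<$S, false, true>` rely on: one kernel serves clamp, clamp_min and clamp_max by toggling compile-time flags (scalar stand-in, not the vectorized code above):

// One implementation, specialized at compile time by const-generic flags.
fn clamp_scalar<const CLAMP_MIN: bool, const CLAMP_MAX: bool>(v: i64, min: i64, max: i64) -> i64 {
    let v = if CLAMP_MIN && v < min { min } else { v };
    if CLAMP_MAX && v > max { max } else { v }
}

fn demo() {
    assert_eq!(clamp_scalar::<true, false>(-3, -1, 0), -1); // clamp_min
    assert_eq!(clamp_scalar::<false, true>(2, 0, 1), 1);    // clamp_max
}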

#[cfg(test)]
mod test {

@@ -394,4 +570,134 @@ mod test {
        let result = func.eval(&FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_min_i64() {
        let inputs = [
            (
                vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
                -1,
                vec![Some(-1), Some(-1), Some(-1), Some(0), Some(1), Some(2)],
            ),
            (
                vec![Some(-3), None, Some(-1), None, None, Some(2)],
                -2,
                vec![Some(-2), None, Some(-1), None, None, Some(2)],
            ),
        ];

        let func = ClampMinFunction;
        for (in_data, min, expected) in inputs {
            let args = [
                Arc::new(Int64Vector::from(in_data)) as _,
                Arc::new(Int64Vector::from_vec(vec![min])) as _,
            ];
            let result = func
                .eval(&FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Int64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_max_i64() {
        let inputs = [
            (
                vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
                1,
                vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(1)],
            ),
            (
                vec![Some(-3), None, Some(-1), None, None, Some(2)],
                0,
                vec![Some(-3), None, Some(-1), None, None, Some(0)],
            ),
        ];

        let func = ClampMaxFunction;
        for (in_data, max, expected) in inputs {
            let args = [
                Arc::new(Int64Vector::from(in_data)) as _,
                Arc::new(Int64Vector::from_vec(vec![max])) as _,
            ];
            let result = func
                .eval(&FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Int64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_min_f64() {
        let inputs = [(
            vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
            -1.0,
            vec![Some(-1.0), Some(-1.0), Some(-1.0), Some(0.0), Some(1.0)],
        )];

        let func = ClampMinFunction;
        for (in_data, min, expected) in inputs {
            let args = [
                Arc::new(Float64Vector::from(in_data)) as _,
                Arc::new(Float64Vector::from_vec(vec![min])) as _,
            ];
            let result = func
                .eval(&FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Float64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_max_f64() {
        let inputs = [(
            vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
            0.0,
            vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(0.0)],
        )];

        let func = ClampMaxFunction;
        for (in_data, max, expected) in inputs {
            let args = [
                Arc::new(Float64Vector::from(in_data)) as _,
                Arc::new(Float64Vector::from_vec(vec![max])) as _,
            ];
            let result = func
                .eval(&FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Float64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_min_type_not_match() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let min = -1;

        let func = ClampMinFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Int64Vector::from_vec(vec![min])) as _,
        ];
        let result = func.eval(&FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_max_type_not_match() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let max = 1;

        let func = ClampMaxFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Int64Vector::from_vec(vec![max])) as _,
        ];
        let result = func.eval(&FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }
}

@@ -148,6 +148,17 @@ impl FunctionState {
    ) -> Result<api::v1::flow::FlowResponse> {
        todo!()
    }

    async fn adjust(
        &self,
        _catalog: &str,
        _flow: &str,
        _min_run_interval_secs: u64,
        _max_filter_num_per_query: usize,
        _ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse> {
        todo!()
    }
}

Self {

@@ -10,6 +10,7 @@ workspace = true
[dependencies]
api.workspace = true
arrow-flight.workspace = true
bytes.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true

146
src/common/grpc/benches/bench_flight_decoder.rs
Normal file
@@ -0,0 +1,146 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use arrow_flight::FlightData;
use bytes::Bytes;
use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage};
use common_recordbatch::DfRecordBatch;
use criterion::{criterion_group, criterion_main, Criterion};
use datatypes::arrow;
use datatypes::arrow::array::{ArrayRef, Int64Array, StringArray, TimestampMillisecondArray};
use datatypes::arrow::datatypes::DataType;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use prost::Message;

fn schema() -> arrow::datatypes::SchemaRef {
    let schema = Schema::new(vec![
        ColumnSchema::new("k0", ConcreteDataType::string_datatype(), false),
        ColumnSchema::new("k1", ConcreteDataType::string_datatype(), false),
        ColumnSchema::new(
            "ts",
            ConcreteDataType::timestamp_millisecond_datatype(),
            false,
        ),
        ColumnSchema::new("v0", ConcreteDataType::int64_datatype(), false),
        ColumnSchema::new("v1", ConcreteDataType::int64_datatype(), false),
    ]);
    schema.arrow_schema().clone()
}

/// Generate a record batch according to the provided schema and number of rows.
fn prepare_random_record_batch(
    schema: arrow::datatypes::SchemaRef,
    num_rows: usize,
) -> DfRecordBatch {
    let tag_candidates = (0..10000).map(|i| i.to_string()).collect::<Vec<_>>();

    let columns: Vec<ArrayRef> = schema
        .fields
        .iter()
        .map(|col| match col.data_type() {
            DataType::Utf8 => {
                let array = StringArray::from(
                    (0..num_rows)
                        .map(|_| {
                            let idx: usize = rand::random_range(0..10000);
                            format!("tag-{}", tag_candidates[idx])
                        })
                        .collect::<Vec<_>>(),
                );
                Arc::new(array) as ArrayRef
            }
            DataType::Timestamp(_, _) => {
                let now = common_time::util::current_time_millis();
                let array = TimestampMillisecondArray::from(
                    (0..num_rows).map(|i| now + i as i64).collect::<Vec<_>>(),
                );
                Arc::new(array) as ArrayRef
            }
            DataType::Int64 => {
                let array = Int64Array::from((0..num_rows).map(|i| i as i64).collect::<Vec<_>>());
                Arc::new(array) as ArrayRef
            }
            _ => unreachable!(),
        })
        .collect();

    DfRecordBatch::try_new(schema, columns).unwrap()
}

fn prepare_flight_data(num_rows: usize) -> (FlightData, FlightData) {
    let schema = schema();
    let mut encoder = FlightEncoder::default();
    let schema_data = encoder.encode(FlightMessage::Schema(schema.clone()));
    let rb = prepare_random_record_batch(schema, num_rows);
    let rb_data = encoder.encode(FlightMessage::RecordBatch(rb));
    (schema_data, rb_data)
}

fn decode_flight_data_from_protobuf(schema: &Bytes, payload: &Bytes) -> DfRecordBatch {
    let schema = FlightData::decode(&schema[..]).unwrap();
    let payload = FlightData::decode(&payload[..]).unwrap();
    let mut decoder = FlightDecoder::default();
    let _schema = decoder.try_decode(&schema).unwrap();
    let message = decoder.try_decode(&payload).unwrap();
    let FlightMessage::RecordBatch(batch) = message else {
        unreachable!("unexpected message");
    };
    batch
}

fn decode_flight_data_from_header_and_body(
    schema: &Bytes,
    data_header: &Bytes,
    data_body: &Bytes,
) -> DfRecordBatch {
    let mut decoder = FlightDecoder::try_from_schema_bytes(schema).unwrap();
    decoder
        .try_decode_record_batch(data_header, data_body)
        .unwrap()
}

fn bench_decode_flight_data(c: &mut Criterion) {
    let row_counts = [100000, 200000, 1000000];

    for row_count in row_counts {
        let (schema, payload) = prepare_flight_data(row_count);

        // arguments for decode_flight_data_from_protobuf
        let schema_bytes = Bytes::from(schema.encode_to_vec());
        let payload_bytes = Bytes::from(payload.encode_to_vec());

        let mut group = c.benchmark_group(format!("flight_decoder_{}_rows", row_count));
        group.bench_function("decode_from_protobuf", |b| {
            b.iter(|| decode_flight_data_from_protobuf(&schema_bytes, &payload_bytes));
        });

        group.bench_function("decode_from_header_and_body", |b| {
            b.iter(|| {
                decode_flight_data_from_header_and_body(
                    &schema.data_header,
                    &payload.data_header,
                    &payload.data_body,
                )
            });
        });

        group.finish();
    }
}

criterion_group!(benches, bench_decode_flight_data);
criterion_main!(benches);
|
||||
@@ -14,8 +14,10 @@

use criterion::criterion_main;

mod bench_flight_decoder;
mod channel_manager;

criterion_main! {
    channel_manager::benches
    channel_manager::benches,
    bench_flight_decoder::benches
}

@@ -18,6 +18,7 @@ use std::io

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::arrow::error::ArrowError;
use snafu::{Location, Snafu};

pub type Result<T> = std::result::Result<T, Error>;
@@ -59,13 +60,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to create RecordBatch"))]
    CreateRecordBatch {
        #[snafu(implicit)]
        location: Location,
        source: common_recordbatch::error::Error,
    },

    #[snafu(display("Failed to convert Arrow type: {}", from))]
    Conversion {
        from: String,
@@ -88,13 +82,6 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to convert Arrow Schema"))]
    ConvertArrowSchema {
        #[snafu(implicit)]
        location: Location,
        source: datatypes::error::Error,
    },

    #[snafu(display("Not supported: {}", feat))]
    NotSupported { feat: String },

@@ -105,6 +92,14 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed arrow operation"))]
    Arrow {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: ArrowError,
    },
}

impl ErrorExt for Error {
@@ -121,8 +116,7 @@ impl ErrorExt for Error {
            | Error::DecodeFlightData { .. }
            | Error::SerdeJson { .. } => StatusCode::Internal,

            Error::CreateRecordBatch { source, .. } => source.status_code(),
            Error::ConvertArrowSchema { source, .. } => source.status_code(),
            Error::Arrow { .. } => StatusCode::Internal,
        }
    }

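A side note on the error plumbing above: the new `Arrow` variant wraps the upstream `ArrowError` as a named source field while snafu captures the `Location` implicitly at the conversion site. A minimal self-contained sketch of that pattern (assuming snafu 0.8 and the upstream `arrow` crate; the enum name here is illustrative, not GreptimeDB's):

use arrow::error::ArrowError;
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum DemoError {
    #[snafu(display("Failed arrow operation"))]
    Arrow {
        // Filled in automatically at the .context() call site.
        #[snafu(implicit)]
        location: Location,
        // The foreign error becomes the chained source.
        #[snafu(source)]
        error: ArrowError,
    },
}

fn parse() -> Result<(), DemoError> {
    // snafu generates the `ArrowSnafu` context selector from the variant name.
    Err(ArrowError::ParseError("bad ipc header".to_string())).context(ArrowSnafu)
}
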
@@ -21,25 +21,24 @@ use api::v1::{AffectedRows, FlightMetadata, Metrics};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_recordbatch::DfRecordBatch;
use datatypes::arrow;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::arrow::ipc::{root_as_message, writer, MessageHeader};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::arrow::buffer::Buffer;
use datatypes::arrow::datatypes::{Schema as ArrowSchema, SchemaRef};
use datatypes::arrow::error::ArrowError;
use datatypes::arrow::ipc::{convert, reader, root_as_message, writer, MessageHeader};
use flatbuffers::FlatBufferBuilder;
use prost::bytes::Bytes as ProstBytes;
use prost::Message;
use snafu::{OptionExt, ResultExt};

use crate::error::{
    ConvertArrowSchemaSnafu, CreateRecordBatchSnafu, DecodeFlightDataSnafu, InvalidFlightDataSnafu,
    Result,
};
use crate::error;
use crate::error::{DecodeFlightDataSnafu, InvalidFlightDataSnafu, Result};

#[derive(Debug, Clone)]
pub enum FlightMessage {
    Schema(SchemaRef),
    Recordbatch(RecordBatch),
    RecordBatch(DfRecordBatch),
    AffectedRows(usize),
    Metrics(String),
}
@@ -67,14 +66,12 @@ impl Default for FlightEncoder {
impl FlightEncoder {
    pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
        match flight_message {
            FlightMessage::Schema(schema) => {
                SchemaAsIpc::new(schema.arrow_schema(), &self.write_options).into()
            }
            FlightMessage::Recordbatch(recordbatch) => {
            FlightMessage::Schema(schema) => SchemaAsIpc::new(&schema, &self.write_options).into(),
            FlightMessage::RecordBatch(record_batch) => {
                let (encoded_dictionaries, encoded_batch) = self
                    .data_gen
                    .encoded_batch(
                        recordbatch.df_record_batch(),
                        &record_batch,
                        &mut self.dictionary_tracker,
                        &self.write_options,
                    )
@@ -124,9 +121,58 @@ impl FlightEncoder {
#[derive(Default)]
pub struct FlightDecoder {
    schema: Option<SchemaRef>,
    schema_bytes: Option<bytes::Bytes>,
}

impl FlightDecoder {
    /// Build a [FlightDecoder] instance from provided schema bytes.
    pub fn try_from_schema_bytes(schema_bytes: &bytes::Bytes) -> Result<Self> {
        let arrow_schema = convert::try_schema_from_flatbuffer_bytes(&schema_bytes[..])
            .context(error::ArrowSnafu)?;
        Ok(Self {
            schema: Some(Arc::new(arrow_schema)),
            schema_bytes: Some(schema_bytes.clone()),
        })
    }

    pub fn try_decode_record_batch(
        &mut self,
        data_header: &bytes::Bytes,
        data_body: &bytes::Bytes,
    ) -> Result<DfRecordBatch> {
        let schema = self
            .schema
            .as_ref()
            .context(InvalidFlightDataSnafu {
                reason: "Should have decoded schema first!",
            })?
            .clone();
        let message = root_as_message(&data_header[..])
            .map_err(|err| {
                ArrowError::ParseError(format!("Unable to get root as message: {err:?}"))
            })
            .context(error::ArrowSnafu)?;
        let result = message
            .header_as_record_batch()
            .ok_or_else(|| {
                ArrowError::ParseError(
                    "Unable to convert flight data header to a record batch".to_string(),
                )
            })
            .and_then(|batch| {
                reader::read_record_batch(
                    &Buffer::from(data_body.as_ref()),
                    batch,
                    schema,
                    &HashMap::new(),
                    None,
                    &message.version(),
                )
            })
            .context(error::ArrowSnafu)?;
        Ok(result)
    }

    pub fn try_decode(&mut self, flight_data: &FlightData) -> Result<FlightMessage> {
        let message = root_as_message(&flight_data.data_header).map_err(|e| {
            InvalidFlightDataSnafu {
@@ -152,36 +198,29 @@ impl FlightDecoder {
                .fail()
            }
            MessageHeader::Schema => {
                let arrow_schema = ArrowSchema::try_from(flight_data).map_err(|e| {
                let arrow_schema = Arc::new(ArrowSchema::try_from(flight_data).map_err(|e| {
                    InvalidFlightDataSnafu {
                        reason: e.to_string(),
                    }
                    .build()
                })?;
                let schema =
                    Arc::new(Schema::try_from(arrow_schema).context(ConvertArrowSchemaSnafu)?);

                self.schema = Some(schema.clone());

                Ok(FlightMessage::Schema(schema))
                })?);
                self.schema = Some(arrow_schema.clone());
                self.schema_bytes = Some(flight_data.data_header.clone());
                Ok(FlightMessage::Schema(arrow_schema))
            }
            MessageHeader::RecordBatch => {
                let schema = self.schema.clone().context(InvalidFlightDataSnafu {
                    reason: "Should have decoded schema first!",
                })?;
                let arrow_schema = schema.arrow_schema().clone();

                let arrow_batch =
                    flight_data_to_arrow_batch(flight_data, arrow_schema, &HashMap::new())
                    flight_data_to_arrow_batch(flight_data, schema.clone(), &HashMap::new())
                        .map_err(|e| {
                            InvalidFlightDataSnafu {
                                reason: e.to_string(),
                            }
                            .build()
                        })?;
                let recordbatch = RecordBatch::try_from_df_record_batch(schema, arrow_batch)
                    .context(CreateRecordBatchSnafu)?;
                Ok(FlightMessage::Recordbatch(recordbatch))
                Ok(FlightMessage::RecordBatch(arrow_batch))
            }
            other => {
                let name = other.variant_name().unwrap_or("UNKNOWN");
@@ -196,16 +235,22 @@ impl FlightDecoder {
    pub fn schema(&self) -> Option<&SchemaRef> {
        self.schema.as_ref()
    }

    pub fn schema_bytes(&self) -> Option<bytes::Bytes> {
        self.schema_bytes.clone()
    }
}

pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<RecordBatches> {
pub fn flight_messages_to_recordbatches(
    messages: Vec<FlightMessage>,
) -> Result<Vec<DfRecordBatch>> {
    if messages.is_empty() {
        Ok(RecordBatches::empty())
        Ok(vec![])
    } else {
        let mut recordbatches = Vec::with_capacity(messages.len() - 1);

        let schema = match &messages[0] {
            FlightMessage::Schema(schema) => schema.clone(),
        match &messages[0] {
            FlightMessage::Schema(_schema) => {}
            _ => {
                return InvalidFlightDataSnafu {
                    reason: "First Flight Message must be schema!",
@@ -216,7 +261,7 @@ pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<

        for message in messages.into_iter().skip(1) {
            match message {
                FlightMessage::Recordbatch(recordbatch) => recordbatches.push(recordbatch),
                FlightMessage::RecordBatch(recordbatch) => recordbatches.push(recordbatch),
                _ => {
                    return InvalidFlightDataSnafu {
                        reason: "Expect the following Flight Messages are all Recordbatches!",
@@ -226,7 +271,7 @@ pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<
                }
            }
        }

        RecordBatches::try_new(schema, recordbatches).context(CreateRecordBatchSnafu)
        Ok(recordbatches)
    }
}

@@ -247,38 +292,33 @@ fn build_none_flight_msg() -> Bytes {
#[cfg(test)]
mod test {
    use arrow_flight::utils::batches_to_flight_data;
    use datatypes::arrow::datatypes::{DataType, Field};
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::ColumnSchema;
    use datatypes::vectors::Int32Vector;
    use datatypes::arrow::array::Int32Array;
    use datatypes::arrow::datatypes::{DataType, Field, Schema};

    use super::*;
    use crate::Error;

    #[test]
    fn test_try_decode() {
        let arrow_schema = ArrowSchema::new(vec![Field::new("n", DataType::Int32, true)]);
        let schema = Arc::new(Schema::try_from(arrow_schema.clone()).unwrap());
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "n",
            DataType::Int32,
            true,
        )]));

        let batch1 = RecordBatch::new(
        let batch1 = DfRecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Vector::from(vec![Some(1), None, Some(3)])) as _],
            vec![Arc::new(Int32Array::from(vec![Some(1), None, Some(3)])) as _],
        )
        .unwrap();
        let batch2 = RecordBatch::new(
        let batch2 = DfRecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Vector::from(vec![None, Some(5)])) as _],
            vec![Arc::new(Int32Array::from(vec![None, Some(5)])) as _],
        )
        .unwrap();

        let flight_data = batches_to_flight_data(
            &arrow_schema,
            vec![
                batch1.clone().into_df_record_batch(),
                batch2.clone().into_df_record_batch(),
            ],
        )
        .unwrap();
        let flight_data =
            batches_to_flight_data(&schema, vec![batch1.clone(), batch2.clone()]).unwrap();
        assert_eq!(flight_data.len(), 3);
        let [d1, d2, d3] = flight_data.as_slice() else {
            unreachable!()
@@ -304,15 +344,15 @@ mod test {
        let _ = decoder.schema.as_ref().unwrap();

        let message = decoder.try_decode(d2).unwrap();
        assert!(matches!(message, FlightMessage::Recordbatch(_)));
        let FlightMessage::Recordbatch(actual_batch) = message else {
        assert!(matches!(message, FlightMessage::RecordBatch(_)));
        let FlightMessage::RecordBatch(actual_batch) = message else {
            unreachable!()
        };
        assert_eq!(actual_batch, batch1);

        let message = decoder.try_decode(d3).unwrap();
        assert!(matches!(message, FlightMessage::Recordbatch(_)));
        let FlightMessage::Recordbatch(actual_batch) = message else {
        assert!(matches!(message, FlightMessage::RecordBatch(_)));
        let FlightMessage::RecordBatch(actual_batch) = message else {
            unreachable!()
        };
        assert_eq!(actual_batch, batch2);
@@ -320,27 +360,22 @@ mod test {

    #[test]
    fn test_flight_messages_to_recordbatches() {
        let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
            "m",
            ConcreteDataType::int32_datatype(),
            true,
        )]));
        let batch1 = RecordBatch::new(
        let schema = Arc::new(Schema::new(vec![Field::new("m", DataType::Int32, true)]));
        let batch1 = DfRecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Vector::from(vec![Some(2), None, Some(4)])) as _],
            vec![Arc::new(Int32Array::from(vec![Some(2), None, Some(4)])) as _],
        )
        .unwrap();
        let batch2 = RecordBatch::new(
        let batch2 = DfRecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Vector::from(vec![None, Some(6)])) as _],
            vec![Arc::new(Int32Array::from(vec![None, Some(6)])) as _],
        )
        .unwrap();
        let recordbatches =
            RecordBatches::try_new(schema.clone(), vec![batch1.clone(), batch2.clone()]).unwrap();
        let recordbatches = vec![batch1.clone(), batch2.clone()];

        let m1 = FlightMessage::Schema(schema);
        let m2 = FlightMessage::Recordbatch(batch1);
        let m3 = FlightMessage::Recordbatch(batch2);
        let m2 = FlightMessage::RecordBatch(batch1);
        let m3 = FlightMessage::RecordBatch(batch2);

        let result = flight_messages_to_recordbatches(vec![m2.clone(), m1.clone(), m3.clone()]);
        assert!(matches!(result, Err(Error::InvalidFlightData { .. })));

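For orientation, here is a hedged usage sketch of the two decode paths this change introduces; the `FlightEncoder`/`FlightDecoder` signatures come from the diff above, while the module path and the surrounding setup are assumptions. The old path re-parses whole FlightData protobuf messages; the new path feeds the flatbuffer schema header in once, then decodes each batch straight from its IPC header and body.

// Hypothetical usage sketch; module path `common_grpc::flight` is assumed.
use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage};

fn round_trip(
    schema: datatypes::arrow::datatypes::SchemaRef,
    batch: common_recordbatch::DfRecordBatch,
) {
    let mut encoder = FlightEncoder::default();
    let schema_data = encoder.encode(FlightMessage::Schema(schema));
    let batch_data = encoder.encode(FlightMessage::RecordBatch(batch));

    // Build the decoder once from the raw schema header bytes...
    let mut decoder = FlightDecoder::try_from_schema_bytes(&schema_data.data_header).unwrap();
    // ...then decode record batches without a FlightData protobuf round trip.
    let decoded = decoder
        .try_decode_record_batch(&batch_data.data_header, &batch_data.data_body)
        .unwrap();
    println!("decoded {} rows", decoded.num_rows());
}
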
@@ -8,6 +8,7 @@ license.workspace = true
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon", "dep:deadpool-postgres", "dep:deadpool"]
mysql_kvbackend = ["dep:sqlx", "dep:backon"]
enterprise = []

[lints]
workspace = true
@@ -42,6 +43,7 @@ deadpool = { workspace = true, optional = true }
deadpool-postgres = { workspace = true, optional = true }
derive_builder.workspace = true
etcd-client.workspace = true
flexbuffers = "25.2"
futures.workspace = true
futures-util.workspace = true
hex.workspace = true
@@ -49,6 +51,7 @@ humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true
moka.workspace = true
object-store.workspace = true
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
@@ -71,6 +74,7 @@ typetag.workspace = true
[dev-dependencies]
chrono.workspace = true
common-procedure = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-wal = { workspace = true, features = ["testing"] }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }

src/common/meta/src/cache/flow/table_flownode.rs
@@ -24,21 +24,39 @@ use crate::cache::{CacheContainer, Initializer};
use crate::error::Result;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::{FlowId, FlowPartitionId};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
use crate::FlownodeId;

type FlownodeSet = Arc<HashMap<FlownodeId, Peer>>;
/// Key combining a flow id and a flow partition id.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FlowIdent {
    pub flow_id: FlowId,
    pub partition_id: FlowPartitionId,
}

impl FlowIdent {
    pub fn new(flow_id: FlowId, partition_id: FlowPartitionId) -> Self {
        Self {
            flow_id,
            partition_id,
        }
    }
}

/// Cache values for [TableFlowManager]; the table_id part is the outer cache key.
/// Maps each (flow_id, partition_id) pair to the [Peer] hosting it.
type FlownodeFlowSet = Arc<HashMap<FlowIdent, Peer>>;

pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;

/// [TableFlownodeSetCache] caches the [TableId] to [FlownodeSet] mapping.
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeSet, CacheIdent>;
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeFlowSet, CacheIdent>;

/// Constructs a [TableFlownodeSetCache].
pub fn new_table_flownode_set_cache(
    name: String,
    cache: Cache<TableId, FlownodeSet>,
    cache: Cache<TableId, FlownodeFlowSet>,
    kv_backend: KvBackendRef,
) -> TableFlownodeSetCache {
    let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
@@ -47,7 +65,7 @@ pub fn new_table_flownode_set_cache(
    CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
}

fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeFlowSet> {
    Arc::new(move |&table_id| {
        let table_flow_manager = table_flow_manager.clone();
        Box::pin(async move {
@@ -57,7 +75,12 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
            .map(|flows| {
                flows
                    .into_iter()
                    .map(|(key, value)| (key.flownode_id(), value.peer))
                    .map(|(key, value)| {
                        (
                            FlowIdent::new(key.flow_id(), key.partition_id()),
                            value.peer,
                        )
                    })
                    .collect::<HashMap<_, _>>()
            })
            // We must cache the `HashMap` even if it's empty,
@@ -71,26 +94,33 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
}

async fn handle_create_flow(
    cache: &Cache<TableId, FlownodeSet>,
    cache: &Cache<TableId, FlownodeFlowSet>,
    CreateFlow {
        flow_id,
        source_table_ids,
        flownodes: flownode_peers,
        partition_to_peer_mapping: flow_part2nodes,
    }: &CreateFlow,
) {
    for table_id in source_table_ids {
        let entry = cache.entry(*table_id);
        entry
            .and_compute_with(
                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
                async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
                    Some(entry) => {
                        let mut map = entry.into_value().as_ref().clone();
                        map.extend(flownode_peers.iter().map(|peer| (peer.id, peer.clone())));
                        map.extend(
                            flow_part2nodes.iter().map(|(part, peer)| {
                                (FlowIdent::new(*flow_id, *part), peer.clone())
                            }),
                        );

                        Op::Put(Arc::new(map))
                    }
                    None => Op::Put(Arc::new(HashMap::from_iter(
                        flownode_peers.iter().map(|peer| (peer.id, peer.clone())),
                    ))),
                    None => {
                        Op::Put(Arc::new(HashMap::from_iter(flow_part2nodes.iter().map(
                            |(part, peer)| (FlowIdent::new(*flow_id, *part), peer.clone()),
                        ))))
                    }
                },
            )
            .await;
@@ -98,21 +128,23 @@ async fn handle_create_flow(
}

async fn handle_drop_flow(
    cache: &Cache<TableId, FlownodeSet>,
    cache: &Cache<TableId, FlownodeFlowSet>,
    DropFlow {
        flow_id,
        source_table_ids,
        flownode_ids,
        flow_part2node_id,
    }: &DropFlow,
) {
    for table_id in source_table_ids {
        let entry = cache.entry(*table_id);
        entry
            .and_compute_with(
                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
                async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
                    Some(entry) => {
                        let mut set = entry.into_value().as_ref().clone();
                        for flownode_id in flownode_ids {
                            set.remove(flownode_id);
                        for (part, _node) in flow_part2node_id {
                            let key = FlowIdent::new(*flow_id, *part);
                            set.remove(&key);
                        }

                        Op::Put(Arc::new(set))
@@ -128,7 +160,7 @@ async fn handle_drop_flow(
}

fn invalidator<'a>(
    cache: &'a Cache<TableId, FlownodeSet>,
    cache: &'a Cache<TableId, FlownodeFlowSet>,
    ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
    Box::pin(async move {
@@ -154,7 +186,7 @@ mod tests {
    use moka::future::CacheBuilder;
    use table::table_name::TableName;

    use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
    use crate::cache::flow::table_flownode::{new_table_flownode_set_cache, FlowIdent};
    use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
    use crate::key::flow::flow_info::FlowInfoValue;
    use crate::key::flow::flow_route::FlowRouteValue;
@@ -214,12 +246,16 @@ mod tests {
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
            HashMap::from_iter(
                (1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
            )
        );
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
            HashMap::from_iter(
                (1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
            )
        );
        let result = cache.get(1026).await.unwrap().unwrap();
        assert_eq!(result.len(), 0);
@@ -231,8 +267,9 @@
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
        let ident = vec![CacheIdent::CreateFlow(CreateFlow {
            flow_id: 2001,
            source_table_ids: vec![1024, 1025],
            flownodes: (1..=5).map(Peer::empty).collect(),
            partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
        })];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
@@ -241,6 +278,54 @@
        assert_eq!(set.len(), 5);
    }

    #[tokio::test]
    async fn test_replace_flow() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let cache = CacheBuilder::new(128).build();
        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
        let ident = vec![CacheIdent::CreateFlow(CreateFlow {
            flow_id: 2001,
            source_table_ids: vec![1024, 1025],
            partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
        })];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(set.len(), 5);
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(set.len(), 5);

        let drop_then_create_flow = vec![
            CacheIdent::DropFlow(DropFlow {
                flow_id: 2001,
                source_table_ids: vec![1024, 1025],
                flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
            }),
            CacheIdent::CreateFlow(CreateFlow {
                flow_id: 2001,
                source_table_ids: vec![1026, 1027],
                partition_to_peer_mapping: (11..=15)
                    .map(|i| (i as u32, Peer::empty(i + 1)))
                    .collect(),
            }),
            CacheIdent::FlowId(2001),
        ];
        cache.invalidate(&drop_then_create_flow).await.unwrap();

        let set = cache.get(1024).await.unwrap().unwrap();
        assert!(set.is_empty());

        let expected = HashMap::from_iter(
            (11..=15).map(|i| (FlowIdent::new(2001, i as u32), Peer::empty(i + 1))),
        );
        let set = cache.get(1026).await.unwrap().unwrap();

        assert_eq!(set.as_ref().clone(), expected);

        let set = cache.get(1027).await.unwrap().unwrap();

        assert_eq!(set.as_ref().clone(), expected);
    }

    #[tokio::test]
    async fn test_drop_flow() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -248,34 +333,57 @@ mod tests {
        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
        let ident = vec![
            CacheIdent::CreateFlow(CreateFlow {
                flow_id: 2001,
                source_table_ids: vec![1024, 1025],
                flownodes: (1..=5).map(Peer::empty).collect(),
                partition_to_peer_mapping: (1..=5)
                    .map(|i| (i as u32, Peer::empty(i + 1)))
                    .collect(),
            }),
            CacheIdent::CreateFlow(CreateFlow {
                flow_id: 2002,
                source_table_ids: vec![1024, 1025],
                flownodes: (11..=12).map(Peer::empty).collect(),
                partition_to_peer_mapping: (11..=12)
                    .map(|i| (i as u32, Peer::empty(i + 1)))
                    .collect(),
            }),
            // the same flownode holds multiple flows
            CacheIdent::CreateFlow(CreateFlow {
                flow_id: 2003,
                source_table_ids: vec![1024, 1025],
                partition_to_peer_mapping: (1..=5)
                    .map(|i| (i as u32, Peer::empty(i + 1)))
                    .collect(),
            }),
        ];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(set.len(), 7);
        assert_eq!(set.len(), 12);
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(set.len(), 7);
        assert_eq!(set.len(), 12);

        let ident = vec![CacheIdent::DropFlow(DropFlow {
            flow_id: 2001,
            source_table_ids: vec![1024, 1025],
            flownode_ids: vec![1, 2, 3, 4, 5],
            flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
        })];
        cache.invalidate(&ident).await.unwrap();
        let set = cache.get(1024).await.unwrap().unwrap();
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
            HashMap::from_iter(
                (11..=12)
                    .map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
                    .chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
            )
        );
        let set = cache.get(1025).await.unwrap().unwrap();
        assert_eq!(
            set.as_ref().clone(),
            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
            HashMap::from_iter(
                (11..=12)
                    .map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
                    .chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
            )
        );
    }
}

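The shape of this change is easier to see stripped of the cache machinery: the per-table value used to be keyed by flownode id, so two flows hosted on the same flownode collided in the map, while keying by (flow_id, partition_id) keeps them apart. A std-only sketch of that semantics, with simplified stand-in types (not the real FlowId/Peer definitions):

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct FlowIdent {
    flow_id: u32,
    partition_id: u32,
}

fn main() {
    // Two flows hosted by the same flownode (peer id 7).
    let mut set: HashMap<FlowIdent, u64> = HashMap::new();
    set.insert(FlowIdent { flow_id: 2001, partition_id: 0 }, 7);
    set.insert(FlowIdent { flow_id: 2003, partition_id: 0 }, 7);

    // Dropping flow 2001 removes only its own partitions; flow 2003 survives,
    // which the old flownode-id-keyed map could not express.
    set.retain(|k, _| k.flow_id != 2001);
    assert_eq!(set.len(), 1);
}
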
@@ -16,9 +16,12 @@ use std::sync::Arc;

use crate::error::Result;
use crate::flow_name::FlowName;
use crate::instruction::CacheIdent;
use crate::instruction::{CacheIdent, DropFlow};
use crate::key::flow::flow_info::FlowInfoKey;
use crate::key::flow::flow_name::FlowNameKey;
use crate::key::flow::flow_route::FlowRouteKey;
use crate::key::flow::flownode_flow::FlownodeFlowKey;
use crate::key::flow::table_flow::TableFlowKey;
use crate::key::schema_name::SchemaNameKey;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
@@ -89,9 +92,40 @@ where
                let key: SchemaNameKey = schema_name.into();
                self.invalidate_key(&key.to_bytes()).await;
            }
            CacheIdent::CreateFlow(_) | CacheIdent::DropFlow(_) => {
            CacheIdent::CreateFlow(_) => {
                // Do nothing
            }
            CacheIdent::DropFlow(DropFlow {
                flow_id,
                source_table_ids,
                flow_part2node_id,
            }) => {
                // invalidate flow route/flownode flow/table flow
                let mut keys = Vec::with_capacity(
                    source_table_ids.len() * flow_part2node_id.len()
                        + flow_part2node_id.len() * 2,
                );
                for table_id in source_table_ids {
                    for (partition_id, node_id) in flow_part2node_id {
                        let key =
                            TableFlowKey::new(*table_id, *node_id, *flow_id, *partition_id)
                                .to_bytes();
                        keys.push(key);
                    }
                }

                for (partition_id, node_id) in flow_part2node_id {
                    let key =
                        FlownodeFlowKey::new(*node_id, *flow_id, *partition_id).to_bytes();
                    keys.push(key);
                    let key = FlowRouteKey::new(*flow_id, *partition_id).to_bytes();
                    keys.push(key);
                }

                for key in keys {
                    self.invalidate_key(&key).await;
                }
            }
            CacheIdent::FlowName(FlowName {
                catalog_name,
                flow_name,

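The capacity hint in that `Vec::with_capacity` counts one TableFlowKey per (source table, partition) pair plus one FlownodeFlowKey and one FlowRouteKey per partition. A quick worked check of the arithmetic, assuming 2 source tables and 3 partitions:

fn main() {
    let (tables, partitions) = (2usize, 3usize);
    let table_flow_keys = tables * partitions; // one TableFlowKey per (table, partition)
    let per_partition_keys = partitions * 2; // FlownodeFlowKey + FlowRouteKey per partition
    assert_eq!(table_flow_keys + per_partition_keys, 12);
}
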
@@ -21,7 +21,7 @@ use snafu::{ensure, ResultExt};
use strum::AsRefStr;

use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::{Result, SchemaNotFoundSnafu};
use crate::instruction::CacheIdent;
@@ -148,7 +148,7 @@ impl Procedure for AlterDatabaseProcedure {
            AlterDatabaseState::UpdateMetadata => self.on_update_metadata().await,
            AlterDatabaseState::InvalidateSchemaCache => self.on_invalidate_schema_cache().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -32,9 +32,12 @@ use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use strum::AsRefStr;
use table::metadata::TableId;

use crate::ddl::utils::{add_peer_context_if_needed, sync_follower_regions};
use crate::ddl::utils::{
    add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, Error, MetadataCorruptionSnafu, Result};
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
@@ -66,6 +69,7 @@ impl AlterLogicalTablesProcedure {
                physical_table_info: None,
                physical_table_route: None,
                physical_columns: vec![],
                table_cache_keys_to_invalidate: vec![],
            },
        }
    }
@@ -195,16 +199,19 @@ impl AlterLogicalTablesProcedure {
        self.update_physical_table_metadata().await?;
        self.update_logical_tables_metadata().await?;

        self.data.build_cache_keys_to_invalidate();
        self.data.clear_metadata_fields();

        self.data.state = AlterTablesState::InvalidateTableCache;
        Ok(Status::executing(true))
    }

    pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
        let to_invalidate = self.build_table_cache_keys_to_invalidate();
        let to_invalidate = &self.data.table_cache_keys_to_invalidate;

        self.context
            .cache_invalidator
            .invalidate(&Default::default(), &to_invalidate)
            .invalidate(&Default::default(), to_invalidate)
            .await?;
        Ok(Status::done())
    }
@@ -217,14 +224,6 @@ impl Procedure for AlterLogicalTablesProcedure {
    }

    async fn execute(&mut self, _ctx: &Context) -> ProcedureResult<Status> {
        let error_handler = |e: Error| {
            if e.is_retry_later() {
                common_procedure::Error::retry_later(e)
            } else {
                common_procedure::Error::external(e)
            }
        };

        let state = &self.data.state;

        let step = state.as_ref();
@@ -241,7 +240,7 @@ impl Procedure for AlterLogicalTablesProcedure {
            AlterTablesState::UpdateMetadata => self.on_update_metadata().await,
            AlterTablesState::InvalidateTableCache => self.on_invalidate_table_cache().await,
        }
        .map_err(error_handler)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -280,6 +279,20 @@ pub struct AlterTablesData {
    physical_table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
    physical_table_route: Option<PhysicalTableRouteValue>,
    physical_columns: Vec<ColumnMetadata>,
    table_cache_keys_to_invalidate: Vec<CacheIdent>,
}

impl AlterTablesData {
    /// Clears all data fields except `state` and `table_cache_keys_to_invalidate` after metadata update.
    /// This is done to avoid persisting unnecessary data after the update metadata step.
    fn clear_metadata_fields(&mut self) {
        self.tasks.clear();
        self.table_info_values.clear();
        self.physical_table_id = 0;
        self.physical_table_info = None;
        self.physical_table_route = None;
        self.physical_columns.clear();
    }
}

#[derive(Debug, Serialize, Deserialize, AsRefStr)]

@@ -15,13 +15,12 @@
use table::metadata::RawTableInfo;
use table::table_name::TableName;

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_logical_tables::AlterTablesData;
use crate::instruction::CacheIdent;

impl AlterLogicalTablesProcedure {
    pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
impl AlterTablesData {
    pub(crate) fn build_cache_keys_to_invalidate(&mut self) {
        let mut cache_keys = self
            .data
            .table_info_values
            .iter()
            .flat_map(|table| {
@@ -31,14 +30,14 @@ impl AlterLogicalTablesProcedure {
                ]
            })
            .collect::<Vec<_>>();
        cache_keys.push(CacheIdent::TableId(self.data.physical_table_id));
        cache_keys.push(CacheIdent::TableId(self.physical_table_id));
        // Safety: physical_table_info already filled in previous steps
        let physical_table_info = &self.data.physical_table_info.as_ref().unwrap().table_info;
        let physical_table_info = &self.physical_table_info.as_ref().unwrap().table_info;
        cache_keys.push(CacheIdent::TableName(extract_table_name(
            physical_table_info,
        )));

        cache_keys
        self.table_cache_keys_to_invalidate = cache_keys;
    }
}

@@ -40,10 +40,11 @@ use table::table_reference::TableReference;

use crate::cache_invalidator::Context;
use crate::ddl::utils::{
    add_peer_context_if_needed, handle_multiple_results, sync_follower_regions, MultipleResults,
    add_peer_context_if_needed, handle_multiple_results, map_to_procedure_error,
    sync_follower_regions, MultipleResults,
};
use crate::ddl::DdlContext;
use crate::error::{AbortProcedureSnafu, Error, NoLeaderSnafu, PutPoisonSnafu, Result};
use crate::error::{AbortProcedureSnafu, NoLeaderSnafu, PutPoisonSnafu, Result, RetryLaterSnafu};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
@@ -195,7 +196,10 @@ impl AlterTableProcedure {
            }
            MultipleResults::AllRetryable(error) => {
                // Just return the error and wait for the next retry.
                Err(error)
                let err = BoxedError::new(error);
                Err(err).context(RetryLaterSnafu {
                    clean_poisons: true,
                })
            }
            MultipleResults::Ok(results) => {
                self.submit_sync_region_requests(results, &physical_table_route.region_routes)
@@ -323,16 +327,6 @@ impl Procedure for AlterTableProcedure {
    }

    async fn execute(&mut self, ctx: &ProcedureContext) -> ProcedureResult<Status> {
        let error_handler = |e: Error| {
            if e.is_retry_later() {
                ProcedureError::retry_later(e)
            } else if e.need_clean_poisons() {
                ProcedureError::external_and_clean_poisons(e)
            } else {
                ProcedureError::external(e)
            }
        };

        let state = &self.data.state;

        let step = state.as_ref();
@@ -350,7 +344,7 @@ impl Procedure for AlterTableProcedure {
            AlterTableState::UpdateMetadata => self.on_update_metadata().await,
            AlterTableState::InvalidateTableCache => self.on_broadcast().await,
        }
        .map_err(error_handler)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -22,7 +22,7 @@ use serde_with::{serde_as, DefaultOnNull};
use snafu::{ensure, ResultExt};
use strum::AsRefStr;

use crate::ddl::utils::handle_retry_error;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::schema_name::{SchemaNameKey, SchemaNameValue};
@@ -115,7 +115,7 @@ impl Procedure for CreateDatabaseProcedure {
            CreateDatabaseState::Prepare => self.on_prepare().await,
            CreateDatabaseState::CreateMetadata => self.on_create_metadata().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -36,10 +36,10 @@ use strum::AsRefStr;
use table::metadata::TableId;

use crate::cache_invalidator::Context;
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
use crate::ddl::DdlContext;
use crate::error::{self, Result, UnexpectedSnafu};
use crate::instruction::{CacheIdent, CreateFlow};
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
@@ -70,6 +70,7 @@ impl CreateFlowProcedure {
                query_context,
                state: CreateFlowState::Prepare,
                prev_flow_info_value: None,
                did_replace: false,
                flow_type: None,
            },
        }
@@ -224,6 +225,7 @@ impl CreateFlowProcedure {
                .update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
                .await?;
            info!("Replaced flow metadata for flow {flow_id}");
            self.data.did_replace = true;
        } else {
            self.context
                .flow_metadata_manager
@@ -240,22 +242,43 @@ impl CreateFlowProcedure {
        debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
        // Safety: The flow id must be allocated.
        let flow_id = self.data.flow_id.unwrap();
        let did_replace = self.data.did_replace;
        let ctx = Context {
            subject: Some("Invalidate flow cache by creating flow".to_string()),
        };

        let mut caches = vec![];

        // if a replace happened, invalidate the flow cache by dropping the old flow
        if did_replace {
            let old_flow_info = self.data.prev_flow_info_value.as_ref().unwrap();

            // only dropping the flow is needed, since the flow name hasn't changed and the flow id is already invalidated below
            caches.extend([CacheIdent::DropFlow(DropFlow {
                flow_id,
                source_table_ids: old_flow_info.source_table_ids.clone(),
                flow_part2node_id: old_flow_info.flownode_ids().clone().into_iter().collect(),
            })]);
        }

        let (_flow_info, flow_routes) = (&self.data).into();
        let flow_part2peers = flow_routes
            .into_iter()
            .map(|(part_id, route)| (part_id, route.peer))
            .collect();

        caches.extend([
            CacheIdent::CreateFlow(CreateFlow {
                flow_id,
                source_table_ids: self.data.source_table_ids.clone(),
                partition_to_peer_mapping: flow_part2peers,
            }),
            CacheIdent::FlowId(flow_id),
        ]);

        self.context
            .cache_invalidator
            .invalidate(
                &ctx,
                &[
                    CacheIdent::CreateFlow(CreateFlow {
                        source_table_ids: self.data.source_table_ids.clone(),
                        flownodes: self.data.peers.clone(),
                    }),
                    CacheIdent::FlowId(flow_id),
                ],
            )
            .invalidate(&ctx, &caches)
            .await?;

        Ok(Status::done_with_output(flow_id))
@@ -281,7 +304,7 @@ impl Procedure for CreateFlowProcedure {
            CreateFlowState::CreateMetadata => self.on_create_metadata().await,
            CreateFlowState::InvalidateFlowCache => self.on_broadcast().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {
@@ -377,6 +400,10 @@ pub struct CreateFlowData {
    /// Used to verify that the previous value is consistent when the flow metadata needs to be updated.
    /// Only set when `or_replace` is true.
    pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
    /// Only set to true when replace actually happened.
    /// This is used to determine whether to invalidate the cache.
    #[serde(default)]
    pub(crate) did_replace: bool,
    pub(crate) flow_type: Option<FlowType>,
}

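A note on the `#[serde(default)]` on `did_replace`: procedure state is persisted and may be re-read by a newer binary, so state written before the field existed must still deserialize. A small self-contained sketch of that compatibility trick (the struct and field names here are illustrative, not the real CreateFlowData):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct FlowProcedureState {
    flow_id: u32,
    // Absent in JSON persisted by older versions; falls back to false.
    #[serde(default)]
    did_replace: bool,
}

fn main() {
    let old_json = r#"{"flow_id":2001}"#; // written before the field existed
    let state: FlowProcedureState = serde_json::from_str(old_json).unwrap();
    assert!(!state.did_replace);
}
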
@@ -33,7 +33,9 @@ use store_api::storage::{RegionId, RegionNumber};
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};

use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error, sync_follower_regions};
use crate::ddl::utils::{
    add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::key::table_route::TableRouteValue;
@@ -238,7 +240,7 @@ impl Procedure for CreateLogicalTablesProcedure {
            CreateTablesState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
            CreateTablesState::CreateMetadata => self.on_create_metadata().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -34,7 +34,7 @@ use table::table_reference::TableReference;

use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::{
    add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
    add_peer_context_if_needed, convert_region_routes_to_detecting_regions, map_to_procedure_error,
    region_storage_path,
};
use crate::ddl::{DdlContext, TableMetadata};
@@ -319,7 +319,7 @@ impl Procedure for CreateTableProcedure {
            CreateTableState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
            CreateTableState::CreateMetadata => self.on_create_metadata().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -23,7 +23,7 @@ use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;

use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
@@ -249,7 +249,7 @@ impl Procedure for CreateViewProcedure {
            CreateViewState::Prepare => self.on_prepare().await,
            CreateViewState::CreateMetadata => self.on_create_metadata(ctx).await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -21,7 +21,7 @@ use std::any::Any;
use std::fmt::Debug;

use common_error::ext::BoxedError;
use common_procedure::error::{Error as ProcedureError, ExternalSnafu, FromJsonSnafu, ToJsonSnafu};
use common_procedure::error::{ExternalSnafu, FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
    Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
@@ -31,6 +31,7 @@ use snafu::ResultExt;
use tonic::async_trait;

use self::start::DropDatabaseStart;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_name::TableNameValue;
@@ -141,13 +142,7 @@ impl Procedure for DropDatabaseProcedure {
        let (next, status) = state
            .next(&self.runtime_context, &mut self.context)
            .await
            .map_err(|e| {
                if e.is_retry_later() {
                    ProcedureError::retry_later(e)
                } else {
                    ProcedureError::external(e)
                }
            })?;
            .map_err(map_to_procedure_error)?;

        *state = next;
        Ok(status)

@@ -323,6 +323,7 @@ mod tests {
                }
                .build(),
            ),
            clean_poisons: false,
        })
    }

@@ -13,6 +13,7 @@
// limitations under the License.

mod metadata;

use api::v1::flow::{flow_request, DropRequest, FlowRequest};
use async_trait::async_trait;
use common_catalog::format_full_flow_name;
@@ -29,7 +30,7 @@ use snafu::{ensure, ResultExt};
use strum::AsRefStr;

use crate::cache_invalidator::Context;
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::flow_name::FlowName;
@@ -153,6 +154,12 @@ impl DropFlowProcedure {
        };
        let flow_info_value = self.data.flow_info_value.as_ref().unwrap();

        let flow_part2nodes = flow_info_value
            .flownode_ids()
            .clone()
            .into_iter()
            .collect::<Vec<_>>();

        self.context
            .cache_invalidator
            .invalidate(
@@ -164,8 +171,9 @@ impl DropFlowProcedure {
                        flow_name: flow_info_value.flow_name.to_string(),
                    }),
                    CacheIdent::DropFlow(DropFlow {
                        flow_id,
                        source_table_ids: flow_info_value.source_table_ids.clone(),
                        flownode_ids: flow_info_value.flownode_ids.values().cloned().collect(),
                        flow_part2node_id: flow_part2nodes,
                    }),
                ],
            )
@@ -193,7 +201,7 @@ impl Procedure for DropFlowProcedure {
            DropFlowState::InvalidateFlowCache => self.on_broadcast().await,
            DropFlowState::DropFlows => self.on_flownode_drop_flows().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -35,7 +35,7 @@ use table::metadata::TableId;
use table::table_reference::TableReference;

use self::executor::DropTableExecutor;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
@@ -221,7 +221,7 @@ impl Procedure for DropTableProcedure {
            DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions().await,
            DropTableState::DeleteTombstone => self.on_delete_metadata_tombstone().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -25,7 +25,7 @@ use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;

use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
@@ -191,7 +191,7 @@ impl Procedure for DropViewProcedure {
            DropViewState::DeleteMetadata => self.on_delete_metadata().await,
            DropViewState::InvalidateViewCache => self.on_broadcast().await,
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -105,6 +105,7 @@ impl MockDatanodeHandler for RetryErrorDatanodeHandler {
                }
                .build(),
            ),
            clean_poisons: false,
        })
    }

@@ -218,6 +219,7 @@ impl MockDatanodeHandler for PartialSuccessDatanodeHandler {
                    }
                    .build(),
                ),
                clean_poisons: false,
            })
        } else {
            error::UnexpectedSnafu {
@@ -252,6 +254,7 @@ impl MockDatanodeHandler for AllFailureDatanodeHandler {
                    }
                    .build(),
                ),
                clean_poisons: false,
            })
        } else {
            error::UnexpectedSnafu {

@@ -575,6 +575,7 @@ async fn test_on_submit_alter_request_with_partial_success_retryable() {
        .await
        .unwrap_err();
    assert!(result.is_retry_later());
    assert!(!result.need_clean_poisons());

    // Submits again
    let result = procedure
@@ -582,6 +583,7 @@
        .await
        .unwrap_err();
    assert!(result.is_retry_later());
    assert!(!result.need_clean_poisons());
}

#[tokio::test]
@@ -618,12 +620,14 @@ async fn test_on_submit_alter_request_with_all_failure_retrybale() {
        .await
        .unwrap_err();
    assert!(err.is_retry_later());
    assert!(err.need_clean_poisons());
    // submits again
    let err = procedure
        .submit_alter_region_requests(procedure_id, provider.as_ref())
        .await
        .unwrap_err();
    assert!(err.is_retry_later());
    assert!(err.need_clean_poisons());
}

#[tokio::test]

@@ -31,7 +31,7 @@ use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;

use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
use crate::ddl::DdlContext;
use crate::error::{Result, TableNotFoundSnafu};
use crate::key::table_info::TableInfoValue;
@@ -66,7 +66,7 @@ impl Procedure for TruncateTableProcedure {
                self.on_datanode_truncate_regions().await
            }
        }
        .map_err(handle_retry_error)
        .map_err(map_to_procedure_error)
    }

    fn dump(&self) -> ProcedureResult<String> {

@@ -60,11 +60,16 @@ pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error
    }
}

pub fn handle_retry_error(e: Error) -> ProcedureError {
    if e.is_retry_later() {
        ProcedureError::retry_later(e)
    } else {
        ProcedureError::external(e)
/// Maps the error to the corresponding procedure error.
///
/// This function determines whether the error should be retried and if poison cleanup is needed,
/// then maps it to the appropriate procedure error variant.
pub fn map_to_procedure_error(e: Error) -> ProcedureError {
    match (e.is_retry_later(), e.need_clean_poisons()) {
        (true, true) => ProcedureError::retry_later_and_clean_poisons(e),
        (true, false) => ProcedureError::retry_later(e),
        (false, true) => ProcedureError::external_and_clean_poisons(e),
        (false, false) => ProcedureError::external(e),
    }
}

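The helper folds the per-procedure error-mapping closures removed above into one four-way dispatch on (retryable, needs poison cleanup). A stand-in sketch of the same truth table with purely local types, since the real ProcedureError constructors live in common-procedure:

#[derive(Debug, PartialEq)]
enum MappedError {
    RetryLaterAndCleanPoisons,
    RetryLater,
    ExternalAndCleanPoisons,
    External,
}

fn map_error(is_retry_later: bool, need_clean_poisons: bool) -> MappedError {
    match (is_retry_later, need_clean_poisons) {
        (true, true) => MappedError::RetryLaterAndCleanPoisons,
        (true, false) => MappedError::RetryLater,
        (false, true) => MappedError::ExternalAndCleanPoisons,
        (false, false) => MappedError::External,
    }
}

fn main() {
    // The AllRetryable branch in alter_table now sets clean_poisons: true,
    // so it lands in the retry-and-clean bucket.
    assert_eq!(map_error(true, true), MappedError::RetryLaterAndCleanPoisons);
}
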
@@ -47,6 +47,10 @@ use crate::error::{
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::CreateTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::CreateTrigger;
use crate::rpc::ddl::DdlTask::{
    AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
    CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
@@ -70,8 +74,29 @@ pub type BoxedProcedureLoaderFactory = dyn Fn(DdlContext) -> BoxedProcedureLoade
pub struct DdlManager {
    ddl_context: DdlContext,
    procedure_manager: ProcedureManagerRef,
    #[cfg(feature = "enterprise")]
    trigger_ddl_manager: Option<TriggerDdlManagerRef>,
}

/// This trait is responsible for handling DDL tasks about triggers, e.g.,
/// create trigger, drop trigger, etc.
#[cfg(feature = "enterprise")]
#[async_trait::async_trait]
pub trait TriggerDdlManager: Send + Sync {
    async fn create_trigger(
        &self,
        create_trigger_task: CreateTriggerTask,
        procedure_manager: ProcedureManagerRef,
        ddl_context: DdlContext,
        query_context: QueryContext,
    ) -> Result<SubmitDdlTaskResponse>;

    fn as_any(&self) -> &dyn std::any::Any;
}

#[cfg(feature = "enterprise")]
pub type TriggerDdlManagerRef = Arc<dyn TriggerDdlManager>;

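The `as_any` method on the trait above is the usual escape hatch for recovering a concrete type from a trait object. A minimal sketch of that pattern with made-up plugin names:

use std::any::Any;

trait Plugin: Send + Sync {
    fn as_any(&self) -> &dyn Any;
}

struct TriggerPlugin {
    name: &'static str,
}

impl Plugin for TriggerPlugin {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

fn main() {
    let plugin: Box<dyn Plugin> = Box::new(TriggerPlugin { name: "triggers" });
    // Downcast back to the concrete type when the caller knows what it stored.
    let concrete = plugin.as_any().downcast_ref::<TriggerPlugin>().unwrap();
    assert_eq!(concrete.name, "triggers");
}
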
macro_rules! procedure_loader_entry {
|
||||
($procedure:ident) => {
|
||||
(
|
||||
@@ -100,10 +125,13 @@ impl DdlManager {
|
||||
ddl_context: DdlContext,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
register_loaders: bool,
|
||||
#[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
|
||||
) -> Result<Self> {
|
||||
let manager = Self {
|
||||
ddl_context,
|
||||
procedure_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
trigger_ddl_manager,
|
||||
};
|
||||
if register_loaders {
|
||||
manager.register_loaders()?;
|
||||
@@ -669,6 +697,28 @@ async fn handle_create_flow_task(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
async fn handle_create_trigger_task(
|
||||
ddl_manager: &DdlManager,
|
||||
create_trigger_task: CreateTriggerTask,
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
|
||||
return UnsupportedSnafu {
|
||||
operation: "create trigger",
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
m.create_trigger(
|
||||
create_trigger_task,
|
||||
ddl_manager.procedure_manager.clone(),
|
||||
ddl_manager.ddl_context.clone(),
|
||||
query_context,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_alter_logical_table_tasks(
|
||||
ddl_manager: &DdlManager,
|
||||
alter_table_tasks: Vec<AlterTableTask>,
|
||||
@@ -777,6 +827,15 @@ impl ProcedureExecutor for DdlManager {
|
||||
handle_create_flow_task(self, create_flow_task, request.query_context.into())
|
||||
.await
|
||||
}
|
||||
#[cfg(feature = "enterprise")]
|
||||
CreateTrigger(create_trigger_task) => {
|
||||
handle_create_trigger_task(
|
||||
self,
|
||||
create_trigger_task,
|
||||
request.query_context.into(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
|
||||
CreateView(create_view_task) => {
|
||||
handle_create_view_task(self, create_view_task).await
|
||||
@@ -905,6 +964,8 @@ mod tests {
|
||||
},
|
||||
procedure_manager.clone(),
|
||||
true,
|
||||
#[cfg(feature = "enterprise")]
|
||||
None,
|
||||
);
|
||||
|
||||
let expected_loaders = vec![
|
||||
|
||||
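A minimal, hypothetical wiring sketch for the optional trigger hook introduced above; the constructor name and MyTriggerDdlManager are assumptions (the diff only shows the parameter list and the test call), while the argument order mirrors the hunks above. An enterprise build would pass its own TriggerDdlManager implementation, an OSS build simply passes None:

    // Hypothetical wiring sketch for an enterprise build; `MyTriggerDdlManager` and
    // the constructor name `DdlManager::try_new` are assumptions, not taken from this diff.
    let trigger_ddl_manager: Option<TriggerDdlManagerRef> =
        Some(Arc::new(MyTriggerDdlManager::default()));

    let ddl_manager = DdlManager::try_new(
        ddl_context,
        procedure_manager.clone(),
        true, // register_loaders
        trigger_ddl_manager, // builds without the "enterprise" feature omit this parameter
    )?;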
@@ -454,7 +454,10 @@ pub enum Error {
    },

    #[snafu(display("Retry later"))]
    RetryLater { source: BoxedError },
    RetryLater {
        source: BoxedError,
        clean_poisons: bool,
    },

    #[snafu(display("Abort procedure"))]
    AbortProcedure {
@@ -812,6 +815,68 @@ pub enum Error {
        #[snafu(source)]
        error: common_time::error::Error,
    },
    #[snafu(display("Invalid file path: {}", file_path))]
    InvalidFilePath {
        #[snafu(implicit)]
        location: Location,
        file_path: String,
    },

    #[snafu(display("Failed to serialize flexbuffers"))]
    SerializeFlexbuffers {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: flexbuffers::SerializationError,
    },

    #[snafu(display("Failed to deserialize flexbuffers"))]
    DeserializeFlexbuffers {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: flexbuffers::DeserializationError,
    },

    #[snafu(display("Failed to read flexbuffers"))]
    ReadFlexbuffers {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: flexbuffers::ReaderError,
    },

    #[snafu(display("Invalid file name: {}", reason))]
    InvalidFileName {
        #[snafu(implicit)]
        location: Location,
        reason: String,
    },

    #[snafu(display("Invalid file extension: {}", reason))]
    InvalidFileExtension {
        #[snafu(implicit)]
        location: Location,
        reason: String,
    },

    #[snafu(display("Failed to write object, file path: {}", file_path))]
    WriteObject {
        #[snafu(implicit)]
        location: Location,
        file_path: String,
        #[snafu(source)]
        error: object_store::Error,
    },

    #[snafu(display("Failed to read object, file path: {}", file_path))]
    ReadObject {
        #[snafu(implicit)]
        location: Location,
        file_path: String,
        #[snafu(source)]
        error: object_store::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -834,6 +899,7 @@ impl ErrorExt for Error {
            ValueNotExist { .. } | ProcedurePoisonConflict { .. } => StatusCode::Unexpected,

            Unsupported { .. } => StatusCode::Unsupported,
            WriteObject { .. } | ReadObject { .. } => StatusCode::StorageUnavailable,

            SerdeJson { .. }
            | ParseOption { .. }
@@ -867,7 +933,10 @@ impl ErrorExt for Error {
            | FromUtf8 { .. }
            | MetadataCorruption { .. }
            | ParseWalOptions { .. }
            | KafkaGetOffset { .. } => StatusCode::Unexpected,
            | KafkaGetOffset { .. }
            | ReadFlexbuffers { .. }
            | SerializeFlexbuffers { .. }
            | DeserializeFlexbuffers { .. } => StatusCode::Unexpected,

            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,

@@ -884,7 +953,10 @@ impl ErrorExt for Error {
            | InvalidSetDatabaseOption { .. }
            | InvalidUnsetDatabaseOption { .. }
            | InvalidTopicNamePrefix { .. }
            | InvalidTimeZone { .. } => StatusCode::InvalidArguments,
            | InvalidTimeZone { .. }
            | InvalidFileExtension { .. }
            | InvalidFileName { .. }
            | InvalidFilePath { .. } => StatusCode::InvalidArguments,
            InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,

            FlowNotFound { .. } => StatusCode::FlowNotFound,
@@ -970,6 +1042,7 @@ impl Error {
    pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
        Error::RetryLater {
            source: BoxedError::new(err),
            clean_poisons: false,
        }
    }

@@ -980,7 +1053,13 @@ impl Error {

    /// Determine whether it needs to clean poisons.
    pub fn need_clean_poisons(&self) -> bool {
        matches!(self, Error::AbortProcedure { clean_poisons, .. } if *clean_poisons)
        matches!(
            self,
            Error::AbortProcedure { clean_poisons, .. } if *clean_poisons
        ) || matches!(
            self,
            Error::RetryLater { clean_poisons, .. } if *clean_poisons
        )
    }

    /// Returns true if the response exceeds the size limit.
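A small illustrative sketch of how the extended RetryLater variant interacts with need_clean_poisons(), using only the fields and helper shown above; external_error / another_external_error stand for any error value implementing ErrorExt + Send + Sync and are not taken from this diff:

    // Illustrative only: a retry-later error that also requests poison cleanup.
    let err = Error::RetryLater {
        source: BoxedError::new(external_error),
        clean_poisons: true,
    };
    assert!(err.need_clean_poisons());

    // The existing helper keeps the old default: retry later without cleaning poisons.
    let err = Error::retry_later(another_external_error);
    assert!(!err.need_clean_poisons());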
@@ -24,7 +24,7 @@ use table::table_name::TableName;

use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::key::{FlowId, FlowPartitionId};
use crate::peer::Peer;
use crate::{DatanodeId, FlownodeId};

@@ -184,14 +184,19 @@ pub enum CacheIdent {

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CreateFlow {
    /// The unique identifier for the flow.
    pub flow_id: FlowId,
    pub source_table_ids: Vec<TableId>,
    pub flownodes: Vec<Peer>,
    /// Mapping of flow partition to peer information
    pub partition_to_peer_mapping: Vec<(FlowPartitionId, Peer)>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct DropFlow {
    pub flow_id: FlowId,
    pub source_table_ids: Vec<TableId>,
    pub flownode_ids: Vec<FlownodeId>,
    /// Mapping of flow partition to flownode id
    pub flow_part2node_id: Vec<(FlowPartitionId, FlownodeId)>,
}

/// Flushes a batch of regions.
@@ -256,6 +256,11 @@ impl DatanodeTableManager {
            })?
            .and_then(|r| DatanodeTableValue::try_from_raw_value(&r.value))?
            .region_info;

        // If the region options are the same, we don't need to update it.
        if region_info.region_options == new_region_options {
            return Ok(Txn::new());
        }
        // substitute region options only.
        region_info.region_options = new_region_options;

@@ -45,7 +45,7 @@ use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;

/// The key of `__flow/` scope.
#[derive(Debug, PartialEq)]
#[derive(Debug, Clone, PartialEq)]
pub struct FlowScoped<T> {
    inner: T,
}
@@ -246,27 +246,32 @@ impl FlowMetadataManager {
        new_flow_info: &FlowInfoValue,
        flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
    ) -> Result<()> {
        let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
        let (update_flow_flow_name_txn, on_create_flow_flow_name_failure) =
            self.flow_name_manager.build_update_txn(
                &new_flow_info.catalog_name,
                &new_flow_info.flow_name,
                flow_id,
            )?;

        let (create_flow_txn, on_create_flow_failure) =
        let (update_flow_txn, on_create_flow_failure) =
            self.flow_info_manager
                .build_update_txn(flow_id, current_flow_info, new_flow_info)?;

        let create_flow_routes_txn = self
            .flow_route_manager
            .build_create_txn(flow_id, flow_routes.clone())?;

        let create_flownode_flow_txn = self
            .flownode_flow_manager
            .build_create_txn(flow_id, new_flow_info.flownode_ids().clone());

        let create_table_flow_txn = self.table_flow_manager.build_create_txn(
        let update_flow_routes_txn = self.flow_route_manager.build_update_txn(
            flow_id,
            current_flow_info,
            flow_routes.clone(),
        )?;

        let update_flownode_flow_txn = self.flownode_flow_manager.build_update_txn(
            flow_id,
            current_flow_info,
            new_flow_info.flownode_ids().clone(),
        );

        let update_table_flow_txn = self.table_flow_manager.build_update_txn(
            flow_id,
            current_flow_info,
            flow_routes
                .into_iter()
                .map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
@@ -275,11 +280,11 @@ impl FlowMetadataManager {
        )?;

        let txn = Txn::merge_all(vec![
            create_flow_flow_name_txn,
            create_flow_txn,
            create_flow_routes_txn,
            create_flownode_flow_txn,
            create_table_flow_txn,
            update_flow_flow_name_txn,
            update_flow_txn,
            update_flow_routes_txn,
            update_flownode_flow_txn,
            update_table_flow_txn,
        ]);
        info!(
            "Creating flow {}.{}({}), with {} txn operations",
@@ -783,6 +788,141 @@ mod tests {
        }
    }

    #[tokio::test]
    async fn test_update_flow_metadata_diff_flownode() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
        let flow_id = 10;
        let flow_value = test_flow_info_value(
            "flow",
            [(0u32, 1u64), (1u32, 2u64)].into(),
            vec![1024, 1025, 1026],
        );
        let flow_routes = vec![
            (
                0u32,
                FlowRouteValue {
                    peer: Peer::empty(1),
                },
            ),
            (
                1,
                FlowRouteValue {
                    peer: Peer::empty(2),
                },
            ),
        ];
        flow_metadata_manager
            .create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
            .await
            .unwrap();

        let new_flow_value = {
            let mut tmp = flow_value.clone();
            tmp.raw_sql = "new".to_string();
            // move to different flownodes
            tmp.flownode_ids = [(0, 3u64), (1, 4u64)].into();
            tmp
        };
        let new_flow_routes = vec![
            (
                0u32,
                FlowRouteValue {
                    peer: Peer::empty(3),
                },
            ),
            (
                1,
                FlowRouteValue {
                    peer: Peer::empty(4),
                },
            ),
        ];

        // Update flow instead
        flow_metadata_manager
            .update_flow_metadata(
                flow_id,
                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
                &new_flow_value,
                new_flow_routes.clone(),
            )
            .await
            .unwrap();

        let got = flow_metadata_manager
            .flow_info_manager()
            .get(flow_id)
            .await
            .unwrap()
            .unwrap();
        let routes = flow_metadata_manager
            .flow_route_manager()
            .routes(flow_id)
            .await
            .unwrap();
        assert_eq!(
            routes,
            vec![
                (
                    FlowRouteKey::new(flow_id, 0),
                    FlowRouteValue {
                        peer: Peer::empty(3),
                    },
                ),
                (
                    FlowRouteKey::new(flow_id, 1),
                    FlowRouteValue {
                        peer: Peer::empty(4),
                    },
                ),
            ]
        );
        assert_eq!(got, new_flow_value);

        let flows = flow_metadata_manager
            .flownode_flow_manager()
            .flows(1)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        // should moved to different flownode
        assert_eq!(flows, vec![]);

        let flows = flow_metadata_manager
            .flownode_flow_manager()
            .flows(3)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(flows, vec![(flow_id, 0)]);

        for table_id in [1024, 1025, 1026] {
            let nodes = flow_metadata_manager
                .table_flow_manager()
                .flows(table_id)
                .await
                .unwrap();
            assert_eq!(
                nodes,
                vec![
                    (
                        TableFlowKey::new(table_id, 3, flow_id, 0),
                        TableFlowValue {
                            peer: Peer::empty(3)
                        }
                    ),
                    (
                        TableFlowKey::new(table_id, 4, flow_id, 1),
                        TableFlowValue {
                            peer: Peer::empty(4)
                        }
                    )
                ]
            );
        }
    }

    #[tokio::test]
    async fn test_update_flow_metadata_flow_replace_diff_id_err() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -153,6 +153,15 @@ impl FlowInfoValue {
        &self.flownode_ids
    }

    /// Insert a new flownode id for a partition.
    pub fn insert_flownode_id(
        &mut self,
        partition: FlowPartitionId,
        node: FlownodeId,
    ) -> Option<FlownodeId> {
        self.flownode_ids.insert(partition, node)
    }

    /// Returns the `source_table`.
    pub fn source_table_ids(&self) -> &[TableId] {
        &self.source_table_ids
@@ -272,10 +281,11 @@ impl FlowInfoManager {
        let raw_value = new_flow_value.try_as_raw_value()?;
        let prev_value = current_flow_value.get_raw_bytes();
        let txn = Txn::new()
            .when(vec![
                Compare::new(key.clone(), CompareOp::NotEqual, None),
                Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
            ])
            .when(vec![Compare::new(
                key.clone(),
                CompareOp::Equal,
                Some(prev_value),
            )])
            .and_then(vec![TxnOp::Put(key.clone(), raw_value)])
            .or_else(vec![TxnOp::Get(key.clone())]);
@@ -205,7 +205,7 @@ impl FlowNameManager {
        catalog: &str,
    ) -> BoxStream<'static, Result<(String, FlowNameValue)>> {
        let start_key = FlowNameKey::range_start_key(catalog);
        common_telemetry::debug!("flow_names: start_key: {:?}", start_key);
        common_telemetry::trace!("flow_names: start_key: {:?}", start_key);
        let req = RangeRequest::new().with_prefix(start_key);

        let stream = PaginationStream::new(
@@ -19,9 +19,12 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;

use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::key::{
    BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -39,7 +42,7 @@ lazy_static! {
/// The key stores the route info of the flow.
///
/// The layout: `__flow/route/{flow_id}/{partition_id}`.
#[derive(Debug, PartialEq)]
#[derive(Debug, Clone, PartialEq)]
pub struct FlowRouteKey(FlowScoped<FlowRouteKeyInner>);

impl FlowRouteKey {
@@ -142,6 +145,12 @@ pub struct FlowRouteValue {
    pub(crate) peer: Peer,
}

impl From<Peer> for FlowRouteValue {
    fn from(peer: Peer) -> Self {
        Self { peer }
    }
}

impl FlowRouteValue {
    /// Returns the `peer`.
    pub fn peer(&self) -> &Peer {
@@ -204,6 +213,30 @@ impl FlowRouteManager {
        Ok(Txn::new().and_then(txns))
    }

    /// Builds a update flow routes transaction.
    ///
    /// Puts `__flow/route/{flow_id}/{partition_id}` keys.
    /// Also removes `__flow/route/{flow_id}/{old_partition_id}` keys.
    pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlowRouteValue)>>(
        &self,
        flow_id: FlowId,
        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
        flow_routes: I,
    ) -> Result<Txn> {
        let del_txns = current_flow_info.flownode_ids().keys().map(|partition_id| {
            let key = FlowRouteKey::new(flow_id, *partition_id).to_bytes();
            Ok(TxnOp::Delete(key))
        });

        let put_txns = flow_routes.into_iter().map(|(partition_id, route)| {
            let key = FlowRouteKey::new(flow_id, partition_id).to_bytes();

            Ok(TxnOp::Put(key, route.try_as_raw_value()?))
        });
        let txns = del_txns.chain(put_txns).collect::<Result<Vec<_>>>()?;
        Ok(Txn::new().and_then(txns))
    }

    async fn remap_flow_route_addresses(
        &self,
        flow_routes: &mut [(FlowRouteKey, FlowRouteValue)],
@@ -19,8 +19,9 @@ use regex::Regex;
use snafu::OptionExt;

use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
use crate::key::{BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
@@ -165,6 +166,17 @@ impl FlownodeFlowManager {
        Self { kv_backend }
    }

    /// Whether given flow exist on this flownode.
    pub async fn exists(
        &self,
        flownode_id: FlownodeId,
        flow_id: FlowId,
        partition_id: FlowPartitionId,
    ) -> Result<bool> {
        let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
        Ok(self.kv_backend.get(&key).await?.is_some())
    }

    /// Retrieves all [FlowId] and [FlowPartitionId]s of the specified `flownode_id`.
    pub fn flows(
        &self,
@@ -202,6 +214,33 @@ impl FlownodeFlowManager {

        Txn::new().and_then(txns)
    }

    /// Builds a update flownode flow transaction.
    ///
    /// Puts `__flownode_flow/{flownode_id}/{flow_id}/{partition_id}` keys.
    /// Remove the old `__flownode_flow/{old_flownode_id}/{flow_id}/{old_partition_id}` keys.
    pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlownodeId)>>(
        &self,
        flow_id: FlowId,
        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
        flownode_ids: I,
    ) -> Txn {
        let del_txns =
            current_flow_info
                .flownode_ids()
                .iter()
                .map(|(partition_id, flownode_id)| {
                    let key = FlownodeFlowKey::new(*flownode_id, flow_id, *partition_id).to_bytes();
                    TxnOp::Delete(key)
                });
        let put_txns = flownode_ids.into_iter().map(|(partition_id, flownode_id)| {
            let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
            TxnOp::Put(key, vec![])
        });
        let txns = del_txns.chain(put_txns).collect::<Vec<_>>();

        Txn::new().and_then(txns)
    }
}

#[cfg(test)]
@@ -22,9 +22,12 @@ use snafu::OptionExt;
use table::metadata::TableId;

use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::key::{
    BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -215,7 +218,7 @@ impl TableFlowManager {

    /// Builds a create table flow transaction.
    ///
    /// Puts `__flow/source_table/{table_id}/{node_id}/{partition_id}` keys.
    /// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys.
    pub fn build_create_txn(
        &self,
        flow_id: FlowId,
@@ -239,6 +242,44 @@ impl TableFlowManager {
        Ok(Txn::new().and_then(txns))
    }

    /// Builds a update table flow transaction.
    ///
    /// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys,
    /// Also remove previous
    /// `__flow/source_table/{table_id}/{old_node_id}/{flow_id}/{partition_id}` keys.
    pub fn build_update_txn(
        &self,
        flow_id: FlowId,
        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
        table_flow_values: Vec<(FlowPartitionId, TableFlowValue)>,
        source_table_ids: &[TableId],
    ) -> Result<Txn> {
        let mut txns = Vec::with_capacity(2 * source_table_ids.len() * table_flow_values.len());

        // first remove the old keys
        for (part_id, node_id) in current_flow_info.flownode_ids() {
            for source_table_id in current_flow_info.source_table_ids() {
                txns.push(TxnOp::Delete(
                    TableFlowKey::new(*source_table_id, *node_id, flow_id, *part_id).to_bytes(),
                ));
            }
        }

        for (partition_id, table_flow_value) in table_flow_values {
            let flownode_id = table_flow_value.peer.id;
            let value = table_flow_value.try_as_raw_value()?;
            for source_table_id in source_table_ids {
                txns.push(TxnOp::Put(
                    TableFlowKey::new(*source_table_id, flownode_id, flow_id, partition_id)
                        .to_bytes(),
                    value.clone(),
                ));
            }
        }

        Ok(Txn::new().and_then(txns))
    }

    async fn remap_table_flow_addresses(
        &self,
        table_flows: &mut [(TableFlowKey, TableFlowValue)],
@@ -38,6 +38,14 @@ pub mod txn;
pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;

#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
// The default meta table name, default is "greptime_metakv".
pub const DEFAULT_META_TABLE_NAME: &str = "greptime_metakv";

#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
// The default lock id for election, default is 1.
pub const DEFAULT_META_ELECTION_LOCK_ID: u64 = 1;

#[async_trait]
pub trait KvBackend: TxnService
where
@@ -308,10 +308,11 @@ mod tests {
    use super::*;
    use crate::error::Error;
    use crate::kv_backend::test::{
        prepare_kv, test_kv_batch_delete, test_kv_batch_get, test_kv_compare_and_put,
        test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2, test_txn_compare_equal,
        test_txn_compare_greater, test_txn_compare_less, test_txn_compare_not_equal,
        test_txn_one_compare_op, text_txn_multi_compare_op,
        prepare_kv, prepare_kv_with_prefix, test_kv_batch_delete, test_kv_batch_get,
        test_kv_compare_and_put, test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2,
        test_simple_kv_range, test_txn_compare_equal, test_txn_compare_greater,
        test_txn_compare_less, test_txn_compare_not_equal, test_txn_one_compare_op,
        text_txn_multi_compare_op, unprepare_kv,
    };

    async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
@@ -380,4 +381,12 @@ mod tests {
        test_txn_compare_less(&kv_backend).await;
        test_txn_compare_not_equal(&kv_backend).await;
    }
    #[tokio::test]
    async fn test_mem_all_range() {
        let kv_backend = MemoryKvBackend::<Error>::new();
        let prefix = b"";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
        test_simple_kv_range(&kv_backend).await;
        unprepare_kv(&kv_backend, prefix).await;
    }
}
@@ -33,6 +33,12 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

const RDS_STORE_OP_BATCH_GET: &str = "batch_get";
const RDS_STORE_OP_BATCH_PUT: &str = "batch_put";
const RDS_STORE_OP_RANGE_QUERY: &str = "range_query";
const RDS_STORE_OP_RANGE_DELETE: &str = "range_delete";
const RDS_STORE_OP_BATCH_DELETE: &str = "batch_delete";

#[cfg(feature = "pg_kvbackend")]
mod postgres;
#[cfg(feature = "pg_kvbackend")]
@@ -560,3 +566,21 @@ fn check_txn_ops(txn_ops: &[TxnOp]) -> Result<bool> {
    });
    Ok(same)
}

#[macro_export]
macro_rules! record_rds_sql_execute_elapsed {
    ($result:expr, $label_store:expr,$label_op:expr,$label_type:expr) => {{
        let timer = std::time::Instant::now();
        $result
            .inspect(|_| {
                $crate::metrics::RDS_SQL_EXECUTE_ELAPSED
                    .with_label_values(&[$label_store, "success", $label_op, $label_type])
                    .observe(timer.elapsed().as_millis_f64())
            })
            .inspect_err(|_| {
                $crate::metrics::RDS_SQL_EXECUTE_ELAPSED
                    .with_label_values(&[$label_store, "error", $label_op, $label_type])
                    .observe(timer.elapsed().as_millis_f64());
            })
    }};
}
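The macro above wraps a fallible SQL call and records its latency in the RDS_SQL_EXECUTE_ELAPSED histogram under a success or error label. A usage sketch mirroring the call sites that appear later in this diff; the store, operation, and template-type labels are the constants introduced above, and query_executor / query / params_ref come from the surrounding store code:

    // Same call shape as the MySQL and PostgreSQL stores below; shown here only as an example.
    let kvs = crate::record_rds_sql_execute_elapsed!(
        query_executor.query(&query, &params_ref).await,
        MYSQL_STORE_NAME,
        RDS_STORE_OP_RANGE_QUERY,
        template_type.as_ref()
    )?;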
@@ -20,11 +20,13 @@ use snafu::ResultExt;
use sqlx::mysql::MySqlRow;
use sqlx::pool::Pool;
use sqlx::{MySql, MySqlPool, Row, Transaction as MySqlTransaction};
use strum::AsRefStr;

use crate::error::{CreateMySqlPoolSnafu, MySqlExecutionSnafu, MySqlTransactionSnafu, Result};
use crate::kv_backend::rds::{
    Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction,
    RDS_STORE_TXN_RETRY_COUNT,
    RDS_STORE_OP_BATCH_DELETE, RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT,
    RDS_STORE_OP_RANGE_DELETE, RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT,
};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{
@@ -33,6 +35,8 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

const MYSQL_STORE_NAME: &str = "mysql_store";

type MySqlClient = Arc<Pool<MySql>>;
pub struct MySqlTxnClient(MySqlTransaction<'static, MySql>);

@@ -47,7 +51,7 @@ fn key_value_from_row(row: MySqlRow) -> KeyValue {
const EMPTY: &[u8] = &[0];

/// Type of range template.
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, AsRefStr)]
enum RangeTemplateType {
    Point,
    Range,
@@ -58,6 +62,8 @@ enum RangeTemplateType {

/// Builds params for the given range template type.
impl RangeTemplateType {
    /// Builds the parameters for the given range template type.
    /// You can check out the conventions at [RangeRequest]
    fn build_params(&self, mut key: Vec<u8>, range_end: Vec<u8>) -> Vec<Vec<u8>> {
        match self {
            RangeTemplateType::Point => vec![key],
@@ -160,7 +166,7 @@ impl<'a> MySqlTemplateFactory<'a> {
            range_template: RangeTemplate {
                point: format!("SELECT k, v FROM `{table_name}` WHERE k = ?"),
                range: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? AND k < ? ORDER BY k"),
                full: format!("SELECT k, v FROM `{table_name}` ? ORDER BY k"),
                full: format!("SELECT k, v FROM `{table_name}` ORDER BY k"),
                left_bounded: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? ORDER BY k"),
                prefix: format!("SELECT k, v FROM `{table_name}` WHERE k LIKE ? ORDER BY k"),
            },
@@ -343,7 +349,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
            RangeTemplate::with_limit(template, if req.limit == 0 { 0 } else { req.limit + 1 });
        let limit = req.limit as usize;
        debug!("query: {:?}, params: {:?}", query, params);
        let mut kvs = query_executor.query(&query, &params_ref).await?;
        let mut kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params_ref).await,
            MYSQL_STORE_NAME,
            RDS_STORE_OP_RANGE_QUERY,
            template_type.as_ref()
        )?;
        if req.keys_only {
            kvs.iter_mut().for_each(|kv| kv.value = vec![]);
        }
@@ -381,7 +392,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {

        // Fast path: if we don't need previous kvs, we can just upsert the keys.
        if !req.prev_kv {
            query_executor.execute(&update, &values_params).await?;
            crate::record_rds_sql_execute_elapsed!(
                query_executor.execute(&update, &values_params).await,
                MYSQL_STORE_NAME,
                RDS_STORE_OP_BATCH_PUT,
                ""
            )?;
            return Ok(BatchPutResponse::default());
        }
        // Should use transaction to ensure atomicity.
@@ -392,7 +408,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
            txn.commit().await?;
            return res;
        }
        let prev_kvs = query_executor.query(&select, &in_params).await?;
        let prev_kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&select, &in_params).await,
            MYSQL_STORE_NAME,
            RDS_STORE_OP_BATCH_PUT,
            ""
        )?;
        query_executor.execute(&update, &values_params).await?;
        Ok(BatchPutResponse { prev_kvs })
    }
@@ -409,7 +430,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
            .sql_template_set
            .generate_batch_get_query(req.keys.len());
        let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
        let kvs = query_executor.query(&query, &params).await?;
        let kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params).await,
            MYSQL_STORE_NAME,
            RDS_STORE_OP_BATCH_GET,
            ""
        )?;
        Ok(BatchGetResponse { kvs })
    }

@@ -441,7 +467,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
        let template = self.sql_template_set.delete_template.get(template_type);
        let params = template_type.build_params(req.key, req.range_end);
        let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
        query_executor.execute(template, &params_ref).await?;
        crate::record_rds_sql_execute_elapsed!(
            query_executor.execute(template, &params_ref).await,
            MYSQL_STORE_NAME,
            RDS_STORE_OP_RANGE_DELETE,
            template_type.as_ref()
        )?;
        let mut resp = DeleteRangeResponse::new(prev_kvs.len() as i64);
        if req.prev_kv {
            resp.with_prev_kvs(prev_kvs);
@@ -463,7 +494,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
        let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
        // Fast path: if we don't need previous kvs, we can just delete the keys.
        if !req.prev_kv {
            query_executor.execute(&query, &params).await?;
            crate::record_rds_sql_execute_elapsed!(
                query_executor.execute(&query, &params).await,
                MYSQL_STORE_NAME,
                RDS_STORE_OP_BATCH_DELETE,
                ""
            )?;
            return Ok(BatchDeleteResponse::default());
        }
        // Should use transaction to ensure atomicity.
@@ -483,7 +519,12 @@ impl KvQueryExecutor<MySqlClient> for MySqlStore {
            .await?
            .kvs;
        // Pure `DELETE` has no return value, so we need to use `execute` instead of `query`.
        query_executor.execute(&query, &params).await?;
        crate::record_rds_sql_execute_elapsed!(
            query_executor.execute(&query, &params).await,
            MYSQL_STORE_NAME,
            RDS_STORE_OP_BATCH_DELETE,
            ""
        )?;
        if req.prev_kv {
            Ok(BatchDeleteResponse { prev_kvs })
        } else {
@@ -538,10 +579,11 @@ mod tests {
        prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
        test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
        test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
        test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
        test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
        unprepare_kv,
        test_simple_kv_range, test_txn_compare_equal, test_txn_compare_greater,
        test_txn_compare_less, test_txn_compare_not_equal, test_txn_one_compare_op,
        text_txn_multi_compare_op, unprepare_kv,
    };
    use crate::maybe_skip_mysql_integration_test;

    async fn build_mysql_kv_backend(table_name: &str) -> Option<MySqlStore> {
        init_default_ut_logging();
@@ -568,6 +610,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_put() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("put_test").await.unwrap();
        let prefix = b"put/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -577,6 +620,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_range() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("range_test").await.unwrap();
        let prefix = b"range/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -586,14 +630,26 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_range_2() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("range2_test").await.unwrap();
        let prefix = b"range2/";
        test_kv_range_2_with_prefix(&kv_backend, prefix.to_vec()).await;
        unprepare_kv(&kv_backend, prefix).await;
    }

    #[tokio::test]
    async fn test_mysql_all_range() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("simple_range_test").await.unwrap();
        let prefix = b"";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
        test_simple_kv_range(&kv_backend).await;
        unprepare_kv(&kv_backend, prefix).await;
    }

    #[tokio::test]
    async fn test_mysql_batch_get() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("batch_get_test").await.unwrap();
        let prefix = b"batch_get/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -603,6 +659,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_batch_delete() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("batch_delete_test").await.unwrap();
        let prefix = b"batch_delete/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -612,6 +669,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_batch_delete_with_prefix() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("batch_delete_with_prefix_test")
            .await
            .unwrap();
@@ -623,6 +681,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_delete_range() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("delete_range_test").await.unwrap();
        let prefix = b"delete_range/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -632,6 +691,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_compare_and_put() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("compare_and_put_test")
            .await
            .unwrap();
@@ -642,6 +702,7 @@ mod tests {

    #[tokio::test]
    async fn test_mysql_txn() {
        maybe_skip_mysql_integration_test!();
        let kv_backend = build_mysql_kv_backend("txn_test").await.unwrap();
        test_txn_one_compare_op(&kv_backend).await;
        text_txn_multi_compare_op(&kv_backend).await;
@@ -18,6 +18,7 @@ use std::sync::Arc;
use common_telemetry::debug;
use deadpool_postgres::{Config, Pool, Runtime};
use snafu::ResultExt;
use strum::AsRefStr;
use tokio_postgres::types::ToSql;
use tokio_postgres::{IsolationLevel, NoTls, Row};

@@ -27,7 +28,8 @@ use crate::error::{
};
use crate::kv_backend::rds::{
    Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction,
    RDS_STORE_TXN_RETRY_COUNT,
    RDS_STORE_OP_BATCH_DELETE, RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT,
    RDS_STORE_OP_RANGE_DELETE, RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT,
};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{
@@ -36,6 +38,8 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

const PG_STORE_NAME: &str = "pg_store";

pub struct PgClient(deadpool::managed::Object<deadpool_postgres::Manager>);
pub struct PgTxnClient<'a>(deadpool_postgres::Transaction<'a>);

@@ -50,7 +54,7 @@ fn key_value_from_row(r: Row) -> KeyValue {
const EMPTY: &[u8] = &[0];

/// Type of range template.
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, AsRefStr)]
enum RangeTemplateType {
    Point,
    Range,
@@ -61,6 +65,8 @@ enum RangeTemplateType {

/// Builds params for the given range template type.
impl RangeTemplateType {
    /// Builds the parameters for the given range template type.
    /// You can check out the conventions at [RangeRequest]
    fn build_params(&self, mut key: Vec<u8>, range_end: Vec<u8>) -> Vec<Vec<u8>> {
        match self {
            RangeTemplateType::Point => vec![key],
@@ -164,7 +170,7 @@ impl<'a> PgSqlTemplateFactory<'a> {
                range: format!(
                    "SELECT k, v FROM \"{table_name}\" WHERE k >= $1 AND k < $2 ORDER BY k"
                ),
                full: format!("SELECT k, v FROM \"{table_name}\" $1 ORDER BY k"),
                full: format!("SELECT k, v FROM \"{table_name}\" ORDER BY k"),
                left_bounded: format!("SELECT k, v FROM \"{table_name}\" WHERE k >= $1 ORDER BY k"),
                prefix: format!("SELECT k, v FROM \"{table_name}\" WHERE k LIKE $1 ORDER BY k"),
            },
@@ -358,7 +364,13 @@ impl KvQueryExecutor<PgClient> for PgStore {
            RangeTemplate::with_limit(template, if req.limit == 0 { 0 } else { req.limit + 1 });
        let limit = req.limit as usize;
        debug!("query: {:?}, params: {:?}", query, params);
        let mut kvs = query_executor.query(&query, &params_ref).await?;
        let mut kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params_ref).await,
            PG_STORE_NAME,
            RDS_STORE_OP_RANGE_QUERY,
            template_type.as_ref()
        )?;

        if req.keys_only {
            kvs.iter_mut().for_each(|kv| kv.value = vec![]);
        }
@@ -393,7 +405,13 @@ impl KvQueryExecutor<PgClient> for PgStore {
        let query = self
            .sql_template_set
            .generate_batch_upsert_query(req.kvs.len());
        let kvs = query_executor.query(&query, &params).await?;

        let kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params).await,
            PG_STORE_NAME,
            RDS_STORE_OP_BATCH_PUT,
            ""
        )?;
        if req.prev_kv {
            Ok(BatchPutResponse { prev_kvs: kvs })
        } else {
@@ -414,7 +432,12 @@ impl KvQueryExecutor<PgClient> for PgStore {
            .sql_template_set
            .generate_batch_get_query(req.keys.len());
        let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
        let kvs = query_executor.query(&query, &params).await?;
        let kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params).await,
            PG_STORE_NAME,
            RDS_STORE_OP_BATCH_GET,
            ""
        )?;
        Ok(BatchGetResponse { kvs })
    }

@@ -427,7 +450,12 @@ impl KvQueryExecutor<PgClient> for PgStore {
        let template = self.sql_template_set.delete_template.get(template_type);
        let params = template_type.build_params(req.key, req.range_end);
        let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
        let kvs = query_executor.query(template, &params_ref).await?;
        let kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(template, &params_ref).await,
            PG_STORE_NAME,
            RDS_STORE_OP_RANGE_DELETE,
            template_type.as_ref()
        )?;
        let mut resp = DeleteRangeResponse::new(kvs.len() as i64);
        if req.prev_kv {
            resp.with_prev_kvs(kvs);
@@ -447,7 +475,13 @@ impl KvQueryExecutor<PgClient> for PgStore {
            .sql_template_set
            .generate_batch_delete_query(req.keys.len());
        let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
        let kvs = query_executor.query(&query, &params).await?;

        let kvs = crate::record_rds_sql_execute_elapsed!(
            query_executor.query(&query, &params).await,
            PG_STORE_NAME,
            RDS_STORE_OP_BATCH_DELETE,
            ""
        )?;
        if req.prev_kv {
            Ok(BatchDeleteResponse { prev_kvs: kvs })
        } else {
@@ -511,10 +545,11 @@ mod tests {
        prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
        test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
        test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
        test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
        test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
        unprepare_kv,
        test_simple_kv_range, test_txn_compare_equal, test_txn_compare_greater,
        test_txn_compare_less, test_txn_compare_not_equal, test_txn_one_compare_op,
        text_txn_multi_compare_op, unprepare_kv,
    };
    use crate::maybe_skip_postgres_integration_test;

    async fn build_pg_kv_backend(table_name: &str) -> Option<PgStore> {
        let endpoints = std::env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
@@ -549,6 +584,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_put() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("put_test").await.unwrap();
        let prefix = b"put/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -558,6 +594,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_range() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("range_test").await.unwrap();
        let prefix = b"range/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -567,14 +604,26 @@ mod tests {

    #[tokio::test]
    async fn test_pg_range_2() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("range2_test").await.unwrap();
        let prefix = b"range2/";
        test_kv_range_2_with_prefix(&kv_backend, prefix.to_vec()).await;
        unprepare_kv(&kv_backend, prefix).await;
    }

    #[tokio::test]
    async fn test_pg_all_range() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("simple_range_test").await.unwrap();
        let prefix = b"";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
        test_simple_kv_range(&kv_backend).await;
        unprepare_kv(&kv_backend, prefix).await;
    }

    #[tokio::test]
    async fn test_pg_batch_get() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("batch_get_test").await.unwrap();
        let prefix = b"batch_get/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -584,6 +633,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_batch_delete() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("batch_delete_test").await.unwrap();
        let prefix = b"batch_delete/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -593,6 +643,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_batch_delete_with_prefix() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("batch_delete_with_prefix_test")
            .await
            .unwrap();
@@ -604,6 +655,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_delete_range() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("delete_range_test").await.unwrap();
        let prefix = b"delete_range/";
        prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
@@ -613,6 +665,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_compare_and_put() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("compare_and_put_test").await.unwrap();
        let prefix = b"compare_and_put/";
        let kv_backend = Arc::new(kv_backend);
@@ -621,6 +674,7 @@ mod tests {

    #[tokio::test]
    async fn test_pg_txn() {
        maybe_skip_postgres_integration_test!();
        let kv_backend = build_pg_kv_backend("txn_test").await.unwrap();
        test_txn_one_compare_op(&kv_backend).await;
        text_txn_multi_compare_op(&kv_backend).await;
@@ -108,6 +108,44 @@ pub async fn test_kv_range(kv_backend: &impl KvBackend) {
    test_kv_range_with_prefix(kv_backend, vec![]).await;
}

pub async fn test_simple_kv_range(kvbackend: &impl KvBackend) {
    {
        let full_query = RangeRequest::new().with_range(vec![0], vec![0]);
        let response = kvbackend.range(full_query).await.unwrap();
        assert_eq!(response.kvs.len(), 4);
    }
    {
        let point_query = RangeRequest::new().with_range(b"key11".to_vec(), vec![]);
        let response = kvbackend.range(point_query).await.unwrap();
        assert_eq!(response.kvs.len(), 1);
    }
    {
        let left_bounded_query = RangeRequest::new().with_range(b"key1".to_vec(), vec![0]);
        let response = kvbackend.range(left_bounded_query).await.unwrap();
        assert_eq!(response.kvs.len(), 4);
    }
    {
        let range_query = RangeRequest::new().with_range(b"key1".to_vec(), b"key11".to_vec());
        let response = kvbackend.range(range_query).await.unwrap();
        assert_eq!(response.kvs.len(), 1);
    }
    {
        let prefix_query = RangeRequest::new().with_range(b"key1".to_vec(), b"key2".to_vec());
        let response = kvbackend.range(prefix_query).await.unwrap();
        assert_eq!(response.kvs.len(), 2);
    }
    {
        let range_query = RangeRequest::new().with_range(b"key10".to_vec(), b"key100".to_vec());
        let response = kvbackend.range(range_query).await.unwrap();
        assert_eq!(response.kvs.len(), 0);
    }
    {
        let prefix_query = RangeRequest::new().with_range(b"key10".to_vec(), b"key11".to_vec());
        let response = kvbackend.range(prefix_query).await.unwrap();
        assert_eq!(response.kvs.len(), 0);
    }
}

pub async fn test_kv_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
    let key = [prefix.clone(), b"key1".to_vec()].concat();
    let key11 = [prefix.clone(), b"key11".to_vec()].concat();
Some files were not shown because too many files have changed in this diff.