Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-06 13:22:57 +00:00.

Compare commits: 44 commits, rr_part_6...release/v0
| Author | SHA1 | Date |
|---|---|---|
|  | e46efb3d6c |  |
|  | 34af9580e0 |  |
|  | b19d23d665 |  |
|  | 209f15dd51 |  |
|  | 0829fb204c |  |
|  | c8e470e8ed |  |
|  | f66803622d |  |
|  | e7774437b8 |  |
|  | c272b25456 |  |
|  | 724b802018 |  |
|  | f3ca5f5d7f |  |
|  | 6c672b96bf |  |
|  | 83018d6670 |  |
|  | 69f1cbd484 |  |
|  | e1dad69648 |  |
|  | 6c976bc737 |  |
|  | b20c1ac797 |  |
|  | d7cfb741a5 |  |
|  | 1b3efef15c |  |
|  | 1ca2dbd240 |  |
|  | d596dba240 |  |
|  | 5c9cbb5f4c |  |
|  | e2df38d0d1 |  |
|  | 66e2242e46 |  |
|  | 489b16ae30 |  |
|  | 85d564b0fb |  |
|  | d5026f3491 |  |
|  | e30753fc31 |  |
|  | b476584f56 |  |
|  | ff3a46b1d0 |  |
|  | a533ac2555 |  |
|  | cc5629b4a1 |  |
|  | f3d000f6ec |  |
|  | 9557b76224 |  |
|  | a0900f5b90 |  |
|  | 45a05fb08c |  |
|  | 71db79c8d6 |  |
|  | 79ed7bbc44 |  |
|  | 02e9a66d7a |  |
|  | 55cadcd2c0 |  |
|  | 8c4796734a |  |
|  | 919956999b |  |
|  | 7e5f6cbeae |  |
|  | 5c07f0dec7 |  |
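Assuming both refs exist in a clone of this mirror, the same 44-commit range can presumably be listed with plain git; the ref order below mirrors the compare header and the remote name `origin` is an assumption.

```bash
# Sketch: list the commits in this compare range from a local clone of the mirror.
# Assumes rr_part_6 and release/v0 are branches on the remote named `origin`.
git fetch origin
git log --oneline origin/rr_part_6...origin/release/v0
```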
```diff
@@ -1,15 +0,0 @@
-# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
-language: "en-US"
-early_access: false
-reviews:
-  profile: "chill"
-  request_changes_workflow: false
-  high_level_summary: true
-  poem: true
-  review_status: true
-  collapse_walkthrough: false
-  auto_review:
-    enabled: false
-    drafts: false
-chat:
-  auto_reply: true
```
.github/scripts/update-dev-builder-version.sh (new executable file, vendored, 37 lines)

```diff
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+DEV_BUILDER_IMAGE_TAG=$1
+
+update_dev_builder_version() {
+  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
+    echo "Error: Should specify the dev-builder image tag"
+    exit 1
+  fi
+
+  # Configure Git configs.
+  git config --global user.email greptimedb-ci@greptime.com
+  git config --global user.name greptimedb-ci
+
+  # Checkout a new branch.
+  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
+  git checkout -b $BRANCH_NAME
+
+  # Update the dev-builder image tag in the Makefile.
+  gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+
+  # Commit the changes.
+  git add Makefile
+  git commit -m "ci: update dev-builder image tag"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "ci: update dev-builder image tag" \
+    --body "This PR updates the dev-builder image tag" \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_dev_builder_version
```
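For reference, a hypothetical local invocation of this script is sketched below. The image tag value is a placeholder, GNU sed must be available as `gsed`, and the `gh` CLI has to be authenticated; note that a real run pushes a branch and opens a PR against `main`.

```bash
# Hypothetical local run (the tag is a placeholder, not a published dev-builder tag).
# Prerequisites: git, GNU sed installed as `gsed`, and an authenticated GitHub CLI (`gh auth login`).
# Warning: this actually pushes a branch and opens a pull request against main.
chmod +x .github/scripts/update-dev-builder-version.sh
./.github/scripts/update-dev-builder-version.sh 2024-01-01-abcdef0-placeholder
```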
```diff
@@ -24,11 +24,19 @@ on:
       description: Release dev-builder-android image
       required: false
       default: false
+    update_dev_builder_image_tag:
+      type: boolean
+      description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
+      required: false
+      default: false
 
 jobs:
   release-dev-builder-images:
     name: Release dev builder images
-    if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
+    # The jobs are triggered by the following events:
+    # 1. Manually triggered workflow_dispatch event
+    # 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
+    if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
     runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.set-version.outputs.version }}
@@ -57,9 +65,9 @@ jobs:
       version: ${{ env.VERSION }}
       dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
       dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
-      build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
-      build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
-      build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
+      build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+      build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+      build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
 
   release-dev-builder-images-ecr:
     name: Release dev builder images to AWS ECR
@@ -85,7 +93,7 @@ jobs:
 
       - name: Push dev-builder-ubuntu image
         shell: bash
-        if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -106,7 +114,7 @@ jobs:
 
      - name: Push dev-builder-centos image
         shell: bash
-        if: ${{ inputs.release_dev_builder_centos_image }}
+        if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -127,7 +135,7 @@ jobs:
 
      - name: Push dev-builder-android image
         shell: bash
-        if: ${{ inputs.release_dev_builder_android_image }}
+        if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -162,7 +170,7 @@ jobs:
 
      - name: Push dev-builder-ubuntu image
         shell: bash
-        if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -176,7 +184,7 @@ jobs:
 
      - name: Push dev-builder-centos image
         shell: bash
-        if: ${{ inputs.release_dev_builder_centos_image }}
+        if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -190,7 +198,7 @@ jobs:
 
      - name: Push dev-builder-android image
         shell: bash
-        if: ${{ inputs.release_dev_builder_android_image }}
+        if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -201,3 +209,24 @@ jobs:
           quay.io/skopeo/stable:latest \
           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+
+  update-dev-builder-image-tag:
+    name: Update dev-builder image tag
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
+    needs: [
+      release-dev-builder-images
+    ]
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Update dev-builder image tag
+        shell: bash
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
```
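The hunks above wire push-triggered builds into the dev-builder release workflow and add an `update-dev-builder-image-tag` job that calls the new script. The job can also be exercised on demand through `workflow_dispatch`; the sketch below is a hypothetical `gh` invocation, and the workflow file name is an assumption, since this view does not show it.

```bash
# Hypothetical manual trigger of the dev-builder release workflow.
# The workflow file name is assumed; substitute the actual file under .github/workflows/.
gh workflow run release-dev-builder-images.yml \
  --repo GreptimeTeam/greptimedb \
  --ref main \
  -f release_dev_builder_ubuntu_image=true \
  -f update_dev_builder_image_tag=true
```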
Cargo.lock (generated, 892 changed lines): file diff suppressed because it is too large.
Cargo.toml (30 changed lines)

```diff
@@ -68,7 +68,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.14.0"
+version = "0.14.4"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -112,15 +112,15 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
@@ -129,7 +129,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e82b0158cd38d4021edb4e4c0ae77f999051e62f" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4d4136692fe7fbbd509ebc8c902f6afcc0ce61e4" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -161,8 +161,10 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.5.1", features = ["ser"] }
-prost = "0.13"
+promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
+    "ser",
+] }
+prost = { version = "0.13", features = ["no-recursion-limit"] }
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.9"
 ratelimit = "0.10"
@@ -191,7 +193,7 @@ simd-json = "0.15"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
     "visitor",
     "serde",
 ] } # branch = "v0.54.x"
```
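Since the pinned git revisions above are the substance of this change and the regenerated Cargo.lock is suppressed, one generic way to double-check what actually got locked is sketched below; it is not part of the diff, and the `datafusion` package name is just one of the bumped dependencies used as an example.

```bash
# Generic verification sketch (not part of this change): confirm which git revision
# of a pinned dependency ended up in Cargo.lock after the bump.
grep -A 3 'name = "datafusion"' Cargo.lock   # the `source` line should end with the new rev
cargo tree -p datafusion --depth 0           # prints the resolved version and its git source
```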
```diff
@@ -319,6 +319,7 @@
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
+| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
 | `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
```
```diff
@@ -50,6 +50,10 @@ use_memory_store = false
 ## - Using shared storage (e.g., s3).
 enable_region_failover = false
 
+## Whether to allow region failover on local WAL.
+## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
+allow_region_failover_on_local_wal = false
+
 ## Max allowed idle time before removing node info from metasrv memory.
 node_max_idle_time = "24hours"
 
```
```diff
@@ -4,15 +4,21 @@
 
 This repository maintains the Grafana dashboards for GreptimeDB. It has two types of dashboards:
 
-- `cluster/`: The dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
-- `standalone/`: The dashboard for the standalone GreptimeDB instance. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
+- `cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
+- `standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
 
 As the rapid development of GreptimeDB, the metrics may be changed, and please feel free to submit your feedback and/or contribution to this dashboard 🤗
 
-To maintain the dashboards, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
+**NOTE**:
+
+- The Grafana version should be greater than 9.0.
+
+- If you want to modify the dashboards, you only need to modify the `cluster/dashboard.json` and run the `make dashboards` command to generate the `standalone/dashboard.json` and other related files.
+
+To maintain the dashboards easily, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
 
 - `cluster/dashboard.yaml`: The intermediate dashboard for the GreptimeDB cluster.
-- `standalone/dashboard.yaml`: The intermediatedashboard for the standalone GreptimeDB instance.
+- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
 
 ## Data Sources
 
```
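Based on the README text above, the maintenance loop is roughly the following. The dashboard paths and the directory from which `make dashboards` is run are assumptions inferred from the README links, not something this diff spells out.

```bash
# Assumed maintenance flow for the Grafana dashboards (sketch, paths are assumptions):
# 1. edit the cluster dashboard JSON (by hand, or export it from a Grafana instance),
# 2. regenerate the standalone dashboard plus the intermediate YAML and markdown docs.
"${EDITOR:-vi}" grafana/dashboards/cluster/dashboard.json
make dashboards    # run from the directory that provides the `dashboards` make target
```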
File diff suppressed because it is too large.
@@ -1,96 +1,97 @@
|
|||||||
# Overview
|
# Overview
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
|
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
|
||||||
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
|
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
|
||||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
|
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
|
||||||
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
|
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
|
||||||
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
|
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
|
||||||
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
|
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
|
||||||
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
|
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
|
||||||
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
|
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
|
||||||
# Ingestion
|
# Ingestion
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
|
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
|
||||||
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
|
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
|
||||||
# Queries
|
# Queries
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
|
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
|
||||||
# Resources
|
# Resources
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
|
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
# Frontend Requests
|
# Frontend Requests
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
||||||
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||||
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
||||||
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||||
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
||||||
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||||
# Frontend to Datanode
|
# Frontend to Datanode
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||||
# Mito Engine
|
# Mito Engine
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
||||||
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
|
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||||
# OpenDAL
|
# OpenDAL
|
||||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
|
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||||
# Metasrv

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `none` | `prometheus` | `from-datanode-{{datanode_id}}` |
|
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `none` | `prometheus` | `__auto` |
|
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `none` | `prometheus` | `__auto` |
|
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
|
||||||
# Flownode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | -- | `prometheus` | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
||||||
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-p95` |
|
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
|
||||||
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
||||||
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}]` |
|
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}]` |
|
||||||
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
||||||
|
|||||||
@@ -426,7 +426,6 @@ groups:
      - title: Write Stall per Instance
        type: timeseries
        description: Write Stall per Instance.
-       unit: decbytes
        queries:
          - expr: sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})
            datasource:
@@ -658,13 +657,22 @@ groups:
      - title: Opendal traffic
        type: timeseries
        description: Total traffic as in bytes by instance and operation
-       unit: ops
+       unit: decbytes
        queries:
          - expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))
            datasource:
              type: prometheus
              uid: ${metrics}
            legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
+     - title: OpenDAL errors per Instance
+       type: timeseries
+       description: OpenDAL error counts per Instance.
+       queries:
+         - expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))
+           datasource:
+             type: prometheus
+             uid: ${metrics}
+           legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
  - title: Metasrv
    panels:
      - title: Region migration datanode
File diff suppressed because it is too large
@@ -1,96 +1,97 @@
|
|||||||
# Overview

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
|
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
|
||||||
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
|
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
|
||||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
|
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
|
||||||
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
|
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
|
||||||
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
|
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
|
||||||
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
|
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
|
||||||
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
|
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
|
||||||
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
|
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
|
||||||
# Ingestion

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
|
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
|
||||||
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
|
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
|
||||||
# Queries

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
|
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
|
||||||
# Resources

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
|
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||||
# Frontend Requests

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
||||||
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||||
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
||||||
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||||
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
||||||
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||||
# Frontend to Datanode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||||
# Mito Engine

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
||||||
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
|
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||||
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||||
# OpenDAL

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||||
|
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||||
# Metasrv

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `none` | `prometheus` | `from-datanode-{{datanode_id}}` |
|
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `none` | `prometheus` | `__auto` |
|
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `none` | `prometheus` | `__auto` |
|
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
|
||||||
# Flownode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | -- | `prometheus` | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
||||||
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-p95` |
|
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
|
||||||
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
||||||
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}]` |
|
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}]` |
|
||||||
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
||||||
|
|||||||
@@ -426,7 +426,6 @@ groups:
      - title: Write Stall per Instance
        type: timeseries
        description: Write Stall per Instance.
-       unit: decbytes
        queries:
          - expr: sum by(instance, pod) (greptime_mito_write_stall_total{})
            datasource:
@@ -658,13 +657,22 @@ groups:
      - title: Opendal traffic
        type: timeseries
        description: Total traffic as in bytes by instance and operation
-       unit: ops
+       unit: decbytes
        queries:
          - expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))
            datasource:
              type: prometheus
              uid: ${metrics}
            legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
+     - title: OpenDAL errors per Instance
+       type: timeseries
+       description: OpenDAL error counts per Instance.
+       queries:
+         - expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))
+           datasource:
+             type: prometheus
+             uid: ${metrics}
+           legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
  - title: Metasrv
    panels:
      - title: Region migration datanode
@@ -2,7 +2,7 @@

CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/standalone}
-DAC_IMAGE=ghcr.io/zyy17/dac:20250422-c9435ce
+DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35

remove_instance_filters() {
  # Remove the instance filters for the standalone dashboards.
@@ -10,8 +10,15 @@ remove_instance_filters() {
}

generate_intermediate_dashboards_and_docs() {
-  docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json -o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml -m > $CLUSTER_DASHBOARD_DIR/dashboard.md
-  docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json -o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml -m > $STANDALONE_DASHBOARD_DIR/dashboard.md
+  docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
+    -i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json \
+    -o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml \
+    -m /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.md
+
+  docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
+    -i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json \
+    -o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml \
+    -m /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.md
}

remove_instance_filters
@@ -36,8 +36,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
-use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
+use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
@@ -192,6 +192,36 @@ impl Database {
        from_grpc_response(response)
    }

+    /// Retry if connection fails, max_retries is the max number of retries, so the total wait time
+    /// is `max_retries * GRPC_CONN_TIMEOUT`
+    pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
+        let mut client = make_database_client(&self.client)?.inner;
+        let mut retries = 0;
+        let request = self.to_rpc_request(request);
+        loop {
+            let raw_response = client.handle(request.clone()).await;
+            match (raw_response, retries < max_retries) {
+                (Ok(resp), _) => return from_grpc_response(resp.into_inner()),
+                (Err(err), true) => {
+                    // determine if the error is retryable
+                    if is_grpc_retryable(&err) {
+                        // retry
+                        retries += 1;
+                        warn!("Retrying {} times with error = {:?}", retries, err);
+                        continue;
+                    }
+                }
+                (Err(err), false) => {
+                    error!(
+                        "Failed to send request to grpc handle after {} retries, error = {:?}",
+                        retries, err
+                    );
+                    return Err(err.into());
+                }
+            }
+        }
+    }
+
    #[inline]
    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
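The hunk above is a bounded retry loop: the request is cloned, reissued while the failure is classified as retryable, and capped by `max_retries`. The sketch below is an illustration of that shape only, not the crate's API: the closure, the `is_retryable` predicate, and the string error type are stand-ins for the real gRPC client, `is_grpc_retryable`, and `tonic::Status`, and unlike the code above it also bails out immediately on errors it considers permanent.

```rust
// Illustrative only: a bounded retry loop in the same shape as `handle_with_retry`,
// with a closure standing in for the gRPC call.
fn call_with_retry<T, E: std::fmt::Debug>(
    mut call: impl FnMut() -> Result<T, E>,
    is_retryable: impl Fn(&E) -> bool,
    max_retries: u32,
) -> Result<T, E> {
    let mut retries = 0;
    loop {
        match call() {
            Ok(value) => return Ok(value),
            // Retry only while the budget holds and the error is transient.
            Err(err) if retries < max_retries && is_retryable(&err) => {
                retries += 1;
                eprintln!("retrying ({retries}/{max_retries}) after error: {err:?}");
            }
            Err(err) => return Err(err),
        }
    }
}

fn main() {
    let mut attempts = 0;
    // Fails twice with a "retryable" error, then succeeds on the third attempt.
    let result = call_with_retry(
        || {
            attempts += 1;
            if attempts < 3 { Err("unavailable") } else { Ok(attempts) }
        },
        |err| *err == "unavailable",
        5,
    );
    assert_eq!(result, Ok(3));
}
```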
@@ -368,6 +398,11 @@ impl Database {
        }
    }
}

+/// by grpc standard, only `Unavailable` is retryable, see: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc
+pub fn is_grpc_retryable(err: &tonic::Status) -> bool {
+    matches!(err.code(), tonic::Code::Unavailable)
+}
+
#[derive(Default, Debug, Clone)]
struct FlightContext {
    auth_header: Option<AuthHeader>,
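Per the doc comment in this hunk, only the `Unavailable` status code is treated as transient. A minimal check of that classification, assuming the `tonic` crate is available (`retryable` below is just a local stand-in for the `is_grpc_retryable` helper introduced above):

```rust
// Minimal sketch (assumes the `tonic` crate): only `Unavailable` is retryable.
fn retryable(status: &tonic::Status) -> bool {
    matches!(status.code(), tonic::Code::Unavailable)
}

fn main() {
    let transient = tonic::Status::unavailable("connection reset by peer");
    let permanent = tonic::Status::invalid_argument("malformed request");
    assert!(retryable(&transient));
    assert!(!retryable(&permanent));
}
```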
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::fmt;
use std::time::Duration;

use async_trait::async_trait;
@@ -131,8 +132,8 @@ impl SubCommand {
    }
}

-#[derive(Debug, Default, Parser)]
-struct StartCommand {
+#[derive(Default, Parser)]
+pub struct StartCommand {
    /// The address to bind the gRPC server.
    #[clap(long, alias = "bind-addr")]
    rpc_bind_addr: Option<String>,
@@ -171,8 +172,29 @@ struct StartCommand {
    backend: Option<BackendImpl>,
}

+impl fmt::Debug for StartCommand {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("StartCommand")
+            .field("rpc_bind_addr", &self.rpc_bind_addr)
+            .field("rpc_server_addr", &self.rpc_server_addr)
+            .field("store_addrs", &self.sanitize_store_addrs())
+            .field("config_file", &self.config_file)
+            .field("selector", &self.selector)
+            .field("use_memory_store", &self.use_memory_store)
+            .field("enable_region_failover", &self.enable_region_failover)
+            .field("http_addr", &self.http_addr)
+            .field("http_timeout", &self.http_timeout)
+            .field("env_prefix", &self.env_prefix)
+            .field("data_home", &self.data_home)
+            .field("store_key_prefix", &self.store_key_prefix)
+            .field("max_txn_ops", &self.max_txn_ops)
+            .field("backend", &self.backend)
+            .finish()
+    }
+}
+
impl StartCommand {
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
        let mut opts = MetasrvOptions::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
@@ -184,6 +206,15 @@ impl StartCommand {
        Ok(opts)
    }

+    fn sanitize_store_addrs(&self) -> Option<Vec<String>> {
+        self.store_addrs.as_ref().map(|addrs| {
+            addrs
+                .iter()
+                .map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
+                .collect()
+        })
+    }
+
    // The precedence order is: cli > config file > environment variables > default values.
    fn merge_with_cli_options(
        &self,
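The two hunks above work together: the hand-written `Debug` impl replaces the derive so that `store_addrs` is routed through `sanitize_store_addrs` before printing, presumably to keep credentials in backend connection strings out of log output. The sketch below illustrates that redact-before-Debug pattern in isolation; `redact_password` is a hypothetical stand-in for the repo's `common_meta::kv_backend::util::sanitize_connection_string`, and `Options` is not the real `StartCommand`.

```rust
// Illustrative sketch of a manual Debug impl that sanitizes sensitive fields.
use std::fmt;

struct Options {
    store_addrs: Vec<String>,
    data_home: String,
}

// Hypothetical sanitizer: hide whatever sits between "scheme://" and "@".
fn redact_password(addr: &str) -> String {
    match (addr.find("://"), addr.find('@')) {
        (Some(scheme_end), Some(at)) if at > scheme_end => {
            format!("{}://<redacted>@{}", &addr[..scheme_end], &addr[at + 1..])
        }
        _ => addr.to_string(),
    }
}

impl fmt::Debug for Options {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Options")
            .field(
                "store_addrs",
                &self.store_addrs.iter().map(|a| redact_password(a)).collect::<Vec<_>>(),
            )
            .field("data_home", &self.data_home)
            .finish()
    }
}

fn main() {
    let opts = Options {
        store_addrs: vec!["postgres://meta:secret@10.0.0.1:5432/meta".to_string()],
        data_home: "/var/lib/greptimedb".to_string(),
    };
    // The password never reaches the log line.
    println!("{opts:?}");
}
```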
@@ -261,7 +292,7 @@ impl StartCommand {
        Ok(())
    }

-    async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
+    pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
        common_runtime::init_global_runtimes(&opts.runtime);

        let guard = common_telemetry::init_global_logging(
@@ -56,8 +56,8 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{
-    FlowConfig, FlowStreamingEngine, FlownodeBuilder, FlownodeInstance, FlownodeOptions,
-    FrontendClient, FrontendInvoker, GrpcQueryHandlerWithBoxedError,
+    FlowConfig, FlownodeBuilder, FlownodeInstance, FlownodeOptions, FrontendClient,
+    FrontendInvoker, GrpcQueryHandlerWithBoxedError, StreamingEngine,
};
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::instance::builder::FrontendBuilder;
@@ -544,9 +544,9 @@ impl StartCommand {
|
|||||||
|
|
||||||
// set the ref to query for the local flow state
|
// set the ref to query for the local flow state
|
||||||
{
|
{
|
||||||
let flow_worker_manager = flownode.flow_engine().streaming_engine();
|
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||||
information_extension
|
information_extension
|
||||||
.set_flow_worker_manager(flow_worker_manager)
|
.set_flow_streaming_engine(flow_streaming_engine)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -615,10 +615,10 @@ impl StartCommand {
|
|||||||
.replace(weak_grpc_handler);
|
.replace(weak_grpc_handler);
|
||||||
|
|
||||||
// set the frontend invoker for flownode
|
// set the frontend invoker for flownode
|
||||||
let flow_worker_manager = flownode.flow_engine().streaming_engine();
|
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||||
// flow server need to be able to use frontend to write insert requests back
|
// flow server need to be able to use frontend to write insert requests back
|
||||||
let invoker = FrontendInvoker::build_from(
|
let invoker = FrontendInvoker::build_from(
|
||||||
flow_worker_manager.clone(),
|
flow_streaming_engine.clone(),
|
||||||
catalog_manager.clone(),
|
catalog_manager.clone(),
|
||||||
kv_backend.clone(),
|
kv_backend.clone(),
|
||||||
layered_cache_registry.clone(),
|
layered_cache_registry.clone(),
|
||||||
@@ -627,7 +627,7 @@ impl StartCommand {
|
|||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.context(error::StartFlownodeSnafu)?;
|
.context(error::StartFlownodeSnafu)?;
|
||||||
flow_worker_manager.set_frontend_invoker(invoker).await;
|
flow_streaming_engine.set_frontend_invoker(invoker).await;
|
||||||
|
|
||||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||||
.context(error::ServersSnafu)?;
|
.context(error::ServersSnafu)?;
|
||||||
@@ -703,7 +703,7 @@ pub struct StandaloneInformationExtension {
|
|||||||
region_server: RegionServer,
|
region_server: RegionServer,
|
||||||
procedure_manager: ProcedureManagerRef,
|
procedure_manager: ProcedureManagerRef,
|
||||||
start_time_ms: u64,
|
start_time_ms: u64,
|
||||||
flow_worker_manager: RwLock<Option<Arc<FlowStreamingEngine>>>,
|
flow_streaming_engine: RwLock<Option<Arc<StreamingEngine>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl StandaloneInformationExtension {
|
impl StandaloneInformationExtension {
|
||||||
@@ -712,14 +712,14 @@ impl StandaloneInformationExtension {
|
|||||||
region_server,
|
region_server,
|
||||||
procedure_manager,
|
procedure_manager,
|
||||||
start_time_ms: common_time::util::current_time_millis() as u64,
|
start_time_ms: common_time::util::current_time_millis() as u64,
|
||||||
flow_worker_manager: RwLock::new(None),
|
flow_streaming_engine: RwLock::new(None),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the flow worker manager for the standalone instance.
|
/// Set the flow streaming engine for the standalone instance.
|
||||||
pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowStreamingEngine>) {
|
pub async fn set_flow_streaming_engine(&self, flow_streaming_engine: Arc<StreamingEngine>) {
|
||||||
let mut guard = self.flow_worker_manager.write().await;
|
let mut guard = self.flow_streaming_engine.write().await;
|
||||||
*guard = Some(flow_worker_manager);
|
*guard = Some(flow_streaming_engine);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -798,7 +798,7 @@ impl InformationExtension for StandaloneInformationExtension {
|
|||||||
|
|
||||||
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
|
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
|
||||||
Ok(Some(
|
Ok(Some(
|
||||||
self.flow_worker_manager
|
self.flow_streaming_engine
|
||||||
.read()
|
.read()
|
||||||
.await
|
.await
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
|||||||
@@ -74,6 +74,7 @@ fn test_load_datanode_example_config() {
             RegionEngineConfig::File(FileEngineConfig {}),
             RegionEngineConfig::Metric(MetricEngineConfig {
                 experimental_sparse_primary_key_encoding: false,
+                flush_metadata_region_interval: Duration::from_secs(30),
             }),
         ],
         logging: LoggingOptions {
@@ -216,6 +217,7 @@ fn test_load_standalone_example_config() {
             RegionEngineConfig::File(FileEngineConfig {}),
             RegionEngineConfig::Metric(MetricEngineConfig {
                 experimental_sparse_primary_key_encoding: false,
+                flush_metadata_region_interval: Duration::from_secs(30),
            }),
        ],
        storage: StorageConfig {
@@ -13,10 +13,8 @@
 // limitations under the License.
 
 use std::sync::Arc;
-mod greatest;
 mod to_unixtime;
 
-use greatest::GreatestFunction;
 use to_unixtime::ToUnixtimeFunction;
 
 use crate::function_registry::FunctionRegistry;
@@ -26,6 +24,5 @@ pub(crate) struct TimestampFunction;
 impl TimestampFunction {
     pub fn register(registry: &FunctionRegistry) {
         registry.register(Arc::new(ToUnixtimeFunction));
-        registry.register(Arc::new(GreatestFunction));
     }
 }
@@ -1,328 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt::{self};
-
-use common_query::error::{
-    self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, Volatility};
-use datafusion::arrow::compute::kernels::cmp::gt;
-use datatypes::arrow::array::AsArray;
-use datatypes::arrow::compute::cast;
-use datatypes::arrow::compute::kernels::zip;
-use datatypes::arrow::datatypes::{
-    DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
-    TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
-};
-use datatypes::prelude::ConcreteDataType;
-use datatypes::types::TimestampType;
-use datatypes::vectors::{Helper, VectorRef};
-use snafu::{ensure, ResultExt};
-
-use crate::function::{Function, FunctionContext};
-
-#[derive(Clone, Debug, Default)]
-pub struct GreatestFunction;
-
-const NAME: &str = "greatest";
-
-macro_rules! gt_time_types {
-    ($ty: ident, $columns:expr) => {{
-        let column1 = $columns[0].to_arrow_array();
-        let column2 = $columns[1].to_arrow_array();
-
-        let column1 = column1.as_primitive::<$ty>();
-        let column2 = column2.as_primitive::<$ty>();
-        let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
-
-        let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
-        Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
-    }};
-}
-
-impl Function for GreatestFunction {
-    fn name(&self) -> &str {
-        NAME
-    }
-
-    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-        ensure!(
-            input_types.len() == 2,
-            InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect exactly two, have: {}",
-                    input_types.len()
-                )
-            }
-        );
-
-        match &input_types[0] {
-            ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
-            ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
-            ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
-            _ => UnsupportedInputDataTypeSnafu {
-                function: NAME,
-                datatypes: input_types,
-            }
-            .fail(),
-        }
-    }
-
-    fn signature(&self) -> Signature {
-        Signature::uniform(
-            2,
-            vec![
-                ConcreteDataType::string_datatype(),
-                ConcreteDataType::date_datatype(),
-                ConcreteDataType::timestamp_nanosecond_datatype(),
-                ConcreteDataType::timestamp_microsecond_datatype(),
-                ConcreteDataType::timestamp_millisecond_datatype(),
-                ConcreteDataType::timestamp_second_datatype(),
-            ],
-            Volatility::Immutable,
-        )
-    }
-
-    fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
-        ensure!(
-            columns.len() == 2,
-            InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect exactly two, have: {}",
-                    columns.len()
-                ),
-            }
-        );
-        match columns[0].data_type() {
-            ConcreteDataType::String(_) => {
-                let column1 = cast(
-                    &columns[0].to_arrow_array(),
-                    &ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
-                )
-                .context(ArrowComputeSnafu)?;
-                let column1 = column1.as_primitive::<TimestampMillisecondType>();
-                let column2 = cast(
-                    &columns[1].to_arrow_array(),
-                    &ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
-                )
-                .context(ArrowComputeSnafu)?;
-                let column2 = column2.as_primitive::<TimestampMillisecondType>();
-                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
-                let result =
-                    zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
-                Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
-            }
-            ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
-            ConcreteDataType::Timestamp(ts_type) => match ts_type {
-                TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
-                TimestampType::Millisecond(_) => {
-                    gt_time_types!(TimestampMillisecondType, columns)
-                }
-                TimestampType::Microsecond(_) => {
-                    gt_time_types!(TimestampMicrosecondType, columns)
-                }
-                TimestampType::Nanosecond(_) => {
-                    gt_time_types!(TimestampNanosecondType, columns)
-                }
-            },
-            _ => UnsupportedInputDataTypeSnafu {
-                function: NAME,
-                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
-            }
-            .fail(),
-        }
-    }
-}
-
-impl fmt::Display for GreatestFunction {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "GREATEST")
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::sync::Arc;
-
-    use common_time::timestamp::TimeUnit;
-    use common_time::{Date, Timestamp};
-    use datatypes::types::{
-        DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
-        TimestampSecondType,
-    };
-    use datatypes::value::Value;
-    use datatypes::vectors::{
-        DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
-        TimestampNanosecondVector, TimestampSecondVector, Vector,
-    };
-    use paste::paste;
-
-    use super::*;
-    #[test]
-    fn test_greatest_takes_string_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::string_datatype(),
-                    ConcreteDataType::string_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::timestamp_millisecond_datatype()
-        );
-        let columns = vec![
-            Arc::new(StringVector::from(vec![
-                "1970-01-01".to_string(),
-                "2012-12-23".to_string(),
-            ])) as _,
-            Arc::new(StringVector::from(vec![
-                "2001-02-01".to_string(),
-                "1999-01-01".to_string(),
-            ])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result
-            .as_any()
-            .downcast_ref::<TimestampMillisecondVector>()
-            .unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
-        );
-    }
-
-    #[test]
-    fn test_greatest_takes_date_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::date_datatype(),
-                    ConcreteDataType::date_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::Date(DateType)
-        );
-
-        let columns = vec![
-            Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
-            Arc::new(DateVector::from_slice(vec![0, 1])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result.as_any().downcast_ref::<DateVector>().unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Date(Date::from_str_utc("1970-01-01").unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Date(Date::from_str_utc("1970-01-03").unwrap())
-        );
-    }
-
-    #[test]
-    fn test_greatest_takes_datetime_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::timestamp_millisecond_datatype(),
-                    ConcreteDataType::timestamp_millisecond_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::timestamp_millisecond_datatype()
-        );
-
-        let columns = vec![
-            Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
-            Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result
-            .as_any()
-            .downcast_ref::<TimestampMillisecondVector>()
-            .unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
-        );
-    }
-
-    macro_rules! test_timestamp {
-        ($type: expr,$unit: ident) => {
-            paste! {
-                #[test]
-                fn [<test_greatest_takes_ $unit:lower _vector>]() {
-                    let function = GreatestFunction;
-                    assert_eq!(
-                        function.return_type(&[$type, $type]).unwrap(),
-                        ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
-                    );
-
-                    let columns = vec![
-                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
-                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
-                    ];
-
-                    let result = function.eval(&FunctionContext::default(), &columns).unwrap();
-                    let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
-                    assert_eq!(result.len(), 2);
-                    assert_eq!(
-                        result.get(0),
-                        Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
-                    );
-                    assert_eq!(
-                        result.get(1),
-                        Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
-                    );
-                }
-            }
-        }
-    }
-
-    test_timestamp!(
-        ConcreteDataType::timestamp_nanosecond_datatype(),
-        Nanosecond
-    );
-    test_timestamp!(
-        ConcreteDataType::timestamp_microsecond_datatype(),
-        Microsecond
-    );
-    test_timestamp!(
-        ConcreteDataType::timestamp_millisecond_datatype(),
-        Millisecond
-    );
-    test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
-}
@@ -115,6 +115,13 @@ impl Function for UddSketchCalcFunction {
                 }
             };
 
+            // Check if the sketch is empty, if so, return null
+            // This is important to avoid panics when calling estimate_quantile on an empty sketch
+            // In practice, this will happen if input is all null
+            if sketch.bucket_iter().count() == 0 {
+                builder.push_null();
+                continue;
+            }
             // Compute the estimated quantile from the sketch
             let result = sketch.estimate_quantile(perc);
             builder.push(Some(result));
@@ -18,4 +18,5 @@ pub mod flight;
 pub mod precision;
 pub mod select;
 
+pub use arrow_flight::FlightData;
 pub use error::Error;
src/common/meta/src/cache/flow/table_flownode.rs
@@ -24,21 +24,39 @@ use crate::cache::{CacheContainer, Initializer};
 use crate::error::Result;
 use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
 use crate::key::flow::{TableFlowManager, TableFlowManagerRef};
+use crate::key::{FlowId, FlowPartitionId};
 use crate::kv_backend::KvBackendRef;
 use crate::peer::Peer;
-use crate::FlownodeId;
 
-type FlownodeSet = Arc<HashMap<FlownodeId, Peer>>;
+/// Flow id&flow partition key
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct FlowIdent {
+    pub flow_id: FlowId,
+    pub partition_id: FlowPartitionId,
+}
+
+impl FlowIdent {
+    pub fn new(flow_id: FlowId, partition_id: FlowPartitionId) -> Self {
+        Self {
+            flow_id,
+            partition_id,
+        }
+    }
+}
+
+/// cache for TableFlowManager, the table_id part is in the outer cache
+/// include flownode_id, flow_id, partition_id mapping to Peer
+type FlownodeFlowSet = Arc<HashMap<FlowIdent, Peer>>;
 
 pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;
 
 /// [TableFlownodeSetCache] caches the [TableId] to [FlownodeSet] mapping.
-pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeSet, CacheIdent>;
+pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeFlowSet, CacheIdent>;
 
 /// Constructs a [TableFlownodeSetCache].
 pub fn new_table_flownode_set_cache(
     name: String,
-    cache: Cache<TableId, FlownodeSet>,
+    cache: Cache<TableId, FlownodeFlowSet>,
     kv_backend: KvBackendRef,
 ) -> TableFlownodeSetCache {
     let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
@@ -47,7 +65,7 @@ pub fn new_table_flownode_set_cache(
     CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
 }
 
-fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
+fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeFlowSet> {
     Arc::new(move |&table_id| {
         let table_flow_manager = table_flow_manager.clone();
         Box::pin(async move {
@@ -57,7 +75,12 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
                 .map(|flows| {
                     flows
                         .into_iter()
-                        .map(|(key, value)| (key.flownode_id(), value.peer))
+                        .map(|(key, value)| {
+                            (
+                                FlowIdent::new(key.flow_id(), key.partition_id()),
+                                value.peer,
+                            )
+                        })
                         .collect::<HashMap<_, _>>()
                 })
                 // We must cache the `HashSet` even if it's empty,
@@ -71,26 +94,33 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
 }
 
 async fn handle_create_flow(
-    cache: &Cache<TableId, FlownodeSet>,
+    cache: &Cache<TableId, FlownodeFlowSet>,
     CreateFlow {
+        flow_id,
         source_table_ids,
-        flownodes: flownode_peers,
+        partition_to_peer_mapping: flow_part2nodes,
     }: &CreateFlow,
 ) {
     for table_id in source_table_ids {
         let entry = cache.entry(*table_id);
         entry
             .and_compute_with(
-                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
+                async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
                     Some(entry) => {
                         let mut map = entry.into_value().as_ref().clone();
-                        map.extend(flownode_peers.iter().map(|peer| (peer.id, peer.clone())));
+                        map.extend(
+                            flow_part2nodes.iter().map(|(part, peer)| {
+                                (FlowIdent::new(*flow_id, *part), peer.clone())
+                            }),
+                        );
+
                         Op::Put(Arc::new(map))
                     }
-                    None => Op::Put(Arc::new(HashMap::from_iter(
-                        flownode_peers.iter().map(|peer| (peer.id, peer.clone())),
-                    ))),
+                    None => {
+                        Op::Put(Arc::new(HashMap::from_iter(flow_part2nodes.iter().map(
+                            |(part, peer)| (FlowIdent::new(*flow_id, *part), peer.clone()),
+                        ))))
+                    }
                 },
             )
             .await;
@@ -98,21 +128,23 @@ async fn handle_create_flow(
 }
 
 async fn handle_drop_flow(
-    cache: &Cache<TableId, FlownodeSet>,
+    cache: &Cache<TableId, FlownodeFlowSet>,
     DropFlow {
+        flow_id,
         source_table_ids,
-        flownode_ids,
+        flow_part2node_id,
     }: &DropFlow,
 ) {
     for table_id in source_table_ids {
         let entry = cache.entry(*table_id);
         entry
             .and_compute_with(
-                async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
+                async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
                     Some(entry) => {
                         let mut set = entry.into_value().as_ref().clone();
-                        for flownode_id in flownode_ids {
-                            set.remove(flownode_id);
+                        for (part, _node) in flow_part2node_id {
+                            let key = FlowIdent::new(*flow_id, *part);
+                            set.remove(&key);
                        }
 
                        Op::Put(Arc::new(set))
@@ -128,7 +160,7 @@ async fn handle_drop_flow(
 }
 
 fn invalidator<'a>(
-    cache: &'a Cache<TableId, FlownodeSet>,
+    cache: &'a Cache<TableId, FlownodeFlowSet>,
     ident: &'a CacheIdent,
 ) -> BoxFuture<'a, Result<()>> {
     Box::pin(async move {
@@ -154,7 +186,7 @@ mod tests {
     use moka::future::CacheBuilder;
     use table::table_name::TableName;
 
-    use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
+    use crate::cache::flow::table_flownode::{new_table_flownode_set_cache, FlowIdent};
     use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
     use crate::key::flow::flow_info::FlowInfoValue;
     use crate::key::flow::flow_route::FlowRouteValue;
@@ -187,6 +219,7 @@ mod tests {
            },
            flownode_ids: BTreeMap::from([(0, 1), (1, 2), (2, 3)]),
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+           query_context: None,
            flow_name: "my_flow".to_string(),
            raw_sql: "sql".to_string(),
            expire_after: Some(300),
@@ -213,12 +246,16 @@ mod tests {
         let set = cache.get(1024).await.unwrap().unwrap();
         assert_eq!(
             set.as_ref().clone(),
-            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
+            HashMap::from_iter(
+                (1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
+            )
         );
         let set = cache.get(1025).await.unwrap().unwrap();
         assert_eq!(
             set.as_ref().clone(),
-            HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
+            HashMap::from_iter(
+                (1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
+            )
         );
         let result = cache.get(1026).await.unwrap().unwrap();
         assert_eq!(result.len(), 0);
@@ -230,8 +267,9 @@ mod tests {
         let cache = CacheBuilder::new(128).build();
         let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
         let ident = vec![CacheIdent::CreateFlow(CreateFlow {
+            flow_id: 2001,
             source_table_ids: vec![1024, 1025],
-            flownodes: (1..=5).map(Peer::empty).collect(),
+            partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
         })];
         cache.invalidate(&ident).await.unwrap();
         let set = cache.get(1024).await.unwrap().unwrap();
@@ -240,6 +278,54 @@ mod tests {
         assert_eq!(set.len(), 5);
     }
 
+    #[tokio::test]
+    async fn test_replace_flow() {
+        let mem_kv = Arc::new(MemoryKvBackend::default());
+        let cache = CacheBuilder::new(128).build();
+        let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
+        let ident = vec![CacheIdent::CreateFlow(CreateFlow {
+            flow_id: 2001,
+            source_table_ids: vec![1024, 1025],
+            partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
+        })];
+        cache.invalidate(&ident).await.unwrap();
+        let set = cache.get(1024).await.unwrap().unwrap();
+        assert_eq!(set.len(), 5);
+        let set = cache.get(1025).await.unwrap().unwrap();
+        assert_eq!(set.len(), 5);
+
+        let drop_then_create_flow = vec![
+            CacheIdent::DropFlow(DropFlow {
+                flow_id: 2001,
+                source_table_ids: vec![1024, 1025],
+                flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
+            }),
+            CacheIdent::CreateFlow(CreateFlow {
+                flow_id: 2001,
+                source_table_ids: vec![1026, 1027],
+                partition_to_peer_mapping: (11..=15)
+                    .map(|i| (i as u32, Peer::empty(i + 1)))
+                    .collect(),
+            }),
+            CacheIdent::FlowId(2001),
+        ];
+        cache.invalidate(&drop_then_create_flow).await.unwrap();
+
+        let set = cache.get(1024).await.unwrap().unwrap();
+        assert!(set.is_empty());
+
+        let expected = HashMap::from_iter(
+            (11..=15).map(|i| (FlowIdent::new(2001, i as u32), Peer::empty(i + 1))),
+        );
+        let set = cache.get(1026).await.unwrap().unwrap();
+
+        assert_eq!(set.as_ref().clone(), expected);
+
+        let set = cache.get(1027).await.unwrap().unwrap();
+
+        assert_eq!(set.as_ref().clone(), expected);
+    }
+
     #[tokio::test]
     async fn test_drop_flow() {
         let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -247,34 +333,57 @@ mod tests {
         let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
         let ident = vec![
             CacheIdent::CreateFlow(CreateFlow {
+                flow_id: 2001,
                 source_table_ids: vec![1024, 1025],
-                flownodes: (1..=5).map(Peer::empty).collect(),
+                partition_to_peer_mapping: (1..=5)
+                    .map(|i| (i as u32, Peer::empty(i + 1)))
+                    .collect(),
             }),
             CacheIdent::CreateFlow(CreateFlow {
+                flow_id: 2002,
                 source_table_ids: vec![1024, 1025],
-                flownodes: (11..=12).map(Peer::empty).collect(),
+                partition_to_peer_mapping: (11..=12)
+                    .map(|i| (i as u32, Peer::empty(i + 1)))
+                    .collect(),
+            }),
+            // same flownode that hold multiple flows
+            CacheIdent::CreateFlow(CreateFlow {
+                flow_id: 2003,
+                source_table_ids: vec![1024, 1025],
+                partition_to_peer_mapping: (1..=5)
+                    .map(|i| (i as u32, Peer::empty(i + 1)))
+                    .collect(),
             }),
         ];
         cache.invalidate(&ident).await.unwrap();
         let set = cache.get(1024).await.unwrap().unwrap();
-        assert_eq!(set.len(), 7);
+        assert_eq!(set.len(), 12);
         let set = cache.get(1025).await.unwrap().unwrap();
-        assert_eq!(set.len(), 7);
+        assert_eq!(set.len(), 12);
 
         let ident = vec![CacheIdent::DropFlow(DropFlow {
+            flow_id: 2001,
             source_table_ids: vec![1024, 1025],
-            flownode_ids: vec![1, 2, 3, 4, 5],
+            flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
         })];
         cache.invalidate(&ident).await.unwrap();
         let set = cache.get(1024).await.unwrap().unwrap();
         assert_eq!(
             set.as_ref().clone(),
-            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
+            HashMap::from_iter(
+                (11..=12)
+                    .map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
+                    .chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
+            )
         );
         let set = cache.get(1025).await.unwrap().unwrap();
         assert_eq!(
             set.as_ref().clone(),
-            HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
+            HashMap::from_iter(
+                (11..=12)
+                    .map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
+                    .chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
+            )
         );
     }
 }
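The diff above changes the cache value from a map keyed by flownode id to a map keyed by a (flow id, partition id) pair. A minimal standalone sketch (plain std types, illustrative values only, not the GreptimeDB cache itself) of why the old key was lossy: two flows hosted on the same flownode collapse into one entry, so dropping one flow would also evict the other's routing information.

use std::collections::HashMap;

fn main() {
    // Old shape: flownode id -> peer. Flow 2001 and flow 2003 both run a
    // partition on flownode 1, so the second insert silently overwrites the first.
    let mut by_flownode: HashMap<u64, &str> = HashMap::new();
    by_flownode.insert(1, "peer-for-flow-2001");
    by_flownode.insert(1, "peer-for-flow-2003");
    assert_eq!(by_flownode.len(), 1);

    // New shape: (flow id, partition id) -> peer. Both flows keep their entries,
    // and dropping flow 2001 removes only its own partitions.
    let mut by_flow_partition: HashMap<(u64, u32), &str> = HashMap::new();
    by_flow_partition.insert((2001, 0), "peer-for-flow-2001");
    by_flow_partition.insert((2003, 0), "peer-for-flow-2003");
    by_flow_partition.retain(|&(flow_id, _), _| flow_id != 2001);
    assert_eq!(by_flow_partition.len(), 1);
    assert!(by_flow_partition.contains_key(&(2003, 0)));
}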
@@ -16,9 +16,12 @@ use std::sync::Arc;
 
 use crate::error::Result;
 use crate::flow_name::FlowName;
-use crate::instruction::CacheIdent;
+use crate::instruction::{CacheIdent, DropFlow};
 use crate::key::flow::flow_info::FlowInfoKey;
 use crate::key::flow::flow_name::FlowNameKey;
+use crate::key::flow::flow_route::FlowRouteKey;
+use crate::key::flow::flownode_flow::FlownodeFlowKey;
+use crate::key::flow::table_flow::TableFlowKey;
 use crate::key::schema_name::SchemaNameKey;
 use crate::key::table_info::TableInfoKey;
 use crate::key::table_name::TableNameKey;
@@ -89,9 +92,40 @@ where
                 let key: SchemaNameKey = schema_name.into();
                 self.invalidate_key(&key.to_bytes()).await;
             }
-            CacheIdent::CreateFlow(_) | CacheIdent::DropFlow(_) => {
+            CacheIdent::CreateFlow(_) => {
                 // Do nothing
             }
+            CacheIdent::DropFlow(DropFlow {
+                flow_id,
+                source_table_ids,
+                flow_part2node_id,
+            }) => {
+                // invalidate flow route/flownode flow/table flow
+                let mut keys = Vec::with_capacity(
+                    source_table_ids.len() * flow_part2node_id.len()
+                        + flow_part2node_id.len() * 2,
+                );
+                for table_id in source_table_ids {
+                    for (partition_id, node_id) in flow_part2node_id {
+                        let key =
+                            TableFlowKey::new(*table_id, *node_id, *flow_id, *partition_id)
+                                .to_bytes();
+                        keys.push(key);
+                    }
+                }
+
+                for (partition_id, node_id) in flow_part2node_id {
+                    let key =
+                        FlownodeFlowKey::new(*node_id, *flow_id, *partition_id).to_bytes();
+                    keys.push(key);
+                    let key = FlowRouteKey::new(*flow_id, *partition_id).to_bytes();
+                    keys.push(key);
+                }
+
+                for key in keys {
+                    self.invalidate_key(&key).await;
+                }
+            }
             CacheIdent::FlowName(FlowName {
                 catalog_name,
                 flow_name,
@@ -39,7 +39,7 @@ use crate::cache_invalidator::Context;
 use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
 use crate::ddl::DdlContext;
 use crate::error::{self, Result, UnexpectedSnafu};
-use crate::instruction::{CacheIdent, CreateFlow};
+use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
 use crate::key::flow::flow_info::FlowInfoValue;
 use crate::key::flow::flow_route::FlowRouteValue;
 use crate::key::table_name::TableNameKey;
@@ -70,6 +70,7 @@ impl CreateFlowProcedure {
                 query_context,
                 state: CreateFlowState::Prepare,
                 prev_flow_info_value: None,
+                did_replace: false,
                 flow_type: None,
             },
         }
@@ -224,6 +225,7 @@ impl CreateFlowProcedure {
                 .update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
                 .await?;
             info!("Replaced flow metadata for flow {flow_id}");
+            self.data.did_replace = true;
         } else {
             self.context
                 .flow_metadata_manager
@@ -240,22 +242,43 @@ impl CreateFlowProcedure {
         debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
         // Safety: The flow id must be allocated.
         let flow_id = self.data.flow_id.unwrap();
+        let did_replace = self.data.did_replace;
         let ctx = Context {
             subject: Some("Invalidate flow cache by creating flow".to_string()),
         };
 
+        let mut caches = vec![];
+
+        // if did replaced, invalidate the flow cache with drop the old flow
+        if did_replace {
+            let old_flow_info = self.data.prev_flow_info_value.as_ref().unwrap();
+
+            // only drop flow is needed, since flow name haven't changed, and flow id already invalidated below
+            caches.extend([CacheIdent::DropFlow(DropFlow {
+                flow_id,
+                source_table_ids: old_flow_info.source_table_ids.clone(),
+                flow_part2node_id: old_flow_info.flownode_ids().clone().into_iter().collect(),
+            })]);
+        }
+
+        let (_flow_info, flow_routes) = (&self.data).into();
+        let flow_part2peers = flow_routes
+            .into_iter()
+            .map(|(part_id, route)| (part_id, route.peer))
+            .collect();
+
+        caches.extend([
+            CacheIdent::CreateFlow(CreateFlow {
+                flow_id,
+                source_table_ids: self.data.source_table_ids.clone(),
+                partition_to_peer_mapping: flow_part2peers,
+            }),
+            CacheIdent::FlowId(flow_id),
+        ]);
+
         self.context
             .cache_invalidator
-            .invalidate(
-                &ctx,
-                &[
-                    CacheIdent::CreateFlow(CreateFlow {
-                        source_table_ids: self.data.source_table_ids.clone(),
-                        flownodes: self.data.peers.clone(),
-                    }),
-                    CacheIdent::FlowId(flow_id),
-                ],
-            )
+            .invalidate(&ctx, &caches)
             .await?;
 
         Ok(Status::done_with_output(flow_id))
@@ -377,6 +400,10 @@ pub struct CreateFlowData {
     /// For verify if prev value is consistent when need to update flow metadata.
     /// only set when `or_replace` is true.
     pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
+    /// Only set to true when replace actually happened.
+    /// This is used to determine whether to invalidate the cache.
+    #[serde(default)]
+    pub(crate) did_replace: bool,
     pub(crate) flow_type: Option<FlowType>,
 }
 
@@ -449,6 +476,7 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
             sink_table_name,
             flownode_ids,
             catalog_name,
+            query_context: Some(value.query_context.clone()),
             flow_name,
             raw_sql: sql,
             expire_after,
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 mod metadata;
+
 use api::v1::flow::{flow_request, DropRequest, FlowRequest};
 use async_trait::async_trait;
 use common_catalog::format_full_flow_name;
@@ -153,6 +154,12 @@ impl DropFlowProcedure {
         };
         let flow_info_value = self.data.flow_info_value.as_ref().unwrap();
 
+        let flow_part2nodes = flow_info_value
+            .flownode_ids()
+            .clone()
+            .into_iter()
+            .collect::<Vec<_>>();
+
         self.context
             .cache_invalidator
             .invalidate(
@@ -164,8 +171,9 @@ impl DropFlowProcedure {
                         flow_name: flow_info_value.flow_name.to_string(),
                     }),
                     CacheIdent::DropFlow(DropFlow {
+                        flow_id,
                        source_table_ids: flow_info_value.source_table_ids.clone(),
-                        flownode_ids: flow_info_value.flownode_ids.values().cloned().collect(),
+                        flow_part2node_id: flow_part2nodes,
                    }),
                ],
            )
@@ -514,11 +514,25 @@ pub enum Error {
     },
 
     #[snafu(display(
-        "Failed to build a Kafka partition client, topic: {}, partition: {}",
+        "Failed to get a Kafka partition client, topic: {}, partition: {}",
         topic,
         partition
     ))]
-    BuildKafkaPartitionClient {
+    KafkaPartitionClient {
+        topic: String,
+        partition: i32,
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: rskafka::client::error::Error,
+    },
+
+    #[snafu(display(
+        "Failed to get offset from Kafka, topic: {}, partition: {}",
+        topic,
+        partition
+    ))]
+    KafkaGetOffset {
         topic: String,
         partition: i32,
         #[snafu(implicit)]
@@ -790,6 +804,14 @@ pub enum Error {
         #[snafu(source)]
         source: common_procedure::error::Error,
     },
+
+    #[snafu(display("Failed to parse timezone"))]
+    InvalidTimeZone {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: common_time::error::Error,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -835,7 +857,7 @@ impl ErrorExt for Error {
             | EncodeWalOptions { .. }
             | BuildKafkaClient { .. }
             | BuildKafkaCtrlClient { .. }
-            | BuildKafkaPartitionClient { .. }
+            | KafkaPartitionClient { .. }
             | ResolveKafkaEndpoint { .. }
             | ProduceRecord { .. }
             | CreateKafkaWalTopic { .. }
@@ -844,7 +866,8 @@ impl ErrorExt for Error {
             | ProcedureOutput { .. }
             | FromUtf8 { .. }
             | MetadataCorruption { .. }
-            | ParseWalOptions { .. } => StatusCode::Unexpected,
+            | ParseWalOptions { .. }
+            | KafkaGetOffset { .. } => StatusCode::Unexpected,
 
             SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
 
@@ -861,7 +884,8 @@ impl ErrorExt for Error {
             | InvalidSetDatabaseOption { .. }
             | InvalidUnsetDatabaseOption { .. }
             | InvalidTopicNamePrefix { .. }
-            | InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
+            | InvalidTimeZone { .. } => StatusCode::InvalidArguments,
+            InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
 
             FlowNotFound { .. } => StatusCode::FlowNotFound,
             FlowRouteNotFound { .. } => StatusCode::Unexpected,
@@ -24,7 +24,7 @@ use table::table_name::TableName;
 
 use crate::flow_name::FlowName;
 use crate::key::schema_name::SchemaName;
-use crate::key::FlowId;
+use crate::key::{FlowId, FlowPartitionId};
 use crate::peer::Peer;
 use crate::{DatanodeId, FlownodeId};
 
@@ -184,14 +184,19 @@ pub enum CacheIdent {
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 pub struct CreateFlow {
+    /// The unique identifier for the flow.
+    pub flow_id: FlowId,
     pub source_table_ids: Vec<TableId>,
-    pub flownodes: Vec<Peer>,
+    /// Mapping of flow partition to peer information
+    pub partition_to_peer_mapping: Vec<(FlowPartitionId, Peer)>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 pub struct DropFlow {
+    pub flow_id: FlowId,
     pub source_table_ids: Vec<TableId>,
-    pub flownode_ids: Vec<FlownodeId>,
+    /// Mapping of flow partition to flownode id
+    pub flow_part2node_id: Vec<(FlowPartitionId, FlownodeId)>,
 }
 
 /// Flushes a batch of regions.
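As an aside, `flow_part2node_id` carries the same information that the flow metadata keeps as a `BTreeMap<FlowPartitionId, FlownodeId>`; the drop-flow path simply flattens that map, as seen later in this compare. A minimal standalone sketch of the conversion (plain std types, illustrative values only):

use std::collections::BTreeMap;

fn main() {
    // Partition id -> flownode id, as stored in the flow metadata.
    let flownode_ids: BTreeMap<u32, u64> = BTreeMap::from([(0, 3), (1, 4)]);

    // Flattened into the Vec<(partition, node)> shape used by the cache ident.
    let flow_part2node_id: Vec<(u32, u64)> = flownode_ids.clone().into_iter().collect();
    assert_eq!(flow_part2node_id, vec![(0, 3), (1, 4)]);

    // And back again, if a map view is needed.
    let round_trip: BTreeMap<u32, u64> = flow_part2node_id.into_iter().collect();
    assert_eq!(round_trip, flownode_ids);
}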
@@ -256,6 +256,11 @@ impl DatanodeTableManager {
             })?
             .and_then(|r| DatanodeTableValue::try_from_raw_value(&r.value))?
             .region_info;
 
+        // If the region options are the same, we don't need to update it.
+        if region_info.region_options == new_region_options {
+            return Ok(Txn::new());
+        }
         // substitute region options only.
         region_info.region_options = new_region_options;
 
@@ -45,7 +45,7 @@ use crate::kv_backend::KvBackendRef;
 use crate::rpc::store::BatchDeleteRequest;
 
 /// The key of `__flow/` scope.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct FlowScoped<T> {
     inner: T,
 }
@@ -246,27 +246,32 @@ impl FlowMetadataManager {
         new_flow_info: &FlowInfoValue,
         flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
     ) -> Result<()> {
-        let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
+        let (update_flow_flow_name_txn, on_create_flow_flow_name_failure) =
             self.flow_name_manager.build_update_txn(
                 &new_flow_info.catalog_name,
                 &new_flow_info.flow_name,
                 flow_id,
             )?;
 
-        let (create_flow_txn, on_create_flow_failure) =
+        let (update_flow_txn, on_create_flow_failure) =
             self.flow_info_manager
                 .build_update_txn(flow_id, current_flow_info, new_flow_info)?;
 
-        let create_flow_routes_txn = self
-            .flow_route_manager
-            .build_create_txn(flow_id, flow_routes.clone())?;
-
-        let create_flownode_flow_txn = self
-            .flownode_flow_manager
-            .build_create_txn(flow_id, new_flow_info.flownode_ids().clone());
-
-        let create_table_flow_txn = self.table_flow_manager.build_create_txn(
+        let update_flow_routes_txn = self.flow_route_manager.build_update_txn(
             flow_id,
+            current_flow_info,
+            flow_routes.clone(),
+        )?;
+
+        let update_flownode_flow_txn = self.flownode_flow_manager.build_update_txn(
+            flow_id,
+            current_flow_info,
+            new_flow_info.flownode_ids().clone(),
+        );
+
+        let update_table_flow_txn = self.table_flow_manager.build_update_txn(
+            flow_id,
+            current_flow_info,
             flow_routes
                 .into_iter()
                 .map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
@@ -275,11 +280,11 @@ impl FlowMetadataManager {
         )?;
 
         let txn = Txn::merge_all(vec![
-            create_flow_flow_name_txn,
-            create_flow_txn,
-            create_flow_routes_txn,
-            create_flownode_flow_txn,
-            create_table_flow_txn,
+            update_flow_flow_name_txn,
+            update_flow_txn,
+            update_flow_routes_txn,
+            update_flownode_flow_txn,
+            update_table_flow_txn,
         ]);
         info!(
             "Creating flow {}.{}({}), with {} txn operations",
@@ -452,6 +457,7 @@ mod tests {
         };
         FlowInfoValue {
             catalog_name: catalog_name.to_string(),
+            query_context: None,
             flow_name: flow_name.to_string(),
             source_table_ids,
             sink_table_name,
@@ -625,6 +631,7 @@ mod tests {
         };
         let flow_value = FlowInfoValue {
             catalog_name: "greptime".to_string(),
+            query_context: None,
             flow_name: "flow".to_string(),
             source_table_ids: vec![1024, 1025, 1026],
             sink_table_name: another_sink_table_name,
@@ -781,6 +788,141 @@ mod tests {
         }
     }
 
+    #[tokio::test]
+    async fn test_update_flow_metadata_diff_flownode() {
+        let mem_kv = Arc::new(MemoryKvBackend::default());
+        let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
+        let flow_id = 10;
+        let flow_value = test_flow_info_value(
+            "flow",
+            [(0u32, 1u64), (1u32, 2u64)].into(),
+            vec![1024, 1025, 1026],
+        );
+        let flow_routes = vec![
+            (
+                0u32,
+                FlowRouteValue {
+                    peer: Peer::empty(1),
+                },
+            ),
+            (
+                1,
+                FlowRouteValue {
+                    peer: Peer::empty(2),
+                },
+            ),
+        ];
+        flow_metadata_manager
+            .create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
+            .await
+            .unwrap();
+
+        let new_flow_value = {
+            let mut tmp = flow_value.clone();
+            tmp.raw_sql = "new".to_string();
+            // move to different flownodes
+            tmp.flownode_ids = [(0, 3u64), (1, 4u64)].into();
+            tmp
+        };
+        let new_flow_routes = vec![
+            (
+                0u32,
+                FlowRouteValue {
+                    peer: Peer::empty(3),
+                },
+            ),
+            (
+                1,
+                FlowRouteValue {
+                    peer: Peer::empty(4),
+                },
+            ),
+        ];
+
+        // Update flow instead
+        flow_metadata_manager
+            .update_flow_metadata(
+                flow_id,
+                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
+                &new_flow_value,
+                new_flow_routes.clone(),
+            )
+            .await
+            .unwrap();
+
+        let got = flow_metadata_manager
+            .flow_info_manager()
+            .get(flow_id)
+            .await
+            .unwrap()
+            .unwrap();
+        let routes = flow_metadata_manager
+            .flow_route_manager()
+            .routes(flow_id)
+            .await
+            .unwrap();
+        assert_eq!(
+            routes,
+            vec![
+                (
+                    FlowRouteKey::new(flow_id, 0),
+                    FlowRouteValue {
+                        peer: Peer::empty(3),
+                    },
+                ),
+                (
+                    FlowRouteKey::new(flow_id, 1),
+                    FlowRouteValue {
+                        peer: Peer::empty(4),
+                    },
+                ),
+            ]
+        );
+        assert_eq!(got, new_flow_value);
+
+        let flows = flow_metadata_manager
+            .flownode_flow_manager()
+            .flows(1)
+            .try_collect::<Vec<_>>()
+            .await
+            .unwrap();
+        // should moved to different flownode
+        assert_eq!(flows, vec![]);
+
+        let flows = flow_metadata_manager
+            .flownode_flow_manager()
+            .flows(3)
+            .try_collect::<Vec<_>>()
+            .await
+            .unwrap();
+        assert_eq!(flows, vec![(flow_id, 0)]);
+
+        for table_id in [1024, 1025, 1026] {
+            let nodes = flow_metadata_manager
+                .table_flow_manager()
+                .flows(table_id)
+                .await
+                .unwrap();
+            assert_eq!(
+                nodes,
+                vec![
+                    (
+                        TableFlowKey::new(table_id, 3, flow_id, 0),
+                        TableFlowValue {
+                            peer: Peer::empty(3)
+                        }
+                    ),
+                    (
+                        TableFlowKey::new(table_id, 4, flow_id, 1),
+                        TableFlowValue {
+                            peer: Peer::empty(4)
+                        }
+                    )
+                ]
+            );
+        }
+    }
+
     #[tokio::test]
     async fn test_update_flow_metadata_flow_replace_diff_id_err() {
         let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -864,6 +1006,7 @@ mod tests {
         };
         let flow_value = FlowInfoValue {
             catalog_name: "greptime".to_string(),
+            query_context: None,
             flow_name: "flow".to_string(),
             source_table_ids: vec![1024, 1025, 1026],
             sink_table_name: another_sink_table_name,
@@ -121,6 +121,13 @@ pub struct FlowInfoValue {
    pub(crate) flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
    /// The catalog name.
    pub(crate) catalog_name: String,
+    /// The query context used when create flow.
+    /// Although flow doesn't belong to any schema, this query_context is needed to remember
+    /// the query context when `create_flow` is executed
+    /// for recovering flow using the same sql&query_context after db restart.
+    /// if none, should use default query context
+    #[serde(default)]
+    pub(crate) query_context: Option<crate::rpc::ddl::QueryContext>,
    /// The flow name.
    pub(crate) flow_name: String,
    /// The raw sql.

@@ -146,6 +153,15 @@ impl FlowInfoValue {
        &self.flownode_ids
    }

+    /// Insert a new flownode id for a partition.
+    pub fn insert_flownode_id(
+        &mut self,
+        partition: FlowPartitionId,
+        node: FlownodeId,
+    ) -> Option<FlownodeId> {
+        self.flownode_ids.insert(partition, node)
+    }
+
    /// Returns the `source_table`.
    pub fn source_table_ids(&self) -> &[TableId] {
        &self.source_table_ids

@@ -155,6 +171,10 @@ impl FlowInfoValue {
        &self.catalog_name
    }

+    pub fn query_context(&self) -> &Option<crate::rpc::ddl::QueryContext> {
+        &self.query_context
+    }
+
    pub fn flow_name(&self) -> &String {
        &self.flow_name
    }

@@ -261,10 +281,11 @@ impl FlowInfoManager {
        let raw_value = new_flow_value.try_as_raw_value()?;
        let prev_value = current_flow_value.get_raw_bytes();
        let txn = Txn::new()
-            .when(vec![
-                Compare::new(key.clone(), CompareOp::NotEqual, None),
-                Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
-            ])
+            .when(vec![Compare::new(
+                key.clone(),
+                CompareOp::Equal,
+                Some(prev_value),
+            )])
            .and_then(vec![TxnOp::Put(key.clone(), raw_value)])
            .or_else(vec![TxnOp::Get(key.clone())]);
@@ -19,9 +19,12 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;

use crate::error::{self, Result};
+use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
-use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
+use crate::key::{
+    BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
+};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;

@@ -39,7 +42,7 @@ lazy_static! {
/// The key stores the route info of the flow.
///
/// The layout: `__flow/route/{flow_id}/{partition_id}`.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
pub struct FlowRouteKey(FlowScoped<FlowRouteKeyInner>);

impl FlowRouteKey {

@@ -142,6 +145,12 @@ pub struct FlowRouteValue {
    pub(crate) peer: Peer,
}

+impl From<Peer> for FlowRouteValue {
+    fn from(peer: Peer) -> Self {
+        Self { peer }
+    }
+}
+
impl FlowRouteValue {
    /// Returns the `peer`.
    pub fn peer(&self) -> &Peer {

@@ -204,6 +213,33 @@ impl FlowRouteManager {
        Ok(Txn::new().and_then(txns))
    }

+    /// Builds a update flow routes transaction.
+    ///
+    /// Puts `__flow/route/{flow_id}/{partition_id}` keys.
+    /// Also removes `__flow/route/{flow_id}/{old_partition_id}` keys.
+    pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlowRouteValue)>>(
+        &self,
+        flow_id: FlowId,
+        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
+        flow_routes: I,
+    ) -> Result<Txn> {
+        let del_txns = current_flow_info
+            .flownode_ids()
+            .iter()
+            .map(|(partition_id, _)| {
+                let key = FlowRouteKey::new(flow_id, *partition_id).to_bytes();
+                Ok(TxnOp::Delete(key))
+            });
+
+        let put_txns = flow_routes.into_iter().map(|(partition_id, route)| {
+            let key = FlowRouteKey::new(flow_id, partition_id).to_bytes();
+
+            Ok(TxnOp::Put(key, route.try_as_raw_value()?))
+        });
+        let txns = del_txns.chain(put_txns).collect::<Result<Vec<_>>>()?;
+        Ok(Txn::new().and_then(txns))
+    }
+
    async fn remap_flow_route_addresses(
        &self,
        flow_routes: &mut [(FlowRouteKey, FlowRouteValue)],
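A minimal standalone sketch of the pattern the update-txn builders above all follow: delete every key derived from the current flow info, then put keys for the new routes. The types here are simplified stand-ins, not the crate's actual TxnOp, FlowRouteKey, or Txn definitions.

#[derive(Debug)]
enum TxnOp {
    Delete(String),
    Put(String, Vec<u8>),
}

fn build_update_ops(
    flow_id: u32,
    old_partitions: &[u32],
    new_routes: &[(u32, Vec<u8>)],
) -> Vec<TxnOp> {
    // Delete the keys recorded for the flow's current partitions.
    let del = old_partitions
        .iter()
        .map(|p| TxnOp::Delete(format!("__flow/route/{flow_id}/{p}")));
    // Put the keys for the new routes.
    let put = new_routes
        .iter()
        .map(|(p, v)| TxnOp::Put(format!("__flow/route/{flow_id}/{p}"), v.clone()));
    del.chain(put).collect()
}

fn main() {
    let ops = build_update_ops(10, &[0, 1], &[(0, vec![3u8]), (1, vec![4u8])]);
    assert_eq!(ops.len(), 4); // two deletes followed by two puts
    println!("{ops:?}");
}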
@@ -19,8 +19,9 @@ use regex::Regex;
use snafu::OptionExt;

use crate::error::{self, Result};
+use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::FlowScoped;
-use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
+use crate::key::{BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};

@@ -165,6 +166,17 @@ impl FlownodeFlowManager {
        Self { kv_backend }
    }

+    /// Whether given flow exist on this flownode.
+    pub async fn exists(
+        &self,
+        flownode_id: FlownodeId,
+        flow_id: FlowId,
+        partition_id: FlowPartitionId,
+    ) -> Result<bool> {
+        let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
+        Ok(self.kv_backend.get(&key).await?.is_some())
+    }
+
    /// Retrieves all [FlowId] and [FlowPartitionId]s of the specified `flownode_id`.
    pub fn flows(
        &self,

@@ -202,6 +214,33 @@ impl FlownodeFlowManager {

        Txn::new().and_then(txns)
    }
+
+    /// Builds a update flownode flow transaction.
+    ///
+    /// Puts `__flownode_flow/{flownode_id}/{flow_id}/{partition_id}` keys.
+    /// Remove the old `__flownode_flow/{old_flownode_id}/{flow_id}/{old_partition_id}` keys.
+    pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlownodeId)>>(
+        &self,
+        flow_id: FlowId,
+        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
+        flownode_ids: I,
+    ) -> Txn {
+        let del_txns =
+            current_flow_info
+                .flownode_ids()
+                .iter()
+                .map(|(partition_id, flownode_id)| {
+                    let key = FlownodeFlowKey::new(*flownode_id, flow_id, *partition_id).to_bytes();
+                    TxnOp::Delete(key)
+                });
+        let put_txns = flownode_ids.into_iter().map(|(partition_id, flownode_id)| {
+            let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
+            TxnOp::Put(key, vec![])
+        });
+        let txns = del_txns.chain(put_txns).collect::<Vec<_>>();
+
+        Txn::new().and_then(txns)
+    }
}

#[cfg(test)]
@@ -22,9 +22,12 @@ use snafu::OptionExt;
use table::metadata::TableId;

use crate::error::{self, Result};
+use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
-use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
+use crate::key::{
+    BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
+};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;

@@ -215,7 +218,7 @@ impl TableFlowManager {

    /// Builds a create table flow transaction.
    ///
-    /// Puts `__flow/source_table/{table_id}/{node_id}/{partition_id}` keys.
+    /// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys.
    pub fn build_create_txn(
        &self,
        flow_id: FlowId,

@@ -239,6 +242,44 @@ impl TableFlowManager {
        Ok(Txn::new().and_then(txns))
    }

+    /// Builds a update table flow transaction.
+    ///
+    /// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys,
+    /// Also remove previous
+    /// `__flow/source_table/{table_id}/{old_node_id}/{flow_id}/{partition_id}` keys.
+    pub fn build_update_txn(
+        &self,
+        flow_id: FlowId,
+        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
+        table_flow_values: Vec<(FlowPartitionId, TableFlowValue)>,
+        source_table_ids: &[TableId],
+    ) -> Result<Txn> {
+        let mut txns = Vec::with_capacity(2 * source_table_ids.len() * table_flow_values.len());
+
+        // first remove the old keys
+        for (part_id, node_id) in current_flow_info.flownode_ids() {
+            for source_table_id in current_flow_info.source_table_ids() {
+                txns.push(TxnOp::Delete(
+                    TableFlowKey::new(*source_table_id, *node_id, flow_id, *part_id).to_bytes(),
+                ));
+            }
+        }
+
+        for (partition_id, table_flow_value) in table_flow_values {
+            let flownode_id = table_flow_value.peer.id;
+            let value = table_flow_value.try_as_raw_value()?;
+            for source_table_id in source_table_ids {
+                txns.push(TxnOp::Put(
+                    TableFlowKey::new(*source_table_id, flownode_id, flow_id, partition_id)
+                        .to_bytes(),
+                    value.clone(),
+                ));
+            }
+        }
+
+        Ok(Txn::new().and_then(txns))
+    }
+
    async fn remap_table_flow_addresses(
        &self,
        table_flows: &mut [(TableFlowKey, TableFlowValue)],
@@ -35,7 +35,7 @@ pub mod memory;
pub mod rds;
pub mod test;
pub mod txn;
+pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;

#[async_trait]

85
src/common/meta/src/kv_backend/util.rs
Normal file
@@ -0,0 +1,85 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Removes sensitive information like passwords from connection strings.
///
/// This function sanitizes connection strings by removing credentials:
/// - For URL format (mysql://user:password@host:port/db): Removes everything before '@'
/// - For parameter format (host=localhost password=secret): Removes the password parameter
/// - For URL format without credentials (mysql://host:port/db): Removes the protocol prefix
///
/// # Arguments
///
/// * `conn_str` - The connection string to sanitize
///
/// # Returns
///
/// A sanitized version of the connection string with sensitive information removed
pub fn sanitize_connection_string(conn_str: &str) -> String {
    // Case 1: URL format with credentials (mysql://user:password@host:port/db)
    // Extract everything after the '@' symbol
    if let Some(at_pos) = conn_str.find('@') {
        return conn_str[at_pos + 1..].to_string();
    }

    // Case 2: Parameter format with password (host=localhost password=secret dbname=mydb)
    // Filter out any parameter that starts with "password="
    if conn_str.contains("password=") {
        return conn_str
            .split_whitespace()
            .filter(|param| !param.starts_with("password="))
            .collect::<Vec<_>>()
            .join(" ");
    }

    // Case 3: URL format without credentials (mysql://host:port/db)
    // Extract everything after the protocol prefix
    if let Some(host_part) = conn_str.split("://").nth(1) {
        return host_part.to_string();
    }

    // Case 4: Already sanitized or unknown format
    // Return as is
    conn_str.to_string()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sanitize_connection_string() {
        // Test URL format with username/password
        let conn_str = "mysql://user:password123@localhost:3306/db";
        assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");

        // Test URL format without credentials
        let conn_str = "mysql://localhost:3306/db";
        assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");

        // Test parameter format with password
        let conn_str = "host=localhost port=5432 user=postgres password=secret dbname=mydb";
        assert_eq!(
            sanitize_connection_string(conn_str),
            "host=localhost port=5432 user=postgres dbname=mydb"
        );

        // Test parameter format without password
        let conn_str = "host=localhost port=5432 user=postgres dbname=mydb";
        assert_eq!(
            sanitize_connection_string(conn_str),
            "host=localhost port=5432 user=postgres dbname=mydb"
        );
    }
}
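An illustrative usage sketch for the new util module: the function body below is reproduced from the file added above, and only the surrounding main() and the sample connection strings are assumptions added for demonstration.

fn sanitize_connection_string(conn_str: &str) -> String {
    // URL with credentials: keep only what follows '@'.
    if let Some(at_pos) = conn_str.find('@') {
        return conn_str[at_pos + 1..].to_string();
    }
    // Parameter style: drop the password=... entry.
    if conn_str.contains("password=") {
        return conn_str
            .split_whitespace()
            .filter(|param| !param.starts_with("password="))
            .collect::<Vec<_>>()
            .join(" ");
    }
    // URL without credentials: drop the scheme prefix.
    if let Some(host_part) = conn_str.split("://").nth(1) {
        return host_part.to_string();
    }
    conn_str.to_string()
}

fn main() {
    // Credentials in a URL are dropped together with the scheme.
    assert_eq!(
        sanitize_connection_string("postgres://user:secret@db.example.com:5432/meta"),
        "db.example.com:5432/meta"
    );
    // Parameter-style strings only lose the password entry.
    assert_eq!(
        sanitize_connection_string("host=localhost password=secret dbname=meta"),
        "host=localhost dbname=meta"
    );
}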
@@ -113,8 +113,10 @@ impl LeaderRegionManifestInfo {
    pub fn prunable_entry_id(&self) -> u64 {
        match self {
            LeaderRegionManifestInfo::Mito {
-                flushed_entry_id, ..
-            } => *flushed_entry_id,
+                flushed_entry_id,
+                topic_latest_entry_id,
+                ..
+            } => (*flushed_entry_id).max(*topic_latest_entry_id),
            LeaderRegionManifestInfo::Metric {
                data_flushed_entry_id,
                data_topic_latest_entry_id,
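A minimal sketch of the behaviour change above, using plain u64 values rather than the LeaderRegionManifestInfo enum: the prunable entry id for a Mito region becomes the larger of the flushed entry id and the latest entry id seen on the topic, instead of the flushed id alone.

fn prunable_entry_id(flushed_entry_id: u64, topic_latest_entry_id: u64) -> u64 {
    // Mirrors the new Mito arm: max(flushed, topic_latest).
    flushed_entry_id.max(topic_latest_entry_id)
}

fn main() {
    // If the topic has advanced past the last flush, the prunable id follows the topic.
    assert_eq!(prunable_entry_id(100, 250), 250);
    // Otherwise the flushed entry id still wins.
    assert_eq!(prunable_entry_id(300, 250), 300);
}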
@@ -35,17 +35,20 @@ use api::v1::{
};
use base64::engine::general_purpose;
use base64::Engine as _;
-use common_time::DatabaseTimeToLive;
+use common_time::{DatabaseTimeToLive, Timezone};
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};
-use session::context::QueryContextRef;
+use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;

-use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
+use crate::error::{
+    self, InvalidSetDatabaseOptionSnafu, InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu,
+    Result,
+};
use crate::key::FlowId;

/// DDL tasks

@@ -1202,7 +1205,7 @@ impl From<DropFlowTask> for PbDropFlowTask {
    }
}

-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct QueryContext {
    current_catalog: String,
    current_schema: String,

@@ -1223,6 +1226,19 @@ impl From<QueryContextRef> for QueryContext {
    }
}

+impl TryFrom<QueryContext> for session::context::QueryContext {
+    type Error = error::Error;
+    fn try_from(value: QueryContext) -> std::result::Result<Self, Self::Error> {
+        Ok(QueryContextBuilder::default()
+            .current_catalog(value.current_catalog)
+            .current_schema(value.current_schema)
+            .timezone(Timezone::from_tz_string(&value.timezone).context(InvalidTimeZoneSnafu)?)
+            .extensions(value.extensions)
+            .channel((value.channel as u32).into())
+            .build())
+    }
+}
+
impl From<QueryContext> for PbQueryContext {
    fn from(
        QueryContext {
|
|||||||
pub use common_base::AffectedRows;
|
pub use common_base::AffectedRows;
|
||||||
use common_query::request::QueryRequest;
|
use common_query::request::QueryRequest;
|
||||||
use common_recordbatch::SendableRecordBatchStream;
|
use common_recordbatch::SendableRecordBatchStream;
|
||||||
|
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
|
||||||
|
use common_wal::config::kafka::MetasrvKafkaConfig;
|
||||||
|
|
||||||
use crate::cache_invalidator::DummyCacheInvalidator;
|
use crate::cache_invalidator::DummyCacheInvalidator;
|
||||||
use crate::ddl::flow_meta::FlowMetadataAllocator;
|
use crate::ddl::flow_meta::FlowMetadataAllocator;
|
||||||
@@ -37,7 +39,8 @@ use crate::peer::{Peer, PeerLookupService};
|
|||||||
use crate::region_keeper::MemoryRegionKeeper;
|
use crate::region_keeper::MemoryRegionKeeper;
|
||||||
use crate::region_registry::LeaderRegionRegistry;
|
use crate::region_registry::LeaderRegionRegistry;
|
||||||
use crate::sequence::SequenceBuilder;
|
use crate::sequence::SequenceBuilder;
|
||||||
use crate::wal_options_allocator::WalOptionsAllocator;
|
use crate::wal_options_allocator::topic_pool::KafkaTopicPool;
|
||||||
|
use crate::wal_options_allocator::{build_kafka_topic_creator, WalOptionsAllocator};
|
||||||
use crate::{DatanodeId, FlownodeId};
|
use crate::{DatanodeId, FlownodeId};
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
@@ -199,3 +202,34 @@ impl PeerLookupService for NoopPeerLookupService {
|
|||||||
Ok(Some(Peer::empty(id)))
|
Ok(Some(Peer::empty(id)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Create a kafka topic pool for testing.
|
||||||
|
pub async fn test_kafka_topic_pool(
|
||||||
|
broker_endpoints: Vec<String>,
|
||||||
|
num_topics: usize,
|
||||||
|
auto_create_topics: bool,
|
||||||
|
topic_name_prefix: Option<&str>,
|
||||||
|
) -> KafkaTopicPool {
|
||||||
|
let mut config = MetasrvKafkaConfig {
|
||||||
|
connection: KafkaConnectionConfig {
|
||||||
|
broker_endpoints,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
kafka_topic: KafkaTopicConfig {
|
||||||
|
num_topics,
|
||||||
|
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
auto_create_topics,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
if let Some(prefix) = topic_name_prefix {
|
||||||
|
config.kafka_topic.topic_name_prefix = prefix.to_string();
|
||||||
|
}
|
||||||
|
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
|
||||||
|
let topic_creator = build_kafka_topic_creator(&config.connection, &config.kafka_topic)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
KafkaTopicPool::new(&config, kv_backend, topic_creator)
|
||||||
|
}
|
||||||
|
|||||||
@@ -112,7 +112,9 @@ pub async fn build_wal_options_allocator(
        NAME_PATTERN_REGEX.is_match(prefix),
        InvalidTopicNamePrefixSnafu { prefix }
    );
-    let topic_creator = build_kafka_topic_creator(kafka_config).await?;
+    let topic_creator =
+        build_kafka_topic_creator(&kafka_config.connection, &kafka_config.kafka_topic)
+            .await?;
    let topic_pool = KafkaTopicPool::new(kafka_config, kv_backend, topic_creator);
    Ok(WalOptionsAllocator::Kafka(topic_pool))
}

@@ -151,13 +153,16 @@ pub fn prepare_wal_options(
mod tests {
    use std::assert_matches::assert_matches;

-    use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
+    use common_wal::config::kafka::common::KafkaTopicConfig;
    use common_wal::config::kafka::MetasrvKafkaConfig;
-    use common_wal::test_util::run_test_with_kafka_wal;
+    use common_wal::maybe_skip_kafka_integration_test;
+    use common_wal::test_util::get_kafka_endpoints;

    use super::*;
    use crate::error::Error;
    use crate::kv_backend::memory::MemoryKvBackend;
+    use crate::test_util::test_kafka_topic_pool;
+    use crate::wal_options_allocator::selector::RoundRobinTopicSelector;

    // Tests that the wal options allocator could successfully allocate raft-engine wal options.
    #[tokio::test]

@@ -197,55 +202,42 @@ mod tests {
        assert_matches!(got, Error::InvalidTopicNamePrefix { .. });
    }

-    // Tests that the wal options allocator could successfully allocate Kafka wal options.
    #[tokio::test]
-    async fn test_allocator_with_kafka() {
-        run_test_with_kafka_wal(|broker_endpoints| {
-            Box::pin(async {
-                let topics = (0..256)
-                    .map(|i| format!("test_allocator_with_kafka_{}_{}", i, uuid::Uuid::new_v4()))
-                    .collect::<Vec<_>>();
-
-                // Creates a topic manager.
-                let kafka_topic = KafkaTopicConfig {
-                    replication_factor: broker_endpoints.len() as i16,
-                    ..Default::default()
-                };
-                let config = MetasrvKafkaConfig {
-                    connection: KafkaConnectionConfig {
-                        broker_endpoints,
-                        ..Default::default()
-                    },
-                    kafka_topic,
-                    ..Default::default()
-                };
-                let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
-                let topic_creator = build_kafka_topic_creator(&config).await.unwrap();
-                let mut topic_pool = KafkaTopicPool::new(&config, kv_backend, topic_creator);
-                topic_pool.topics.clone_from(&topics);
-                topic_pool.selector = Arc::new(selector::RoundRobinTopicSelector::default());
-
-                // Creates an options allocator.
-                let allocator = WalOptionsAllocator::Kafka(topic_pool);
-                allocator.start().await.unwrap();
-
-                let num_regions = 32;
-                let regions = (0..num_regions).collect::<Vec<_>>();
-                let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
-
-                // Check the allocated wal options contain the expected topics.
-                let expected = (0..num_regions)
-                    .map(|i| {
-                        let options = WalOptions::Kafka(KafkaWalOptions {
-                            topic: topics[i as usize].clone(),
-                        });
-                        (i, serde_json::to_string(&options).unwrap())
-                    })
-                    .collect::<HashMap<_, _>>();
-                assert_eq!(got, expected);
-            })
-        })
-        .await;
+    async fn test_allocator_with_kafka_allocate_wal_options() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let num_topics = 5;
+        let mut topic_pool = test_kafka_topic_pool(
+            get_kafka_endpoints(),
+            num_topics,
+            true,
+            Some("test_allocator_with_kafka"),
+        )
+        .await;
+        topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
+        let topics = topic_pool.topics.clone();
+        // clean up the topics before test
+        let topic_creator = topic_pool.topic_creator();
+        topic_creator.delete_topics(&topics).await.unwrap();
+
+        // Creates an options allocator.
+        let allocator = WalOptionsAllocator::Kafka(topic_pool);
+        allocator.start().await.unwrap();
+
+        let num_regions = 3;
+        let regions = (0..num_regions).collect::<Vec<_>>();
+        let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
+
+        // Check the allocated wal options contain the expected topics.
+        let expected = (0..num_regions)
+            .map(|i| {
+                let options = WalOptions::Kafka(KafkaWalOptions {
+                    topic: topics[i as usize].clone(),
+                });
+                (i, serde_json::to_string(&options).unwrap())
+            })
+            .collect::<HashMap<_, _>>();
+        assert_eq!(got, expected);
    }

    #[tokio::test]
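An illustrative restatement of the expectation the rewritten allocator test checks: with a fresh round-robin selector and fewer regions than topics, region i is expected to receive topic i. The helper name and plain-string types below are assumptions for demonstration only, not KafkaWalOptions or the selector API.

fn expected_topic(topics: &[String], region_id: usize) -> &str {
    // With a fresh round-robin selector, regions are expected to be assigned
    // topics in order, wrapping around once every topic has been used.
    &topics[region_id % topics.len()]
}

fn main() {
    let topics: Vec<String> = (0..5).map(|i| format!("topic_{i}")).collect();
    assert_eq!(expected_topic(&topics, 0), "topic_0");
    assert_eq!(expected_topic(&topics, 2), "topic_2");
    assert_eq!(expected_topic(&topics, 7), "topic_2");
}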
@@ -12,20 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use common_telemetry::{error, info};
-use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG;
-use common_wal::config::kafka::MetasrvKafkaConfig;
+use common_telemetry::{debug, error, info};
+use common_wal::config::kafka::common::{
+    KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_BACKOFF_CONFIG,
+};
use rskafka::client::error::Error as RsKafkaError;
use rskafka::client::error::ProtocolError::TopicAlreadyExists;
-use rskafka::client::partition::{Compression, UnknownTopicHandling};
+use rskafka::client::partition::{Compression, OffsetAt, PartitionClient, UnknownTopicHandling};
use rskafka::client::{Client, ClientBuilder};
use rskafka::record::Record;
use snafu::ResultExt;

use crate::error::{
-    BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, BuildKafkaPartitionClientSnafu,
-    CreateKafkaWalTopicSnafu, ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result,
-    TlsConfigSnafu,
+    BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, CreateKafkaWalTopicSnafu,
+    KafkaGetOffsetSnafu, KafkaPartitionClientSnafu, ProduceRecordSnafu, ResolveKafkaEndpointSnafu,
+    Result, TlsConfigSnafu,
};

// Each topic only has one partition for now.

@@ -70,21 +71,47 @@ impl KafkaTopicCreator {
                info!("The topic {} already exists", topic);
                Ok(())
            } else {
-                error!("Failed to create a topic {}, error {:?}", topic, e);
+                error!(e; "Failed to create a topic {}", topic);
                Err(e).context(CreateKafkaWalTopicSnafu)
            }
        }
    }

-    async fn append_noop_record(&self, topic: &String, client: &Client) -> Result<()> {
-        let partition_client = client
+    async fn prepare_topic(&self, topic: &String) -> Result<()> {
+        let partition_client = self.partition_client(topic).await?;
+        self.append_noop_record(topic, &partition_client).await?;
+        Ok(())
+    }
+
+    /// Creates a [PartitionClient] for the given topic.
+    async fn partition_client(&self, topic: &str) -> Result<PartitionClient> {
+        self.client
            .partition_client(topic, DEFAULT_PARTITION, UnknownTopicHandling::Retry)
            .await
-            .context(BuildKafkaPartitionClientSnafu {
+            .context(KafkaPartitionClientSnafu {
                topic,
                partition: DEFAULT_PARTITION,
+            })
+    }
+
+    /// Appends a noop record to the topic.
+    /// It only appends a noop record if the topic is empty.
+    async fn append_noop_record(
+        &self,
+        topic: &String,
+        partition_client: &PartitionClient,
+    ) -> Result<()> {
+        let end_offset = partition_client
+            .get_offset(OffsetAt::Latest)
+            .await
+            .context(KafkaGetOffsetSnafu {
+                topic: topic.to_string(),
+                partition: DEFAULT_PARTITION,
            })?;
+        if end_offset > 0 {
+            return Ok(());
+        }
+
        partition_client
            .produce(

@@ -98,22 +125,28 @@ impl KafkaTopicCreator {
            )
            .await
            .context(ProduceRecordSnafu { topic })?;
+        debug!("Appended a noop record to topic {}", topic);
+
        Ok(())
    }

+    /// Creates topics in Kafka.
+    pub async fn create_topics(&self, topics: &[String]) -> Result<()> {
+        let tasks = topics
+            .iter()
+            .map(|topic| async { self.create_topic(topic, &self.client).await })
+            .collect::<Vec<_>>();
+        futures::future::try_join_all(tasks).await.map(|_| ())
+    }
+
    /// Prepares topics in Kafka.
-    /// 1. Creates missing topics.
-    /// 2. Appends a noop record to each topic.
-    pub async fn prepare_topics(&self, topics: &[&String]) -> Result<()> {
+    ///
+    /// It appends a noop record to each topic if the topic is empty.
+    pub async fn prepare_topics(&self, topics: &[String]) -> Result<()> {
        // Try to create missing topics.
        let tasks = topics
            .iter()
-            .map(|topic| async {
-                self.create_topic(topic, &self.client).await?;
-                self.append_noop_record(topic, &self.client).await?;
-                Ok(())
-            })
+            .map(|topic| async { self.prepare_topic(topic).await })
            .collect::<Vec<_>>();
        futures::future::try_join_all(tasks).await.map(|_| ())
    }

@@ -129,34 +162,244 @@ impl KafkaTopicCreator {
    }
}

+#[cfg(test)]
+impl KafkaTopicCreator {
+    pub async fn delete_topics(&self, topics: &[String]) -> Result<()> {
+        let tasks = topics
+            .iter()
+            .map(|topic| async { self.delete_topic(topic, &self.client).await })
+            .collect::<Vec<_>>();
+        futures::future::try_join_all(tasks).await.map(|_| ())
+    }
+
+    async fn delete_topic(&self, topic: &String, client: &Client) -> Result<()> {
+        let controller = client
+            .controller_client()
+            .context(BuildKafkaCtrlClientSnafu)?;
+        match controller.delete_topic(topic, 10).await {
+            Ok(_) => {
+                info!("Successfully deleted topic {}", topic);
+                Ok(())
+            }
+            Err(e) => {
+                if Self::is_unknown_topic_err(&e) {
+                    info!("The topic {} does not exist", topic);
+                    Ok(())
+                } else {
+                    panic!("Failed to delete a topic {}, error: {}", topic, e);
+                }
+            }
+        }
+    }
+
+    fn is_unknown_topic_err(e: &RsKafkaError) -> bool {
+        matches!(
+            e,
+            &RsKafkaError::ServerError {
+                protocol_error: rskafka::client::error::ProtocolError::UnknownTopicOrPartition,
+                ..
+            }
+        )
+    }
+
+    pub async fn get_partition_client(&self, topic: &str) -> PartitionClient {
+        self.partition_client(topic).await.unwrap()
+    }
+}
/// Builds a kafka [Client](rskafka::client::Client).
-pub async fn build_kafka_client(config: &MetasrvKafkaConfig) -> Result<Client> {
+pub async fn build_kafka_client(connection: &KafkaConnectionConfig) -> Result<Client> {
    // Builds an kafka controller client for creating topics.
-    let broker_endpoints = common_wal::resolve_to_ipv4(&config.connection.broker_endpoints)
+    let broker_endpoints = common_wal::resolve_to_ipv4(&connection.broker_endpoints)
        .await
        .context(ResolveKafkaEndpointSnafu)?;
    let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(DEFAULT_BACKOFF_CONFIG);
-    if let Some(sasl) = &config.connection.sasl {
+    if let Some(sasl) = &connection.sasl {
        builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
    };
-    if let Some(tls) = &config.connection.tls {
+    if let Some(tls) = &connection.tls {
        builder = builder.tls_config(tls.to_tls_config().await.context(TlsConfigSnafu)?)
    };
    builder
        .build()
        .await
        .with_context(|_| BuildKafkaClientSnafu {
-            broker_endpoints: config.connection.broker_endpoints.clone(),
+            broker_endpoints: connection.broker_endpoints.clone(),
        })
}

/// Builds a [KafkaTopicCreator].
-pub async fn build_kafka_topic_creator(config: &MetasrvKafkaConfig) -> Result<KafkaTopicCreator> {
-    let client = build_kafka_client(config).await?;
+pub async fn build_kafka_topic_creator(
+    connection: &KafkaConnectionConfig,
+    kafka_topic: &KafkaTopicConfig,
+) -> Result<KafkaTopicCreator> {
+    let client = build_kafka_client(connection).await?;
    Ok(KafkaTopicCreator {
        client,
-        num_partitions: config.kafka_topic.num_partitions,
-        replication_factor: config.kafka_topic.replication_factor,
-        create_topic_timeout: config.kafka_topic.create_topic_timeout.as_millis() as i32,
+        num_partitions: kafka_topic.num_partitions,
+        replication_factor: kafka_topic.replication_factor,
+        create_topic_timeout: kafka_topic.create_topic_timeout.as_millis() as i32,
    })
}
+
+#[cfg(test)]
+mod tests {
+    use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
+    use common_wal::maybe_skip_kafka_integration_test;
+    use common_wal::test_util::get_kafka_endpoints;
+
+    use super::*;
+
+    async fn test_topic_creator(broker_endpoints: Vec<String>) -> KafkaTopicCreator {
+        let connection = KafkaConnectionConfig {
+            broker_endpoints,
+            ..Default::default()
+        };
+        let kafka_topic = KafkaTopicConfig::default();
+
+        build_kafka_topic_creator(&connection, &kafka_topic)
+            .await
+            .unwrap()
+    }
+
+    async fn append_records(partition_client: &PartitionClient, num_records: usize) -> Result<()> {
+        for i in 0..num_records {
+            partition_client
+                .produce(
+                    vec![Record {
+                        key: Some(b"test".to_vec()),
+                        value: Some(format!("test {}", i).as_bytes().to_vec()),
+                        timestamp: chrono::Utc::now(),
+                        headers: Default::default(),
+                    }],
+                    Compression::Lz4,
+                )
+                .await
+                .unwrap();
+        }
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_append_noop_record_to_empty_topic() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let prefix = "append_noop_record_to_empty_topic";
+        let creator = test_topic_creator(get_kafka_endpoints()).await;
+
+        let topic = format!("{}{}", prefix, "0");
+        // Clean up the topics before test
+        creator.delete_topics(&[topic.to_string()]).await.unwrap();
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+
+        let partition_client = creator.partition_client(&topic).await.unwrap();
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 0);
+
+        // The topic is not empty, so no noop record is appended.
+        creator
+            .append_noop_record(&topic, &partition_client)
+            .await
+            .unwrap();
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 1);
+    }
+
+    #[tokio::test]
+    async fn test_append_noop_record_to_non_empty_topic() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let prefix = "append_noop_record_to_non_empty_topic";
+        let creator = test_topic_creator(get_kafka_endpoints()).await;
+
+        let topic = format!("{}{}", prefix, "0");
+        // Clean up the topics before test
+        creator.delete_topics(&[topic.to_string()]).await.unwrap();
+
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+        let partition_client = creator.partition_client(&topic).await.unwrap();
+        append_records(&partition_client, 2).await.unwrap();
+
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 2);
+
+        // The topic is not empty, so no noop record is appended.
+        creator
+            .append_noop_record(&topic, &partition_client)
+            .await
+            .unwrap();
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 2);
+    }
+
+    #[tokio::test]
+    async fn test_create_topic() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let prefix = "create_topic";
+        let creator = test_topic_creator(get_kafka_endpoints()).await;
+
+        let topic = format!("{}{}", prefix, "0");
+        // Clean up the topics before test
+        creator.delete_topics(&[topic.to_string()]).await.unwrap();
+
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+        // Should be ok
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+
+        let partition_client = creator.partition_client(&topic).await.unwrap();
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 0);
+    }
+
+    #[tokio::test]
+    async fn test_prepare_topic() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let prefix = "prepare_topic";
+        let creator = test_topic_creator(get_kafka_endpoints()).await;
+
+        let topic = format!("{}{}", prefix, "0");
+        // Clean up the topics before test
+        creator.delete_topics(&[topic.to_string()]).await.unwrap();
+
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+        creator.prepare_topic(&topic).await.unwrap();
+
+        let partition_client = creator.partition_client(&topic).await.unwrap();
+        let start_offset = partition_client
+            .get_offset(OffsetAt::Earliest)
+            .await
+            .unwrap();
+        assert_eq!(start_offset, 0);
+
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 1);
+    }
+
+    #[tokio::test]
+    async fn test_prepare_topic_with_stale_records_without_pruning() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+
+        let prefix = "prepare_topic_with_stale_records_without_pruning";
+        let creator = test_topic_creator(get_kafka_endpoints()).await;
+
+        let topic = format!("{}{}", prefix, "0");
+        // Clean up the topics before test
+        creator.delete_topics(&[topic.to_string()]).await.unwrap();
+
+        creator.create_topics(&[topic.to_string()]).await.unwrap();
+        let partition_client = creator.partition_client(&topic).await.unwrap();
+        append_records(&partition_client, 10).await.unwrap();
+
+        creator.prepare_topic(&topic).await.unwrap();
+
+        let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
+        assert_eq!(end_offset, 10);
+        let start_offset = partition_client
+            .get_offset(OffsetAt::Earliest)
+            .await
+            .unwrap();
+        assert_eq!(start_offset, 0);
+    }
+}
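A minimal sketch of the guard the new prepare_topic path introduces, with a plain integer offset standing in for a real Kafka partition client: a noop record is appended only when the latest offset is still zero, that is, when the topic is empty.

fn needs_noop_record(latest_offset: i64) -> bool {
    latest_offset == 0
}

fn main() {
    assert!(needs_noop_record(0)); // empty topic: append one noop record
    assert!(!needs_noop_record(10)); // topic already has records: leave it alone
}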
@@ -40,24 +40,21 @@ impl KafkaTopicManager {
        Ok(topics)
    }

-    /// Restores topics from the key-value backend. and returns the topics that are not stored in kvbackend.
-    pub async fn get_topics_to_create<'a>(
-        &self,
-        all_topics: &'a [String],
-    ) -> Result<Vec<&'a String>> {
+    /// Returns the topics that are not prepared.
+    pub async fn unprepare_topics(&self, all_topics: &[String]) -> Result<Vec<String>> {
        let existing_topics = self.restore_topics().await?;
        let existing_topic_set = existing_topics.iter().collect::<HashSet<_>>();
        let mut topics_to_create = Vec::with_capacity(all_topics.len());
        for topic in all_topics {
            if !existing_topic_set.contains(topic) {
-                topics_to_create.push(topic);
+                topics_to_create.push(topic.to_string());
            }
        }
        Ok(topics_to_create)
    }

-    /// Persists topics into the key-value backend.
-    pub async fn persist_topics(&self, topics: &[String]) -> Result<()> {
+    /// Persists prepared topics into the key-value backend.
+    pub async fn persist_prepared_topics(&self, topics: &[String]) -> Result<()> {
        self.topic_name_manager
            .batch_put(
                topics

@@ -70,6 +67,14 @@ impl KafkaTopicManager {
    }
}

+#[cfg(test)]
+impl KafkaTopicManager {
+    /// Lists all topics in the key-value backend.
+    pub async fn list_topics(&self) -> Result<Vec<String>> {
+        self.topic_name_manager.range().await
+    }
+}
+
#[cfg(test)]
mod tests {
    use std::sync::Arc;

@@ -90,11 +95,11 @@ mod tests {

        // No legacy topics.
        let mut topics_to_be_created = topic_kvbackend_manager
-            .get_topics_to_create(&all_topics)
+            .unprepare_topics(&all_topics)
            .await
            .unwrap();
        topics_to_be_created.sort();
-        let mut expected = all_topics.iter().collect::<Vec<_>>();
+        let mut expected = all_topics.clone();
        expected.sort();
        assert_eq!(expected, topics_to_be_created);

@@ -109,7 +114,7 @@ mod tests {
        assert!(res.prev_kv.is_none());

        let topics_to_be_created = topic_kvbackend_manager
-            .get_topics_to_create(&all_topics)
+            .unprepare_topics(&all_topics)
            .await
            .unwrap();
        assert!(topics_to_be_created.is_empty());

@@ -144,21 +149,21 @@ mod tests {
        let topic_kvbackend_manager = KafkaTopicManager::new(kv_backend);

        let mut topics_to_be_created = topic_kvbackend_manager
-            .get_topics_to_create(&all_topics)
+            .unprepare_topics(&all_topics)
            .await
            .unwrap();
        topics_to_be_created.sort();
-        let mut expected = all_topics.iter().collect::<Vec<_>>();
+        let mut expected = all_topics.clone();
        expected.sort();
        assert_eq!(expected, topics_to_be_created);

        // Persists topics to kv backend.
        topic_kvbackend_manager
-            .persist_topics(&all_topics)
+            .persist_prepared_topics(&all_topics)
            .await
            .unwrap();
        let topics_to_be_created = topic_kvbackend_manager
-            .get_topics_to_create(&all_topics)
+            .unprepare_topics(&all_topics)
            .await
            .unwrap();
        assert!(topics_to_be_created.is_empty());
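A simplified sketch of the set difference that unprepare_topics computes above, using a plain Vec and HashSet rather than the KafkaTopicManager and its kv backend: keep every configured topic that is not yet recorded as prepared.

use std::collections::HashSet;

fn unprepared_topics(all_topics: &[String], persisted: &[String]) -> Vec<String> {
    // Topics already persisted in the kv backend are considered prepared.
    let persisted: HashSet<&String> = persisted.iter().collect();
    all_topics
        .iter()
        .filter(|topic| !persisted.contains(topic))
        .cloned()
        .collect()
}

fn main() {
    let all = vec!["wal_0".to_string(), "wal_1".to_string(), "wal_2".to_string()];
    let persisted = vec!["wal_1".to_string()];
    assert_eq!(unprepared_topics(&all, &persisted), vec!["wal_0", "wal_2"]);
}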
@@ -15,6 +15,7 @@
|
|||||||
use std::fmt::{self, Formatter};
|
use std::fmt::{self, Formatter};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use common_telemetry::info;
|
||||||
use common_wal::config::kafka::MetasrvKafkaConfig;
|
use common_wal::config::kafka::MetasrvKafkaConfig;
|
||||||
use common_wal::TopicSelectorType;
|
use common_wal::TopicSelectorType;
|
||||||
use snafu::ensure;
|
use snafu::ensure;
|
||||||
@@ -77,27 +78,35 @@ impl KafkaTopicPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Tries to activate the topic manager when metasrv becomes the leader.
|
/// Tries to activate the topic manager when metasrv becomes the leader.
|
||||||
|
///
|
||||||
/// First tries to restore persisted topics from the kv backend.
|
/// First tries to restore persisted topics from the kv backend.
|
||||||
/// If not enough topics retrieved, it will try to contact the Kafka cluster and request creating more topics.
|
/// If there are unprepared topics (topics that exist in the configuration but not in the kv backend),
|
||||||
|
/// it will create these topics in Kafka if `auto_create_topics` is enabled.
|
||||||
|
///
|
||||||
|
/// Then it prepares all unprepared topics by appending a noop record if the topic is empty,
|
||||||
|
/// and persists them in the kv backend for future use.
|
||||||
pub async fn activate(&self) -> Result<()> {
|
pub async fn activate(&self) -> Result<()> {
|
||||||
if !self.auto_create_topics {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let num_topics = self.topics.len();
|
let num_topics = self.topics.len();
|
||||||
ensure!(num_topics > 0, InvalidNumTopicsSnafu { num_topics });
|
ensure!(num_topics > 0, InvalidNumTopicsSnafu { num_topics });
|
||||||
|
|
||||||
let topics_to_be_created = self
|
let unprepared_topics = self.topic_manager.unprepare_topics(&self.topics).await?;
|
||||||
.topic_manager
|
|
||||||
.get_topics_to_create(&self.topics)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if !topics_to_be_created.is_empty() {
|
if !unprepared_topics.is_empty() {
|
||||||
|
if self.auto_create_topics {
|
||||||
|
info!("Creating {} topics", unprepared_topics.len());
|
||||||
|
self.topic_creator.create_topics(&unprepared_topics).await?;
|
||||||
|
} else {
|
||||||
|
info!("Auto create topics is disabled, skipping topic creation.");
|
||||||
|
}
|
||||||
self.topic_creator
|
self.topic_creator
|
||||||
.prepare_topics(&topics_to_be_created)
|
.prepare_topics(&unprepared_topics)
|
||||||
|
.await?;
|
||||||
|
self.topic_manager
|
||||||
|
.persist_prepared_topics(&unprepared_topics)
|
||||||
.await?;
|
.await?;
|
||||||
self.topic_manager.persist_topics(&self.topics).await?;
|
|
||||||
}
|
}
|
||||||
|
info!("Activated topic pool with {} topics", self.topics.len());
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,77 +123,147 @@ impl KafkaTopicPool {
     }
 }

+#[cfg(test)]
+impl KafkaTopicPool {
+    pub(crate) fn topic_manager(&self) -> &KafkaTopicManager {
+        &self.topic_manager
+    }
+
+    pub(crate) fn topic_creator(&self) -> &KafkaTopicCreator {
+        &self.topic_creator
+    }
+}
+
 #[cfg(test)]
 mod tests {
-    use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
-    use common_wal::test_util::run_test_with_kafka_wal;
+    use std::assert_matches::assert_matches;
+
+    use common_wal::maybe_skip_kafka_integration_test;
+    use common_wal::test_util::get_kafka_endpoints;

     use super::*;
-    use crate::kv_backend::memory::MemoryKvBackend;
-    use crate::wal_options_allocator::topic_creator::build_kafka_topic_creator;
+    use crate::error::Error;
+    use crate::test_util::test_kafka_topic_pool;
+    use crate::wal_options_allocator::selector::RoundRobinTopicSelector;
+
+    #[tokio::test]
+    async fn test_pool_invalid_number_topics_err() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let endpoints = get_kafka_endpoints();
+
+        let pool = test_kafka_topic_pool(endpoints.clone(), 0, false, None).await;
+        let err = pool.activate().await.unwrap_err();
+        assert_matches!(err, Error::InvalidNumTopics { .. });
+
+        let pool = test_kafka_topic_pool(endpoints, 0, true, None).await;
+        let err = pool.activate().await.unwrap_err();
+        assert_matches!(err, Error::InvalidNumTopics { .. });
+    }
+
+    #[tokio::test]
+    async fn test_pool_activate_unknown_topics_err() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let pool =
+            test_kafka_topic_pool(get_kafka_endpoints(), 1, false, Some("unknown_topic")).await;
+        let err = pool.activate().await.unwrap_err();
+        assert_matches!(err, Error::KafkaPartitionClient { .. });
+    }
+
+    #[tokio::test]
+    async fn test_pool_activate() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let pool =
+            test_kafka_topic_pool(get_kafka_endpoints(), 2, true, Some("pool_activate")).await;
+        // clean up the topics before test
+        let topic_creator = pool.topic_creator();
+        topic_creator.delete_topics(&pool.topics).await.unwrap();
+
+        let topic_manager = pool.topic_manager();
+        pool.activate().await.unwrap();
+        let topics = topic_manager.list_topics().await.unwrap();
+        assert_eq!(topics.len(), 2);
+    }
+
+    #[tokio::test]
+    async fn test_pool_activate_with_existing_topics() {
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let prefix = "pool_activate_with_existing_topics";
+        let pool = test_kafka_topic_pool(get_kafka_endpoints(), 2, true, Some(prefix)).await;
+        let topic_creator = pool.topic_creator();
+        topic_creator.delete_topics(&pool.topics).await.unwrap();
+
+        let topic_manager = pool.topic_manager();
+        // persists one topic info, then pool.activate() will create new topics that not persisted.
+        topic_manager
+            .persist_prepared_topics(&pool.topics[0..1])
+            .await
+            .unwrap();
+
+        pool.activate().await.unwrap();
+        let topics = topic_manager.list_topics().await.unwrap();
+        assert_eq!(topics.len(), 2);
+
+        let client = pool.topic_creator().client();
+        let topics = client
+            .list_topics()
+            .await
+            .unwrap()
+            .into_iter()
+            .filter(|t| t.name.starts_with(prefix))
+            .collect::<Vec<_>>();
+        assert_eq!(topics.len(), 1);
+    }

     /// Tests that the topic manager could allocate topics correctly.
     #[tokio::test]
     async fn test_alloc_topics() {
-        run_test_with_kafka_wal(|broker_endpoints| {
-            Box::pin(async {
-                // Constructs topics that should be created.
-                let topics = (0..256)
-                    .map(|i| format!("test_alloc_topics_{}_{}", i, uuid::Uuid::new_v4()))
-                    .collect::<Vec<_>>();
-
-                // Creates a topic manager.
-                let kafka_topic = KafkaTopicConfig {
-                    replication_factor: broker_endpoints.len() as i16,
-                    ..Default::default()
-                };
-                let config = MetasrvKafkaConfig {
-                    connection: KafkaConnectionConfig {
-                        broker_endpoints,
-                        ..Default::default()
-                    },
-                    kafka_topic,
-                    ..Default::default()
-                };
-                let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
-                let topic_creator = build_kafka_topic_creator(&config).await.unwrap();
-                let mut topic_pool = KafkaTopicPool::new(&config, kv_backend, topic_creator);
-                // Replaces the default topic pool with the constructed topics.
-                topic_pool.topics.clone_from(&topics);
-                // Replaces the default selector with a round-robin selector without shuffled.
-                topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
-                topic_pool.activate().await.unwrap();
-
-                // Selects exactly the number of `num_topics` topics one by one.
-                let got = (0..topics.len())
-                    .map(|_| topic_pool.select().unwrap())
-                    .cloned()
-                    .collect::<Vec<_>>();
-                assert_eq!(got, topics);
-
-                // Selects exactly the number of `num_topics` topics in a batching manner.
-                let got = topic_pool
-                    .select_batch(topics.len())
-                    .unwrap()
-                    .into_iter()
-                    .map(ToString::to_string)
-                    .collect::<Vec<_>>();
-                assert_eq!(got, topics);
-
-                // Selects more than the number of `num_topics` topics.
-                let got = topic_pool
-                    .select_batch(2 * topics.len())
-                    .unwrap()
-                    .into_iter()
-                    .map(ToString::to_string)
-                    .collect::<Vec<_>>();
-                let expected = vec![topics.clone(); 2]
-                    .into_iter()
-                    .flatten()
-                    .collect::<Vec<_>>();
-                assert_eq!(got, expected);
-            })
-        })
+        common_telemetry::init_default_ut_logging();
+        maybe_skip_kafka_integration_test!();
+        let num_topics = 5;
+        let mut topic_pool = test_kafka_topic_pool(
+            get_kafka_endpoints(),
+            num_topics,
+            true,
+            Some("test_allocator_with_kafka"),
+        )
         .await;
+        topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
+        let topics = topic_pool.topics.clone();
+        // clean up the topics before test
+        let topic_creator = topic_pool.topic_creator();
+        topic_creator.delete_topics(&topics).await.unwrap();
+
+        // Selects exactly the number of `num_topics` topics one by one.
+        let got = (0..topics.len())
+            .map(|_| topic_pool.select().unwrap())
+            .cloned()
+            .collect::<Vec<_>>();
+        assert_eq!(got, topics);
+
+        // Selects exactly the number of `num_topics` topics in a batching manner.
+        let got = topic_pool
+            .select_batch(topics.len())
+            .unwrap()
+            .into_iter()
+            .map(ToString::to_string)
+            .collect::<Vec<_>>();
+        assert_eq!(got, topics);
+
+        // Selects more than the number of `num_topics` topics.
+        let got = topic_pool
+            .select_batch(2 * topics.len())
+            .unwrap()
+            .into_iter()
+            .map(ToString::to_string)
+            .collect::<Vec<_>>();
+        let expected = vec![topics.clone(); 2]
+            .into_iter()
+            .flatten()
+            .collect::<Vec<_>>();
+        assert_eq!(got, expected);
     }
 }
@@ -23,11 +23,16 @@ use serde::{Deserialize, Serialize};
 use snafu::{OptionExt, ResultExt};

 /// The default backoff config for kafka client.
+///
+/// If the operation fails, the client will retry 3 times.
+/// The backoff time is 100ms, 300ms, 900ms.
 pub const DEFAULT_BACKOFF_CONFIG: BackoffConfig = BackoffConfig {
     init_backoff: Duration::from_millis(100),
-    max_backoff: Duration::from_secs(10),
-    base: 2.0,
-    deadline: Some(Duration::from_secs(120)),
+    max_backoff: Duration::from_secs(1),
+    base: 3.0,
+    // The deadline shouldn't be too long,
+    // otherwise the client will block the worker loop for a long time.
+    deadline: Some(Duration::from_secs(3)),
 };

 /// Default interval for auto WAL pruning.
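Note on the hunk above: with `init_backoff = 100ms`, `base = 3.0` and `max_backoff = 1s`, the delays grow geometrically and match the 100ms/300ms/900ms sequence in the new doc comment, while the 3s deadline is what bounds the client to roughly three retries. A minimal sketch of that arithmetic in plain Rust (illustrative only, not the Kafka client itself, which may also apply jitter):

```rust
use std::time::Duration;

fn main() {
    let init = Duration::from_millis(100);
    let max = Duration::from_secs(1);
    let base: f64 = 3.0;

    // delay(n) = min(init * base^n, max) for n = 0, 1, 2  ->  100ms, 300ms, 900ms
    for n in 0..3 {
        let raw = init.as_secs_f64() * base.powi(n);
        let delay = Duration::from_secs_f64(raw.min(max.as_secs_f64()));
        println!("retry #{} after {:?}", n + 1, delay);
    }
}
```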
@@ -31,3 +31,33 @@ where

     test(endpoints).await
 }
+
+/// Get the kafka endpoints from the environment variable `GT_KAFKA_ENDPOINTS`.
+///
+/// The format of the environment variable is:
+/// ```
+/// GT_KAFKA_ENDPOINTS=localhost:9092,localhost:9093
+/// ```
+pub fn get_kafka_endpoints() -> Vec<String> {
+    let endpoints = std::env::var("GT_KAFKA_ENDPOINTS").unwrap();
+    endpoints
+        .split(',')
+        .map(|s| s.trim().to_string())
+        .collect::<Vec<_>>()
+}
+
+#[macro_export]
+/// Skip the test if the environment variable `GT_KAFKA_ENDPOINTS` is not set.
+///
+/// The format of the environment variable is:
+/// ```
+/// GT_KAFKA_ENDPOINTS=localhost:9092,localhost:9093
+/// ```
+macro_rules! maybe_skip_kafka_integration_test {
+    () => {
+        if std::env::var("GT_KAFKA_ENDPOINTS").is_err() {
+            common_telemetry::warn!("The endpoints is empty, skipping the test");
+            return;
+        }
+    };
+}
@@ -57,9 +57,9 @@ use tokio::sync::Notify;

 use crate::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
 use crate::error::{
-    self, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingCacheSnafu,
-    MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result, ShutdownInstanceSnafu,
-    ShutdownServerSnafu, StartServerSnafu,
+    self, BuildMetricEngineSnafu, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu,
+    MissingCacheSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result,
+    ShutdownInstanceSnafu, ShutdownServerSnafu, StartServerSnafu,
 };
 use crate::event_listener::{
     new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
@@ -398,44 +398,46 @@ impl DatanodeBuilder {
         schema_metadata_manager: SchemaMetadataManagerRef,
         plugins: Plugins,
     ) -> Result<Vec<RegionEngineRef>> {
-        let mut engines = vec![];
-        let mut metric_engine_config = opts.region_engine.iter().find_map(|c| match c {
-            RegionEngineConfig::Metric(config) => Some(config.clone()),
-            _ => None,
-        });
+        let mut metric_engine_config = metric_engine::config::EngineConfig::default();
+        let mut mito_engine_config = MitoConfig::default();
+        let mut file_engine_config = file_engine::config::EngineConfig::default();

         for engine in &opts.region_engine {
             match engine {
                 RegionEngineConfig::Mito(config) => {
-                    let mito_engine = Self::build_mito_engine(
-                        opts,
-                        object_store_manager.clone(),
-                        config.clone(),
-                        schema_metadata_manager.clone(),
-                        plugins.clone(),
-                    )
-                    .await?;
-
-                    let metric_engine = MetricEngine::new(
-                        mito_engine.clone(),
-                        metric_engine_config.take().unwrap_or_default(),
-                    );
-                    engines.push(Arc::new(mito_engine) as _);
-                    engines.push(Arc::new(metric_engine) as _);
+                    mito_engine_config = config.clone();
                 }
                 RegionEngineConfig::File(config) => {
-                    let engine = FileRegionEngine::new(
-                        config.clone(),
-                        object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
-                    );
-                    engines.push(Arc::new(engine) as _);
+                    file_engine_config = config.clone();
                 }
-                RegionEngineConfig::Metric(_) => {
-                    // Already handled in `build_mito_engine`.
+                RegionEngineConfig::Metric(metric_config) => {
+                    metric_engine_config = metric_config.clone();
                 }
             }
         }
-        Ok(engines)
+
+        let mito_engine = Self::build_mito_engine(
+            opts,
+            object_store_manager.clone(),
+            mito_engine_config,
+            schema_metadata_manager.clone(),
+            plugins.clone(),
+        )
+        .await?;
+
+        let metric_engine = MetricEngine::try_new(mito_engine.clone(), metric_engine_config)
+            .context(BuildMetricEngineSnafu)?;
+
+        let file_engine = FileRegionEngine::new(
+            file_engine_config,
+            object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
+        );
+
+        Ok(vec![
+            Arc::new(mito_engine) as _,
+            Arc::new(metric_engine) as _,
+            Arc::new(file_engine) as _,
+        ])
     }

     /// Builds [MitoEngine] according to options.
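The rewritten builder above collects the per-engine configs in one pass and only then constructs the engines, so the mito engine is always built first and the metric engine (which wraps it) second, regardless of the order the configs appear in `opts.region_engine`. A stripped-down sketch of that two-pass shape, using hypothetical placeholder types rather than the real engine and config types:

```rust
// Hypothetical stand-ins; the real code uses RegionEngineConfig and the engine types.
#[derive(Clone, Default, Debug)]
struct MitoCfg;
#[derive(Clone, Default, Debug)]
struct MetricCfg;
#[derive(Clone, Default, Debug)]
struct FileCfg;

enum EngineCfg {
    Mito(MitoCfg),
    Metric(MetricCfg),
    File(FileCfg),
}

fn build_engines(configs: &[EngineCfg]) -> Vec<String> {
    // Pass 1: remember the config of each kind, falling back to defaults.
    let (mut mito, mut metric, mut file) = (MitoCfg, MetricCfg, FileCfg);
    for cfg in configs {
        match cfg {
            EngineCfg::Mito(c) => mito = c.clone(),
            EngineCfg::Metric(c) => metric = c.clone(),
            EngineCfg::File(c) => file = c.clone(),
        }
    }
    // Pass 2: build in dependency order: mito first, then metric on top of mito, then file.
    vec![
        format!("mito engine from {mito:?}"),
        format!("metric engine from {metric:?}, wrapping mito"),
        format!("file engine from {file:?}"),
    ]
}

fn main() {
    // Config order no longer matters: metric listed before mito still builds mito first.
    let engines = build_engines(&[EngineCfg::Metric(MetricCfg), EngineCfg::Mito(MitoCfg)]);
    assert_eq!(engines.len(), 3);
    println!("{engines:#?}");
}
```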
@@ -336,6 +336,13 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Failed to build metric engine"))]
+    BuildMetricEngine {
+        source: metric_engine::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Failed to serialize options to TOML"))]
     TomlFormat {
         #[snafu(implicit)]
@@ -452,6 +459,7 @@ impl ErrorExt for Error {

             FindLogicalRegions { source, .. } => source.status_code(),
             BuildMitoEngine { source, .. } => source.status_code(),
+            BuildMetricEngine { source, .. } => source.status_code(),
             ConcurrentQueryLimiterClosed { .. } | ConcurrentQueryLimiterTimeout { .. } => {
                 StatusCode::RegionBusy
             }
@@ -25,6 +25,7 @@ use std::sync::Arc;
 use std::time::Duration;

 use common_telemetry::{info, warn};
+use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
 use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
 use object_store::services::Fs;
 use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
@@ -168,9 +169,13 @@ async fn build_cache_layer(
     if let Some(path) = cache_path.as_ref()
         && !path.trim().is_empty()
     {
-        let atomic_temp_dir = join_dir(path, ".tmp/");
+        let atomic_temp_dir = join_dir(path, ATOMIC_WRITE_DIR);
         clean_temp_dir(&atomic_temp_dir)?;

+        // Compatible code. Remove this after a major release.
+        let old_atomic_temp_dir = join_dir(path, OLD_ATOMIC_WRITE_DIR);
+        clean_temp_dir(&old_atomic_temp_dir)?;
+
         let cache_store = Fs::default()
             .root(path)
             .atomic_write_dir(&atomic_temp_dir)
@@ -15,6 +15,7 @@
 use std::{fs, path};

 use common_telemetry::info;
+use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
 use object_store::services::Fs;
 use object_store::util::join_dir;
 use object_store::ObjectStore;
@@ -33,9 +34,13 @@ pub async fn new_fs_object_store(
         .context(error::CreateDirSnafu { dir: data_home })?;
     info!("The file storage home is: {}", data_home);

-    let atomic_write_dir = join_dir(data_home, ".tmp/");
+    let atomic_write_dir = join_dir(data_home, ATOMIC_WRITE_DIR);
     store::clean_temp_dir(&atomic_write_dir)?;

+    // Compatible code. Remove this after a major release.
+    let old_atomic_temp_dir = join_dir(data_home, OLD_ATOMIC_WRITE_DIR);
+    store::clean_temp_dir(&old_atomic_temp_dir)?;
+
     let builder = Fs::default()
         .root(data_home)
         .atomic_write_dir(&atomic_write_dir);
@@ -16,6 +16,7 @@ async-trait.workspace = true
 bytes.workspace = true
 cache.workspace = true
 catalog.workspace = true
+chrono.workspace = true
 client.workspace = true
 common-base.workspace = true
 common-config.workspace = true
@@ -39,16 +40,13 @@ datafusion-expr.workspace = true
 datafusion-physical-expr.workspace = true
 datafusion-substrait.workspace = true
 datatypes.workspace = true
+dfir_rs = { version = "0.13.0", default-features = false }
 enum-as-inner = "0.6.0"
 enum_dispatch = "0.3"
 futures.workspace = true
 get-size2 = "0.1.2"
 greptime-proto.workspace = true
-# This fork of hydroflow is simply for keeping our dependency in our org, and pin the version
-# otherwise it is the same with upstream repo
-chrono.workspace = true
 http.workspace = true
-hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "main" }
 itertools.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
@@ -60,6 +58,7 @@ partition.workspace = true
 prometheus.workspace = true
 prost.workspace = true
 query.workspace = true
+rand.workspace = true
 serde.workspace = true
 servers.workspace = true
 session.workspace = true
@@ -135,14 +135,13 @@ impl Configurable for FlownodeOptions {
 }

 /// Arc-ed FlowNodeManager, cheaper to clone
-pub type FlowWorkerManagerRef = Arc<FlowStreamingEngine>;
+pub type FlowStreamingEngineRef = Arc<StreamingEngine>;

 /// FlowNodeManager manages the state of all tasks in the flow node, which should be run on the same thread
 ///
 /// The choice of timestamp is just using current system timestamp for now
 ///
-/// TODO(discord9): rename to FlowStreamingEngine
-pub struct FlowStreamingEngine {
+pub struct StreamingEngine {
     /// The handler to the worker that will run the dataflow
     /// which is `!Send` so a handle is used
     pub worker_handles: Vec<WorkerHandle>,
@@ -171,7 +170,7 @@ pub struct FlowStreamingEngine {
 }

 /// Building FlownodeManager
-impl FlowStreamingEngine {
+impl StreamingEngine {
     /// set frontend invoker
     pub async fn set_frontend_invoker(&self, frontend: FrontendInvoker) {
         *self.frontend_invoker.write().await = Some(frontend);
@@ -190,7 +189,7 @@ impl FlowStreamingEngine {
         let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
         let tick_manager = FlowTickManager::new();
         let worker_handles = Vec::new();
-        FlowStreamingEngine {
+        StreamingEngine {
             worker_handles,
             worker_selector: Mutex::new(0),
             query_engine,
@@ -266,7 +265,7 @@ pub fn batches_to_rows_req(batches: Vec<Batch>) -> Result<Vec<DiffRequest>, Erro
 }

 /// This impl block contains methods to send writeback requests to frontend
-impl FlowStreamingEngine {
+impl StreamingEngine {
     /// Return the number of requests it made
     pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
         let all_reqs = self.generate_writeback_request().await?;
@@ -537,7 +536,7 @@ impl FlowStreamingEngine {
 }

 /// Flow Runtime related methods
-impl FlowStreamingEngine {
+impl StreamingEngine {
     /// Start state report handler, which will receive a sender from HeartbeatTask to send state size report back
     ///
     /// if heartbeat task is shutdown, this future will exit too
@@ -662,7 +661,7 @@ impl FlowStreamingEngine {
         }
         // flow is now shutdown, drop frontend_invoker early so a ref cycle(in standalone mode) can be prevent:
         // FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
-        // -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_worker_manager.frontend_invoker
+        // -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_streaming_engine.frontend_invoker
         self.frontend_invoker.write().await.take();
     }

@@ -731,7 +730,7 @@ impl FlowStreamingEngine {
 }

 /// Create&Remove flow
-impl FlowStreamingEngine {
+impl StreamingEngine {
     /// remove a flow by it's id
     pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
         for handle in self.worker_handles.iter() {
@@ -749,7 +748,6 @@ impl FlowStreamingEngine {
     /// steps to create task:
     /// 1. parse query into typed plan(and optional parse expire_after expr)
     /// 2. render source/sink with output table id and used input table id
-    #[allow(clippy::too_many_arguments)]
     pub async fn create_flow_inner(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
         let CreateFlowArgs {
             flow_id,
@@ -14,6 +14,7 @@

 //! impl `FlowNode` trait for FlowNodeManager so standalone can call them
 use std::collections::{HashMap, HashSet};
+use std::sync::atomic::AtomicBool;
 use std::sync::Arc;

 use api::v1::flow::{
@@ -35,13 +36,14 @@ use snafu::{ensure, IntoError, OptionExt, ResultExt};
 use store_api::storage::{RegionId, TableId};
 use tokio::sync::{Mutex, RwLock};

-use crate::adapter::{CreateFlowArgs, FlowStreamingEngine};
+use crate::adapter::{CreateFlowArgs, StreamingEngine};
 use crate::batching_mode::engine::BatchingEngine;
+use crate::batching_mode::{FRONTEND_SCAN_TIMEOUT, MIN_REFRESH_DURATION};
 use crate::engine::FlowEngine;
 use crate::error::{
-    CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, IllegalCheckTaskStateSnafu,
-    InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, SyncCheckTaskSnafu,
-    UnexpectedSnafu,
+    CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, FlowNotRecoveredSnafu,
+    IllegalCheckTaskStateSnafu, InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu,
+    NoAvailableFrontendSnafu, SyncCheckTaskSnafu, UnexpectedSnafu,
 };
 use crate::metrics::METRIC_FLOW_TASK_COUNT;
 use crate::repr::{self, DiffRow};
@@ -55,18 +57,19 @@ pub type FlowDualEngineRef = Arc<FlowDualEngine>;
 /// including create/drop/flush flow
 /// and redirect insert requests to the appropriate engine
 pub struct FlowDualEngine {
-    streaming_engine: Arc<FlowStreamingEngine>,
+    streaming_engine: Arc<StreamingEngine>,
     batching_engine: Arc<BatchingEngine>,
     /// helper struct for faster query flow by table id or vice versa
     src_table2flow: RwLock<SrcTableToFlow>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
     catalog_manager: Arc<dyn CatalogManager>,
     check_task: tokio::sync::Mutex<Option<ConsistentCheckTask>>,
+    done_recovering: AtomicBool,
 }

 impl FlowDualEngine {
     pub fn new(
-        streaming_engine: Arc<FlowStreamingEngine>,
+        streaming_engine: Arc<StreamingEngine>,
         batching_engine: Arc<BatchingEngine>,
         flow_metadata_manager: Arc<FlowMetadataManager>,
         catalog_manager: Arc<dyn CatalogManager>,
@@ -78,10 +81,61 @@ impl FlowDualEngine {
             flow_metadata_manager,
             catalog_manager,
             check_task: Mutex::new(None),
+            done_recovering: AtomicBool::new(false),
         }
     }

-    pub fn streaming_engine(&self) -> Arc<FlowStreamingEngine> {
+    /// Set `done_recovering` to true
+    /// indicate that we are ready to handle requests
+    pub fn set_done_recovering(&self) {
+        info!("FlowDualEngine done recovering");
+        self.done_recovering
+            .store(true, std::sync::atomic::Ordering::Release);
+    }
+
+    /// Check if `done_recovering` is true
+    pub fn is_recover_done(&self) -> bool {
+        self.done_recovering
+            .load(std::sync::atomic::Ordering::Acquire)
+    }
+
+    /// wait for recovering to be done, this will only happen when flownode just started
+    async fn wait_for_all_flow_recover(&self, waiting_req_cnt: usize) -> Result<(), Error> {
+        if self.is_recover_done() {
+            return Ok(());
+        }
+
+        warn!(
+            "FlowDualEngine is not done recovering, {} insert request waiting for recovery",
+            waiting_req_cnt
+        );
+        // wait 3 seconds, check every 1 second
+        // TODO(discord9): make this configurable
+        let mut retry = 0;
+        let max_retry = 3;
+        while retry < max_retry && !self.is_recover_done() {
+            warn!(
+                "FlowDualEngine is not done recovering, retry {} in 1s",
+                retry
+            );
+            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+            retry += 1;
+        }
+        if retry == max_retry {
+            return FlowNotRecoveredSnafu.fail();
+        } else {
+            info!("FlowDualEngine is done recovering");
+        }
+        // TODO(discord9): also put to centralized logging for flow once it implemented
+        Ok(())
+    }
+
+    /// Determine if the engine is in distributed mode
+    pub fn is_distributed(&self) -> bool {
+        self.streaming_engine.node_id.is_some()
+    }
+
+    pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
         self.streaming_engine.clone()
     }

@@ -89,6 +143,39 @@ impl FlowDualEngine {
         self.batching_engine.clone()
     }

+    /// In distributed mode, scan periodically(1s) until available frontend is found, or timeout,
+    /// in standalone mode, return immediately
+    /// notice here if any frontend appear in cluster info this function will return immediately
+    async fn wait_for_available_frontend(&self, timeout: std::time::Duration) -> Result<(), Error> {
+        if !self.is_distributed() {
+            return Ok(());
+        }
+        let frontend_client = self.batching_engine().frontend_client.clone();
+        let sleep_duration = std::time::Duration::from_millis(1_000);
+        let now = std::time::Instant::now();
+        loop {
+            let frontend_list = frontend_client.scan_for_frontend().await?;
+            if !frontend_list.is_empty() {
+                let fe_list = frontend_list
+                    .iter()
+                    .map(|(_, info)| &info.peer.addr)
+                    .collect::<Vec<_>>();
+                info!("Available frontend found: {:?}", fe_list);
+                return Ok(());
+            }
+            let elapsed = now.elapsed();
+            tokio::time::sleep(sleep_duration).await;
+            info!("Waiting for available frontend, elapsed={:?}", elapsed);
+            if elapsed >= timeout {
+                return NoAvailableFrontendSnafu {
+                    timeout,
+                    context: "No available frontend found in cluster info",
+                }
+                .fail();
+            }
+        }
+    }
+
     /// Try to sync with check task, this is only used in drop flow&flush flow, so a flow id is required
     ///
     /// the need to sync is to make sure flush flow actually get called
@@ -196,7 +283,7 @@ impl FlowDualEngine {
                 to_be_created
             );
             let mut errors = vec![];
-            for flow_id in to_be_created {
+            for flow_id in to_be_created.clone() {
                 let flow_id = *flow_id;
                 let info = self
                     .flow_metadata_manager
@@ -225,11 +312,24 @@ impl FlowDualEngine {
                     comment: Some(info.comment().clone()),
                     sql: info.raw_sql().clone(),
                     flow_options: info.options().clone(),
-                    query_ctx: Some(
-                        QueryContextBuilder::default()
-                            .current_catalog(info.catalog_name().clone())
-                            .build(),
-                    ),
+                    query_ctx: info
+                        .query_context()
+                        .clone()
+                        .map(|ctx| {
+                            ctx.try_into()
+                                .map_err(BoxedError::new)
+                                .context(ExternalSnafu)
+                        })
+                        .transpose()?
+                        // or use default QueryContext with catalog_name from info
+                        // to keep compatibility with old version
+                        .or_else(|| {
+                            Some(
+                                QueryContextBuilder::default()
+                                    .current_catalog(info.catalog_name().to_string())
+                                    .build(),
+                            )
+                        }),
                 };
                 if let Err(err) = self
                     .create_flow(args)
@@ -242,12 +342,16 @@ impl FlowDualEngine {
                     errors.push((flow_id, err));
                 }
             }
+            if errors.is_empty() {
+                info!("Recover flows successfully, flows: {:?}", to_be_created);
+            }
+
             for (flow_id, err) in errors {
                 warn!("Failed to recreate flow {}, err={:#?}", flow_id, err);
             }
         } else {
             warn!(
-                "Flownode {:?} found flows not exist in flownode, flow_ids={:?}",
+                "Flows do not exist in flownode for node {:?}, flow_ids={:?}",
                 nodeid, to_be_created
             );
         }
@@ -267,7 +371,7 @@ impl FlowDualEngine {
             }
         } else {
             warn!(
-                "Flownode {:?} found flows not exist in flownode, flow_ids={:?}",
+                "Flows do not exist in metadata for node {:?}, flow_ids={:?}",
                 nodeid, to_be_dropped
             );
         }
@@ -300,11 +404,12 @@ impl FlowDualEngine {
             }
         );

-        check_task.take().expect("Already checked").stop().await?;
+        check_task.take().unwrap().stop().await?;
         info!("Stopped flow consistent check task");
         Ok(())
     }

+    /// TODO(discord9): also add a `exists` api using flow metadata manager's `exists` method
     async fn flow_exist_in_metadata(&self, flow_id: FlowId) -> Result<bool, Error> {
         self.flow_metadata_manager
             .flow_info_manager()
@@ -324,31 +429,52 @@ struct ConsistentCheckTask {

 impl ConsistentCheckTask {
     async fn start_check_task(engine: &Arc<FlowDualEngine>) -> Result<Self, Error> {
-        // first do recover flows
-        engine.check_flow_consistent(true, false).await?;
-
-        let inner = engine.clone();
+        let engine = engine.clone();
         let (tx, mut rx) = tokio::sync::mpsc::channel(1);
         let (trigger_tx, mut trigger_rx) =
             tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
         let handle = common_runtime::spawn_global(async move {
-            let mut args = (false, false);
+            // first check if available frontend is found
+            if let Err(err) = engine
+                .wait_for_available_frontend(FRONTEND_SCAN_TIMEOUT)
+                .await
+            {
+                warn!("No frontend is available yet:\n {err:?}");
+            }
+
+            // then do recover flows, if failed, always retry
+            let mut recover_retry = 0;
+            while let Err(err) = engine.check_flow_consistent(true, false).await {
+                recover_retry += 1;
+                error!(
+                    "Failed to recover flows:\n {err:?}, retry {} in {}s",
+                    recover_retry,
+                    MIN_REFRESH_DURATION.as_secs()
+                );
+                tokio::time::sleep(MIN_REFRESH_DURATION).await;
+            }
+
+            engine.set_done_recovering();
+
+            // then do check flows, with configurable allow_create and allow_drop
+            let (mut allow_create, mut allow_drop) = (false, false);
             let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
             loop {
-                if let Err(err) = inner.check_flow_consistent(args.0, args.1).await {
+                if let Err(err) = engine.check_flow_consistent(allow_create, allow_drop).await {
                     error!(err; "Failed to check flow consistent");
                 }
                 if let Some(done) = ret_signal.take() {
                     let _ = done.send(());
                 }

                 tokio::select! {
                     _ = rx.recv() => break,
                     incoming = trigger_rx.recv() => if let Some(incoming) = incoming {
-                        args = (incoming.0, incoming.1);
+                        (allow_create, allow_drop) = (incoming.0, incoming.1);
                         ret_signal = Some(incoming.2);
                     },
-                    _ = tokio::time::sleep(std::time::Duration::from_secs(10)) => args=(false,false),
+                    _ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
+                        (allow_create, allow_drop) = (false, false);
+                    },
                 }
             }
         });
@@ -519,7 +645,12 @@ impl FlowEngine for FlowDualEngine {
         match flow_type {
             Some(FlowType::Batching) => self.batching_engine.flush_flow(flow_id).await,
             Some(FlowType::Streaming) => self.streaming_engine.flush_flow(flow_id).await,
-            None => Ok(0),
+            None => {
+                warn!(
+                    "Currently flow={flow_id} doesn't exist in flownode, ignore flush_flow request"
+                );
+                Ok(0)
+            }
         }
     }

@@ -544,11 +675,14 @@ impl FlowEngine for FlowDualEngine {
         &self,
         request: api::v1::region::InsertRequests,
     ) -> Result<(), Error> {
+        self.wait_for_all_flow_recover(request.requests.len())
+            .await?;
         // TODO(discord9): make as little clone as possible
         let mut to_stream_engine = Vec::with_capacity(request.requests.len());
         let mut to_batch_engine = request.requests;

         {
+            // not locking this, or recover flows will be starved when also handling flow inserts
             let src_table2flow = self.src_table2flow.read().await;
             to_batch_engine.retain(|req| {
                 let region_id = RegionId::from(req.region_id);
@@ -684,15 +818,23 @@ fn to_meta_err(
     location: snafu::Location,
 ) -> impl FnOnce(crate::error::Error) -> common_meta::error::Error {
     move |err: crate::error::Error| -> common_meta::error::Error {
-        common_meta::error::Error::External {
-            location,
-            source: BoxedError::new(err),
+        match err {
+            crate::error::Error::FlowNotFound { id, .. } => {
+                common_meta::error::Error::FlowNotFound {
+                    flow_name: format!("flow_id={id}"),
+                    location,
+                }
+            }
+            _ => common_meta::error::Error::External {
+                location,
+                source: BoxedError::new(err),
+            },
         }
     }
 }

 #[async_trait::async_trait]
-impl common_meta::node_manager::Flownode for FlowStreamingEngine {
+impl common_meta::node_manager::Flownode for StreamingEngine {
     async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
         let query_ctx = request
             .header
@@ -778,7 +920,7 @@ impl common_meta::node_manager::Flownode for FlowStreamingEngine {
     }
 }

-impl FlowEngine for FlowStreamingEngine {
+impl FlowEngine for StreamingEngine {
     async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
         self.create_flow_inner(args).await
     }
@@ -830,7 +972,7 @@ impl FetchFromRow {
     }
 }

-impl FlowStreamingEngine {
+impl StreamingEngine {
     async fn handle_inserts_inner(
         &self,
         request: InsertRequests,
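The `done_recovering` flag plus `wait_for_all_flow_recover` in the hunks above form a small gate: insert requests are held back with a bounded retry loop until the recovery task flips an atomic flag. A stripped-down sketch of the same idea, assuming the `tokio` runtime (the 3x1s retry budget mirrors the hunk; everything else is illustrative, not this crate's API):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;

struct RecoveryGate {
    done: AtomicBool,
}

impl RecoveryGate {
    fn set_done(&self) {
        self.done.store(true, Ordering::Release);
    }

    fn is_done(&self) -> bool {
        self.done.load(Ordering::Acquire)
    }

    /// Wait up to `max_retry` one-second ticks for recovery to finish.
    async fn wait(&self, max_retry: u32) -> Result<(), &'static str> {
        let mut retry = 0;
        while retry < max_retry && !self.is_done() {
            tokio::time::sleep(Duration::from_secs(1)).await;
            retry += 1;
        }
        if self.is_done() {
            Ok(())
        } else {
            Err("recovery did not finish in time")
        }
    }
}

#[tokio::main]
async fn main() {
    let gate = Arc::new(RecoveryGate { done: AtomicBool::new(false) });

    // The recovery task opens the gate once its (simulated) work is done.
    let opener = gate.clone();
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(500)).await;
        opener.set_done();
    });

    // An incoming request waits behind the gate (here at most ~3s) before proceeding.
    gate.wait(3).await.unwrap();
    println!("recovery done, handling request");
}
```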
@@ -31,7 +31,7 @@ use snafu::{ensure, OptionExt, ResultExt};
 use table::metadata::TableId;

 use crate::adapter::table_source::ManagedTableSource;
-use crate::adapter::{FlowId, FlowStreamingEngine, FlowWorkerManagerRef};
+use crate::adapter::{FlowId, FlowStreamingEngineRef, StreamingEngine};
 use crate::error::{FlowNotFoundSnafu, JoinTaskSnafu, UnexpectedSnafu};
 use crate::expr::error::ExternalSnafu;
 use crate::expr::utils::find_plan_time_window_expr_lower_bound;
@@ -39,10 +39,10 @@ use crate::repr::RelationDesc;
 use crate::server::get_all_flow_ids;
 use crate::{Error, FrontendInvoker};

-impl FlowStreamingEngine {
+impl StreamingEngine {
     /// Create and start refill flow tasks in background
     pub async fn create_and_start_refill_flow_tasks(
-        self: &FlowWorkerManagerRef,
+        self: &FlowStreamingEngineRef,
         flow_metadata_manager: &FlowMetadataManagerRef,
         catalog_manager: &CatalogManagerRef,
     ) -> Result<(), Error> {
@@ -130,7 +130,7 @@ impl FlowStreamingEngine {

     /// Starting to refill flows, if any error occurs, will rebuild the flow and retry
     pub(crate) async fn starting_refill_flows(
-        self: &FlowWorkerManagerRef,
+        self: &FlowStreamingEngineRef,
         tasks: Vec<RefillTask>,
     ) -> Result<(), Error> {
         // TODO(discord9): add a back pressure mechanism
@@ -266,7 +266,7 @@ impl TaskState<()> {
     fn start_running(
         &mut self,
         task_data: &TaskData,
-        manager: FlowWorkerManagerRef,
+        manager: FlowStreamingEngineRef,
         mut output_stream: SendableRecordBatchStream,
     ) -> Result<(), Error> {
         let data = (*task_data).clone();
@@ -383,7 +383,7 @@ impl RefillTask {
     /// Start running the task in background, non-blocking
     pub async fn start_running(
         &mut self,
-        manager: FlowWorkerManagerRef,
+        manager: FlowStreamingEngineRef,
         invoker: &FrontendInvoker,
     ) -> Result<(), Error> {
         let TaskState::Prepared { sql } = &mut self.state else {
@@ -16,9 +16,9 @@ use std::collections::BTreeMap;

 use common_meta::key::flow::flow_state::FlowStat;

-use crate::FlowStreamingEngine;
+use crate::StreamingEngine;

-impl FlowStreamingEngine {
+impl StreamingEngine {
     pub async fn gen_state_report(&self) -> FlowStat {
         let mut full_report = BTreeMap::new();
         let mut last_exec_time_map = BTreeMap::new();
@@ -33,8 +33,8 @@ use crate::adapter::table_source::TableDesc;
 use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
 use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
 use crate::repr::{ColumnType, RelationDesc, RelationType};
-use crate::FlowStreamingEngine;
-impl FlowStreamingEngine {
+use crate::StreamingEngine;
+impl StreamingEngine {
     /// Get a worker handle for creating flow, using round robin to select a worker
     pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
         let use_idx = {
@@ -19,8 +19,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;

 use common_telemetry::info;
+use dfir_rs::scheduled::graph::Dfir;
 use enum_as_inner::EnumAsInner;
-use hydroflow::scheduled::graph::Hydroflow;
 use snafu::ensure;
 use tokio::sync::{broadcast, mpsc, oneshot, Mutex};

@@ -49,9 +49,9 @@ pub fn create_worker<'a>() -> (WorkerHandle, Worker<'a>) {
     (worker_handle, worker)
 }

-/// ActiveDataflowState is a wrapper around `Hydroflow` and `DataflowState`
+/// ActiveDataflowState is a wrapper around `Dfir` and `DataflowState`
 pub(crate) struct ActiveDataflowState<'subgraph> {
-    df: Hydroflow<'subgraph>,
+    df: Dfir<'subgraph>,
     state: DataflowState,
     err_collector: ErrCollector,
 }
@@ -59,7 +59,7 @@ pub(crate) struct ActiveDataflowState<'subgraph> {
 impl std::fmt::Debug for ActiveDataflowState<'_> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("ActiveDataflowState")
-            .field("df", &"<Hydroflow>")
+            .field("df", &"<Dfir>")
             .field("state", &self.state)
             .field("err_collector", &self.err_collector)
             .finish()
@@ -69,7 +69,7 @@ impl std::fmt::Debug for ActiveDataflowState<'_> {
 impl Default for ActiveDataflowState<'_> {
     fn default() -> Self {
         ActiveDataflowState {
-            df: Hydroflow::new(),
+            df: Dfir::new(),
             state: DataflowState::default(),
             err_collector: ErrCollector::default(),
         }
@@ -31,4 +31,19 @@ pub const DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(
 pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);

 /// The minimum duration between two queries execution by batching mode task
-const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
+pub const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
+
+/// Grpc connection timeout
+const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
+
+/// Grpc max retry number
+const GRPC_MAX_RETRIES: u32 = 3;
+
+/// Flow wait for available frontend timeout,
+/// if failed to find available frontend after FRONTEND_SCAN_TIMEOUT elapsed, return error
+/// which should prevent flownode from starting
+pub const FRONTEND_SCAN_TIMEOUT: Duration = Duration::from_secs(30);
+
+/// Frontend activity timeout
+/// if frontend is down(not sending heartbeat) for more than FRONTEND_ACTIVITY_TIMEOUT, it will be removed from the list that flownode use to connect
+pub const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);
@@ -39,7 +39,8 @@ use crate::batching_mode::time_window::{find_time_window_expr, TimeWindowExpr};
 use crate::batching_mode::utils::sql_to_df_plan;
 use crate::engine::FlowEngine;
 use crate::error::{
-    ExternalSnafu, FlowAlreadyExistSnafu, TableNotFoundMetaSnafu, UnexpectedSnafu, UnsupportedSnafu,
+    ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, TableNotFoundMetaSnafu,
+    UnexpectedSnafu, UnsupportedSnafu,
 };
 use crate::{CreateFlowArgs, Error, FlowId, TableName};

@@ -49,7 +50,8 @@ use crate::{CreateFlowArgs, Error, FlowId, TableName};
 pub struct BatchingEngine {
     tasks: RwLock<BTreeMap<FlowId, BatchingTask>>,
     shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
-    frontend_client: Arc<FrontendClient>,
+    /// frontend client for insert request
+    pub(crate) frontend_client: Arc<FrontendClient>,
     flow_metadata_manager: FlowMetadataManagerRef,
     table_meta: TableMetadataManagerRef,
     catalog_manager: CatalogManagerRef,
@@ -267,7 +269,8 @@ impl BatchingEngine {
         // also check table option to see if ttl!=instant
         let table_name = get_table_name(self.table_meta.table_info_manager(), &src_id).await?;
         let table_info = get_table_info(self.table_meta.table_info_manager(), &src_id).await?;
-        if table_info.table_info.meta.options.ttl == Some(TimeToLive::Instant) {
+        ensure!(
+            table_info.table_info.meta.options.ttl != Some(TimeToLive::Instant),
             UnsupportedSnafu {
                 reason: format!(
                     "Source table `{}`(id={}) has instant TTL, Instant TTL is not supported under batching mode. Consider using a TTL longer than flush interval",
@@ -275,8 +278,8 @@ impl BatchingEngine {
                     src_id
                 ),
             }
-            .fail()?;
-        }
+        );
         source_table_names.push(table_name);
     }

@@ -301,7 +304,7 @@ impl BatchingEngine {
         })
         .transpose()?;

-        info!(
+        debug!(
             "Flow id={}, found time window expr={}",
             flow_id,
             phy_expr
@@ -328,7 +331,7 @@ impl BatchingEngine {
         let frontend = self.frontend_client.clone();

         // check execute once first to detect any error early
-        task.check_execute(&engine, &frontend).await?;
+        task.check_or_create_sink_table(&engine, &frontend).await?;

         // TODO(discord9): use time wheel or what for better
         let handle = common_runtime::spawn_global(async move {
@@ -347,7 +350,8 @@ impl BatchingEngine {

     pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
         if self.tasks.write().await.remove(&flow_id).is_none() {
-            warn!("Flow {flow_id} not found in tasks")
+            warn!("Flow {flow_id} not found in tasks");
+            FlowNotFoundSnafu { id: flow_id }.fail()?;
         }
         let Some(tx) = self.shutdown_txs.write().await.remove(&flow_id) else {
             UnexpectedSnafu {
@@ -364,9 +368,7 @@ impl BatchingEngine {
     pub async fn flush_flow_inner(&self, flow_id: FlowId) -> Result<usize, Error> {
         debug!("Try flush flow {flow_id}");
         let task = self.tasks.read().await.get(&flow_id).cloned();
-        let task = task.with_context(|| UnexpectedSnafu {
-            reason: format!("Can't found task for flow {flow_id}"),
-        })?;
+        let task = task.with_context(|| FlowNotFoundSnafu { id: flow_id })?;

         task.mark_all_windows_as_dirty()?;
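Note: the `if cond { return Snafu { .. }.fail(); }` blocks above are being collapsed into snafu's `ensure!` macro. A self-contained sketch of the same idiom, with a hypothetical error type rather than the crate's real error enum:

    use snafu::{ensure, Snafu};

    #[derive(Debug, Snafu)]
    enum DemoError {
        #[snafu(display("Unsupported: {reason}"))]
        Unsupported { reason: String },
    }

    fn check_ttl(is_instant: bool) -> Result<(), DemoError> {
        // Equivalent to: if is_instant { return UnsupportedSnafu { .. }.fail(); }
        ensure!(
            !is_instant,
            UnsupportedSnafu {
                reason: "instant TTL is not supported",
            }
        );
        Ok(())
    }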
@@ -15,6 +15,7 @@
 //! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user

 use std::sync::{Arc, Weak};
+use std::time::SystemTime;

 use api::v1::greptime_request::Request;
 use api::v1::CreateTableExpr;
@@ -25,13 +26,19 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
 use common_meta::peer::Peer;
 use common_meta::rpc::store::RangeRequest;
 use common_query::Output;
+use common_telemetry::warn;
 use meta_client::client::MetaClient;
+use rand::rng;
+use rand::seq::SliceRandom;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{OptionExt, ResultExt};

-use crate::batching_mode::DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT;
-use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
+use crate::batching_mode::{
+    DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
+    GRPC_MAX_RETRIES,
+};
+use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
 use crate::Error;

 /// Just like [`GrpcQueryHandler`] but use BoxedError
@@ -79,7 +86,6 @@ pub enum FrontendClient {
     Standalone {
         /// for the sake of simplicity still use grpc even in standalone mode
         /// notice the client here should all be lazy, so that can wait after frontend is booted then make conn
-        /// TODO(discord9): not use grpc under standalone mode
         database_client: HandlerMutable,
     },
 }
@@ -100,7 +106,9 @@ impl FrontendClient {
         Self::Distributed {
             meta_client,
             chnl_mgr: {
-                let cfg = ChannelConfig::new().timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
+                let cfg = ChannelConfig::new()
+                    .connect_timeout(GRPC_CONN_TIMEOUT)
+                    .timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
                 ChannelManager::with_config(cfg)
             },
         }
@@ -123,10 +131,24 @@ impl DatabaseWithPeer {
     fn new(database: Database, peer: Peer) -> Self {
         Self { database, peer }
     }

+    /// Try sending a "SELECT 1" to the database
+    async fn try_select_one(&self) -> Result<(), Error> {
+        // notice here use `sql` for `SELECT 1` return 1 row
+        let _ = self
+            .database
+            .sql("SELECT 1")
+            .await
+            .with_context(|_| InvalidRequestSnafu {
+                context: format!("Failed to handle `SELECT 1` request at {:?}", self.peer),
+            })?;
+        Ok(())
+    }
 }

 impl FrontendClient {
-    async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
+    /// scan for available frontend from metadata
+    pub(crate) async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
         let Self::Distributed { meta_client, .. } = self else {
             return Ok(vec![]);
         };
@@ -156,8 +178,9 @@ impl FrontendClient {
         Ok(res)
     }

-    /// Get the database with max `last_activity_ts`
-    async fn get_last_active_frontend(
+    /// Get the frontend with recent enough(less than 1 minute from now) `last_activity_ts`
+    /// and is able to process query
+    async fn get_random_active_frontend(
         &self,
         catalog: &str,
         schema: &str,
@@ -173,22 +196,50 @@ impl FrontendClient {
             .fail();
         };

-        let frontends = self.scan_for_frontend().await?;
-        let mut peer = None;
+        let mut interval = tokio::time::interval(GRPC_CONN_TIMEOUT);
+        interval.tick().await;
+        for retry in 0..GRPC_MAX_RETRIES {
+            let mut frontends = self.scan_for_frontend().await?;
+            let now_in_ms = SystemTime::now()
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .unwrap()
+                .as_millis() as i64;
+            // shuffle the frontends to avoid always pick the same one
+            frontends.shuffle(&mut rng());

-        if let Some((_, val)) = frontends.iter().max_by_key(|(_, val)| val.last_activity_ts) {
-            peer = Some(val.peer.clone());
+            // found node with maximum last_activity_ts
+            for (_, node_info) in frontends
+                .iter()
+                // filter out frontend that have been down for more than 1 min
+                .filter(|(_, node_info)| {
+                    node_info.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64
+                        > now_in_ms
+                })
+            {
+                let addr = &node_info.peer.addr;
+                let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![addr.clone()]);
+                let database = Database::new(catalog, schema, client);
+                let db = DatabaseWithPeer::new(database, node_info.peer.clone());
+                match db.try_select_one().await {
+                    Ok(_) => return Ok(db),
+                    Err(e) => {
+                        warn!(
+                            "Failed to connect to frontend {} on retry={}: \n{e:?}",
+                            addr, retry
+                        );
+                    }
+                }
+            }
+            // no available frontend
+            // sleep and retry
+            interval.tick().await;
         }

-        let Some(peer) = peer else {
-            UnexpectedSnafu {
-                reason: format!("No frontend available: {:?}", frontends),
+        NoAvailableFrontendSnafu {
+            timeout: GRPC_CONN_TIMEOUT,
+            context: "No available frontend found that is able to process query",
         }
-            .fail()?
-        };
-        let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![peer.addr.clone()]);
-        let database = Database::new(catalog, schema, client);
-        Ok(DatabaseWithPeer::new(database, peer))
+        .fail()
     }

     pub async fn create(
@@ -218,17 +269,17 @@ impl FrontendClient {
     ) -> Result<u32, Error> {
         match self {
             FrontendClient::Distributed { .. } => {
-                let db = self.get_last_active_frontend(catalog, schema).await?;
+                let db = self.get_random_active_frontend(catalog, schema).await?;

                 *peer_desc = Some(PeerDesc::Dist {
                     peer: db.peer.clone(),
                 });

                 db.database
-                    .handle(req.clone())
+                    .handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
                     .await
                     .with_context(|_| InvalidRequestSnafu {
-                        context: format!("Failed to handle request: {:?}", req),
+                        context: format!("Failed to handle request at {:?}: {:?}", db.peer, req),
                     })
             }
             FrontendClient::Standalone { database_client } => {
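Note: the selection strategy above is: shuffle the discovered frontends, drop any whose heartbeat is older than FRONTEND_ACTIVITY_TIMEOUT, then probe each with `SELECT 1` until one answers, retrying a few rounds with a pause in between. A condensed standalone sketch of the same idea; `PeerInfo` and `probe` are hypothetical stand-ins for the real client types:

    use rand::rng;
    use rand::seq::SliceRandom;

    struct PeerInfo {
        addr: String,
        last_activity_ts: i64, // milliseconds since the Unix epoch
    }

    /// Returns the address of the first responsive, recently-active peer, if any.
    fn pick_active_peer(
        mut peers: Vec<PeerInfo>,
        now_ms: i64,
        activity_timeout_ms: i64,
        probe: impl Fn(&str) -> bool,
    ) -> Option<String> {
        // Avoid always hammering the same frontend.
        peers.shuffle(&mut rng());
        peers
            .iter()
            .filter(|p| p.last_activity_ts + activity_timeout_ms > now_ms)
            .find(|p| probe(&p.addr))
            .map(|p| p.addr.clone())
    }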
@@ -71,18 +71,33 @@ impl TaskState {
         self.last_update_time = Instant::now();
     }

-    /// wait for at least `last_query_duration`, at most `max_timeout` to start next query
+    /// Compute the next query delay based on the time window size or the last query duration.
+    /// Aiming to avoid too frequent queries. But also not too long delay.
+    /// The delay is computed as follows:
+    /// - If `time_window_size` is set, the delay is half the time window size, constrained to be
+    ///   at least `last_query_duration` and at most `max_timeout`.
+    /// - If `time_window_size` is not set, the delay defaults to `last_query_duration`, constrained
+    ///   to be at least `MIN_REFRESH_DURATION` and at most `max_timeout`.
     ///
-    /// if have more dirty time window, exec next query immediately
+    /// If there are dirty time windows, the function returns an immediate execution time to clean them.
+    /// TODO: Make this behavior configurable.
     pub fn get_next_start_query_time(
         &self,
         flow_id: FlowId,
+        time_window_size: &Option<Duration>,
         max_timeout: Option<Duration>,
     ) -> Instant {
-        let next_duration = max_timeout
+        let last_duration = max_timeout
             .unwrap_or(self.last_query_duration)
-            .min(self.last_query_duration);
-        let next_duration = next_duration.max(MIN_REFRESH_DURATION);
+            .min(self.last_query_duration)
+            .max(MIN_REFRESH_DURATION);

+        let next_duration = time_window_size
+            .map(|t| {
+                let half = t / 2;
+                half.max(last_duration)
+            })
+            .unwrap_or(last_duration);

         // if have dirty time window, execute immediately to clean dirty time window
         if self.dirty_time_windows.windows.is_empty() {
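Note: a worked, standalone sketch of the delay rule documented above, with the flow-specific types stripped away (the constant mirrors the one defined earlier in this changeset):

    use std::time::Duration;

    const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);

    fn next_query_delay(
        time_window_size: Option<Duration>,
        last_query_duration: Duration,
        max_timeout: Option<Duration>,
    ) -> Duration {
        // Floor at MIN_REFRESH_DURATION, cap at max_timeout (when given).
        let last = max_timeout
            .unwrap_or(last_query_duration)
            .min(last_query_duration)
            .max(MIN_REFRESH_DURATION);
        // Half the window when the window size is known, otherwise the last duration.
        time_window_size.map(|w| (w / 2).max(last)).unwrap_or(last)
    }

    // Example: a 10-minute window, a 2 s last query and max_timeout = 30 s gives
    // last = 5 s and a delay of max(300 s, 5 s) = 300 s between refreshes.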
@@ -36,7 +36,7 @@ use operator::expr_helper::column_schemas_to_defs;
 use query::query_engine::DefaultSerializer;
 use query::QueryEngineRef;
 use session::context::QueryContextRef;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
 use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
 use tokio::sync::oneshot;
 use tokio::sync::oneshot::error::TryRecvError;
@@ -53,6 +53,7 @@ use crate::batching_mode::utils::{
 use crate::batching_mode::{
     DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, MIN_REFRESH_DURATION, SLOW_QUERY_THRESHOLD,
 };
+use crate::df_optimizer::apply_df_optimizer;
 use crate::error::{
     ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu,
     SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
@@ -141,26 +142,12 @@ impl BatchingTask {
         Ok(())
     }

-    /// Test execute, for check syntax or such
-    pub async fn check_execute(
+    /// Create sink table if not exists
+    pub async fn check_or_create_sink_table(
         &self,
         engine: &QueryEngineRef,
         frontend_client: &Arc<FrontendClient>,
     ) -> Result<Option<(u32, Duration)>, Error> {
-        // use current time to test get a dirty time window, which should be safe
-        let start = SystemTime::now();
-        let ts = Timestamp::new_second(
-            start
-                .duration_since(UNIX_EPOCH)
-                .expect("Time went backwards")
-                .as_secs() as _,
-        );
-        self.state
-            .write()
-            .unwrap()
-            .dirty_time_windows
-            .add_lower_bounds(vec![ts].into_iter());

         if !self.is_table_exist(&self.config.sink_table_name).await? {
             let create_table = self.gen_create_table_expr(engine.clone()).await?;
             info!(
@@ -173,7 +160,8 @@ impl BatchingTask {
                 self.config.sink_table_name.join(".")
             );
         }
-        self.gen_exec_once(engine, frontend_client).await
+        Ok(None)
     }

     async fn is_table_exist(&self, table_name: &[String; 3]) -> Result<bool, Error> {
@@ -191,7 +179,7 @@ impl BatchingTask {
         frontend_client: &Arc<FrontendClient>,
     ) -> Result<Option<(u32, Duration)>, Error> {
         if let Some(new_query) = self.gen_insert_plan(engine).await? {
-            debug!("Generate new query: {:#?}", new_query);
+            debug!("Generate new query: {}", new_query);
             self.execute_logical_plan(frontend_client, &new_query).await
         } else {
             debug!("Generate no query");
@@ -222,15 +210,15 @@ impl BatchingTask {
             .map(|c| c.name)
             .collect::<BTreeSet<_>>();
         for column in new_query.schema().columns() {
-            if !table_columns.contains(column.name()) {
-                return InvalidQuerySnafu {
+            ensure!(
+                table_columns.contains(column.name()),
+                InvalidQuerySnafu {
                     reason: format!(
                         "Column {} not found in sink table with columns {:?}",
                         column, table_columns
                     ),
                 }
-                .fail();
-            }
+            );
         }
         // update_at& time index placeholder (if exists) should have default value
         LogicalPlan::Dml(DmlStatement::new(
@@ -392,6 +380,23 @@ impl BatchingTask {
         frontend_client: Arc<FrontendClient>,
     ) {
         loop {
+            // first check if shutdown signal is received
+            // if so, break the loop
+            {
+                let mut state = self.state.write().unwrap();
+                match state.shutdown_rx.try_recv() {
+                    Ok(()) => break,
+                    Err(TryRecvError::Closed) => {
+                        warn!(
+                            "Unexpected shutdown flow {}, shutdown anyway",
+                            self.config.flow_id
+                        );
+                        break;
+                    }
+                    Err(TryRecvError::Empty) => (),
+                }
+            }

             let mut new_query = None;
             let mut gen_and_exec = async || {
                 new_query = self.gen_insert_plan(&engine).await?;
@@ -405,20 +410,15 @@ impl BatchingTask {
                 // normal execute, sleep for some time before doing next query
                 Ok(Some(_)) => {
                     let sleep_until = {
-                        let mut state = self.state.write().unwrap();
-                        match state.shutdown_rx.try_recv() {
-                            Ok(()) => break,
-                            Err(TryRecvError::Closed) => {
-                                warn!(
-                                    "Unexpected shutdown flow {}, shutdown anyway",
-                                    self.config.flow_id
-                                );
-                                break;
-                            }
-                            Err(TryRecvError::Empty) => (),
-                        }
+                        let state = self.state.write().unwrap();
                         state.get_next_start_query_time(
                             self.config.flow_id,
+                            &self
+                                .config
+                                .time_window_expr
+                                .as_ref()
+                                .and_then(|t| *t.time_window_size()),
                             Some(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT),
                         )
                     };
@@ -541,7 +541,10 @@ impl BatchingTask {
             .clone()
             .rewrite(&mut add_auto_column)
             .with_context(|_| DatafusionSnafu {
-                context: format!("Failed to rewrite plan {:?}", self.config.plan),
+                context: format!(
+                    "Failed to rewrite plan:\n {}\n",
+                    self.config.plan
+                ),
             })?
             .data;
         let schema_len = plan.schema().fields().len();
@@ -573,16 +576,19 @@ impl BatchingTask {

         let mut add_filter = AddFilterRewriter::new(expr);
         let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_schema.clone());
-        // make a not optimized plan for clearer unparse
         let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.config.query, false)
             .await?;
-        plan.clone()
+        let rewrite = plan
+            .clone()
             .rewrite(&mut add_filter)
             .and_then(|p| p.data.rewrite(&mut add_auto_column))
             .with_context(|_| DatafusionSnafu {
-                context: format!("Failed to rewrite plan {plan:?}"),
+                context: format!("Failed to rewrite plan:\n {}\n", plan),
             })?
-            .data
+            .data;
+        // only apply optimize after complex rewrite is done
+        apply_df_optimizer(rewrite).await?
     };

     Ok(Some((new_plan, schema_len)))
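Note: the shutdown check above now runs at the top of every loop iteration instead of only on the successful-execution branch, so a stopped flow exits promptly even when query generation fails. A minimal standalone sketch of that pattern, with the flow bookkeeping omitted:

    use tokio::sync::oneshot::{self, error::TryRecvError};

    async fn run_loop(mut shutdown_rx: oneshot::Receiver<()>) {
        loop {
            // Check for a shutdown signal before doing any work this tick.
            match shutdown_rx.try_recv() {
                Ok(()) => break,
                Err(TryRecvError::Closed) => break, // sender dropped: shut down anyway
                Err(TryRecvError::Empty) => {}
            }
            // ... generate and execute the next query, then sleep until the next tick ...
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    }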
@@ -55,6 +55,9 @@ use crate::error::{
 use crate::expr::error::DataTypeSnafu;
 use crate::Error;

+/// Represents a test timestamp in seconds since the Unix epoch.
+const DEFAULT_TEST_TIMESTAMP: Timestamp = Timestamp::new_second(17_0000_0000);

 /// Time window expr like `date_bin(INTERVAL '1' MINUTE, ts)`, this type help with
 /// evaluating the expr using given timestamp
 ///
@@ -70,6 +73,7 @@ pub struct TimeWindowExpr {
     pub column_name: String,
     logical_expr: Expr,
     df_schema: DFSchema,
+    eval_time_window_size: Option<std::time::Duration>,
 }

 impl std::fmt::Display for TimeWindowExpr {
@@ -84,6 +88,11 @@ impl std::fmt::Display for TimeWindowExpr {
 }

 impl TimeWindowExpr {
+    /// The time window size of the expr, get from calling `eval` with a test timestamp
+    pub fn time_window_size(&self) -> &Option<std::time::Duration> {
+        &self.eval_time_window_size
+    }

     pub fn from_expr(
         expr: &Expr,
         column_name: &str,
@@ -91,12 +100,28 @@ impl TimeWindowExpr {
         session: &SessionState,
     ) -> Result<Self, Error> {
         let phy_expr: PhysicalExprRef = to_phy_expr(expr, df_schema, session)?;
-        Ok(Self {
+        let mut zelf = Self {
             phy_expr,
             column_name: column_name.to_string(),
             logical_expr: expr.clone(),
             df_schema: df_schema.clone(),
-        })
+            eval_time_window_size: None,
+        };
+        let test_ts = DEFAULT_TEST_TIMESTAMP;
+        let (l, u) = zelf.eval(test_ts)?;
+        let time_window_size = match (l, u) {
+            (Some(l), Some(u)) => u.sub(&l).map(|r| r.to_std()).transpose().map_err(|_| {
+                UnexpectedSnafu {
+                    reason: format!(
+                        "Expect upper bound older than lower bound, found upper={u:?} and lower={l:?}"
+                    ),
+                }
+                .build()
+            })?,
+            _ => None,
+        };
+        zelf.eval_time_window_size = time_window_size;
+        Ok(zelf)
     }

     pub fn eval(
@@ -704,6 +729,28 @@ mod test {
             ),
             "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
         ),
+        // complex time window index with where
+        (
+            "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number in (2, 3, 4) GROUP BY time_window;",
+            Timestamp::new(1740394109, TimeUnit::Second),
+            (
+                "ts".to_string(),
+                Some(Timestamp::new(1740394080, TimeUnit::Second)),
+                Some(Timestamp::new(1740394140, TimeUnit::Second)),
+            ),
+            "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
+        ),
+        // complex time window index with between and
+        (
+            "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number BETWEEN 2 AND 4 GROUP BY time_window;",
+            Timestamp::new(1740394109, TimeUnit::Second),
+            (
+                "ts".to_string(),
+                Some(Timestamp::new(1740394080, TimeUnit::Second)),
+                Some(Timestamp::new(1740394140, TimeUnit::Second)),
+            ),
+            "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
+        ),
         // no time index
         (
             "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",
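Note: the window size is derived once at construction time by evaluating the expression at a fixed probe timestamp and measuring the bucket it lands in. Schematically, with a hypothetical `eval_bounds` closure standing in for the real evaluator:

    use std::time::Duration;

    /// Given an evaluator that maps a timestamp (seconds) to its window bounds,
    /// derive the window size from a single probe evaluation.
    fn window_size(
        eval_bounds: impl Fn(i64) -> (Option<i64>, Option<i64>),
        probe_ts: i64,
    ) -> Option<Duration> {
        match eval_bounds(probe_ts) {
            (Some(lower), Some(upper)) if upper >= lower => {
                Some(Duration::from_secs((upper - lower) as u64))
            }
            _ => None,
        }
    }

    // window_size(|ts| (Some(ts - ts % 60), Some(ts - ts % 60 + 60)), 1_700_000_000)
    // would report Some(Duration::from_secs(60)) for a 1-minute date_bin.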
@@ -50,8 +50,8 @@ pub async fn get_table_info_df_schema(
         .await
         .map_err(BoxedError::new)
         .context(ExternalSnafu)?
-        .with_context(|| TableNotFoundSnafu {
-            name: full_table_name.clone(),
+        .context(TableNotFoundSnafu {
+            name: &full_table_name,
         })?;
     let table_info = table.table_info().clone();

@@ -138,9 +138,12 @@ impl TreeNodeVisitor<'_> for FindGroupByFinalName {
     fn f_down(&mut self, node: &Self::Node) -> datafusion_common::Result<TreeNodeRecursion> {
         if let LogicalPlan::Aggregate(aggregate) = node {
             self.group_exprs = Some(aggregate.group_expr.iter().cloned().collect());
-            debug!("Group by exprs: {:?}", self.group_exprs);
+            debug!(
+                "FindGroupByFinalName: Get Group by exprs from Aggregate: {:?}",
+                self.group_exprs
+            );
         } else if let LogicalPlan::Distinct(distinct) = node {
-            debug!("Distinct: {:#?}", distinct);
+            debug!("FindGroupByFinalName: Distinct: {}", node);
             match distinct {
                 Distinct::All(input) => {
                     if let LogicalPlan::TableScan(table_scan) = &**input {
@@ -162,7 +165,10 @@ impl TreeNodeVisitor<'_> for FindGroupByFinalName {
                     self.group_exprs = Some(distinct_on.on_expr.iter().cloned().collect())
                 }
             }
-            debug!("Group by exprs: {:?}", self.group_exprs);
+            debug!(
+                "FindGroupByFinalName: Get Group by exprs from Distinct: {:?}",
+                self.group_exprs
+            );
         }

         Ok(TreeNodeRecursion::Continue)
@@ -342,8 +348,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
             }
         } else {
             return Err(DataFusionError::Plan(format!(
-                "Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?} at node {:?}",
-                query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas(), node
+                "Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}",
+                query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas()
             )));
         }

@@ -358,8 +364,6 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
         }
     }

-// TODO(discord9): a method to found out the precise time window

 /// Find out the `Filter` Node corresponding to innermost(deepest) `WHERE` and add a new filter expr to it
 #[derive(Debug)]
 pub struct AddFilterRewriter {
@@ -408,7 +412,9 @@ mod test {
     use datatypes::prelude::ConcreteDataType;
     use datatypes::schema::{ColumnSchema, Schema};
     use pretty_assertions::assert_eq;
+    use query::query_engine::DefaultSerializer;
     use session::context::QueryContext;
+    use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

     use super::*;
     use crate::test_utils::create_test_query_engine;
@@ -703,4 +709,18 @@ mod test {
            );
        }
    }

+    #[tokio::test]
+    async fn test_null_cast() {
+        let query_engine = create_test_query_engine();
+        let ctx = QueryContext::arc();
+        let sql = "SELECT NULL::DOUBLE FROM numbers_with_ts";
+        let plan = sql_to_df_plan(ctx, query_engine.clone(), sql, false)
+            .await
+            .unwrap();

+        let _sub_plan = DFLogicalSubstraitConvertor {}
+            .encode(&plan, DefaultSerializer)
+            .unwrap();
+    }
 }
@@ -18,9 +18,9 @@

 use std::collections::BTreeMap;

-use hydroflow::scheduled::graph::Hydroflow;
-use hydroflow::scheduled::graph_ext::GraphExt;
-use hydroflow::scheduled::port::{PortCtx, SEND};
+use dfir_rs::scheduled::graph::Dfir;
+use dfir_rs::scheduled::graph_ext::GraphExt;
+use dfir_rs::scheduled::port::{PortCtx, SEND};
 use itertools::Itertools;
 use snafu::OptionExt;

@@ -38,7 +38,7 @@ mod src_sink;
 /// The Context for build a Operator with id of `GlobalId`
 pub struct Context<'referred, 'df> {
     pub id: GlobalId,
-    pub df: &'referred mut Hydroflow<'df>,
+    pub df: &'referred mut Dfir<'df>,
     pub compute_state: &'referred mut DataflowState,
     /// a list of all collections being used in the operator
     ///
@@ -361,16 +361,16 @@ mod test {
     use std::cell::RefCell;
     use std::rc::Rc;

-    use hydroflow::scheduled::graph::Hydroflow;
-    use hydroflow::scheduled::graph_ext::GraphExt;
-    use hydroflow::scheduled::handoff::VecHandoff;
+    use dfir_rs::scheduled::graph::Dfir;
+    use dfir_rs::scheduled::graph_ext::GraphExt;
+    use dfir_rs::scheduled::handoff::VecHandoff;
     use pretty_assertions::assert_eq;

     use super::*;
     use crate::repr::Row;
     pub fn run_and_check(
         state: &mut DataflowState,
-        df: &mut Hydroflow,
+        df: &mut Dfir,
         time_range: std::ops::Range<i64>,
         expected: BTreeMap<i64, Vec<DiffRow>>,
         output: Rc<RefCell<Vec<DiffRow>>>,
@@ -416,7 +416,7 @@ mod test {
     }

     pub fn harness_test_ctx<'r, 'h>(
-        df: &'r mut Hydroflow<'h>,
+        df: &'r mut Dfir<'h>,
         state: &'r mut DataflowState,
     ) -> Context<'r, 'h> {
         let err_collector = state.get_err_collector();
@@ -436,7 +436,7 @@ mod test {
     /// that is it only emit once, not multiple times
     #[test]
     fn test_render_constant() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -473,7 +473,7 @@ mod test {
     /// a simple example to show how to use source and sink
     #[test]
     fn example_source_sink() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let (send_port, recv_port) = df.make_edge::<_, VecHandoff<i32>>("test_handoff");
         df.add_subgraph_source("test_handoff_source", send_port, move |_ctx, send| {
             for i in 0..10 {
@@ -498,8 +498,8 @@ mod test {

     #[test]
     fn test_tee_auto_schedule() {
-        use hydroflow::scheduled::handoff::TeeingHandoff as Toff;
-        let mut df = Hydroflow::new();
+        use dfir_rs::scheduled::handoff::TeeingHandoff as Toff;
+        let mut df = Dfir::new();
         let (send_port, recv_port) = df.make_edge::<_, Toff<i32>>("test_handoff");
         let source = df.add_subgraph_source("test_handoff_source", send_port, move |_ctx, send| {
             for i in 0..10 {
@@ -14,8 +14,8 @@

 use std::collections::BTreeMap;

-use hydroflow::scheduled::graph_ext::GraphExt;
-use hydroflow::scheduled::port::{PortCtx, SEND};
+use dfir_rs::scheduled::graph_ext::GraphExt;
+use dfir_rs::scheduled::port::{PortCtx, SEND};
 use itertools::Itertools;
 use snafu::OptionExt;

@@ -256,7 +256,7 @@ fn eval_mfp_core(
 mod test {

     use datatypes::data_type::ConcreteDataType;
-    use hydroflow::scheduled::graph::Hydroflow;
+    use dfir_rs::scheduled::graph::Dfir;

     use super::*;
     use crate::compute::render::test::{get_output_handle, harness_test_ctx, run_and_check};
@@ -269,7 +269,7 @@ mod test {
     /// namely: if mfp operator can schedule a delete at the correct time
     #[test]
     fn test_render_mfp_with_temporal() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -348,7 +348,7 @@ mod test {
     /// that is it filter the rows correctly
     #[test]
     fn test_render_mfp() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -388,7 +388,7 @@ mod test {
     /// test if mfp operator can run multiple times within same tick
     #[test]
     fn test_render_mfp_multiple_times() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -22,7 +22,7 @@ use datatypes::data_type::ConcreteDataType;
 use datatypes::prelude::DataType;
 use datatypes::value::{ListValue, Value};
 use datatypes::vectors::{BooleanVector, NullVector};
-use hydroflow::scheduled::graph_ext::GraphExt;
+use dfir_rs::scheduled::graph_ext::GraphExt;
 use itertools::Itertools;
 use snafu::{ensure, OptionExt, ResultExt};

@@ -1212,7 +1212,7 @@ mod test {

     use common_time::Timestamp;
     use datatypes::data_type::{ConcreteDataType, ConcreteDataType as CDT};
-    use hydroflow::scheduled::graph::Hydroflow;
+    use dfir_rs::scheduled::graph::Dfir;

     use super::*;
     use crate::compute::render::test::{get_output_handle, harness_test_ctx, run_and_check};
@@ -1228,7 +1228,7 @@ mod test {
     /// expected: sum(number), window_start, window_end
     #[test]
     fn test_tumble_group_by() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);
         const START: i64 = 1625097600000;
@@ -1389,7 +1389,7 @@ mod test {
     /// select avg(number) from number;
     #[test]
     fn test_avg_eval() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -1500,7 +1500,7 @@ mod test {
     /// | col | Int64 |
     #[test]
     fn test_basic_distinct() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -1556,7 +1556,7 @@ mod test {
     /// | col | Int64 |
     #[test]
     fn test_basic_batch_reduce_accum() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let now = state.current_time_ref();
         let mut ctx = harness_test_ctx(&mut df, &mut state);
@@ -1662,7 +1662,7 @@ mod test {
     /// | col | Int64 |
     #[test]
     fn test_basic_reduce_accum() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -1739,7 +1739,7 @@ mod test {
     /// this test include even more insert/delete case to cover all case for eval_distinct_core
     #[test]
     fn test_delete_reduce_distinct_accum() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -1818,7 +1818,7 @@ mod test {
     /// this test include insert and delete which should cover all case for eval_distinct_core
     #[test]
     fn test_basic_reduce_distinct_accum() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -1896,7 +1896,7 @@ mod test {
     /// | col | Int64 |
     #[test]
     fn test_composite_reduce_distinct_accum() {
-        let mut df = Hydroflow::new();
+        let mut df = Dfir::new();
         let mut state = DataflowState::default();
         let mut ctx = harness_test_ctx(&mut df, &mut state);

@@ -17,7 +17,7 @@
 use std::collections::BTreeMap;

 use common_telemetry::{debug, trace};
-use hydroflow::scheduled::graph_ext::GraphExt;
+use dfir_rs::scheduled::graph_ext::GraphExt;
 use itertools::Itertools;
 use snafu::OptionExt;
 use tokio::sync::broadcast::error::TryRecvError;
@@ -16,16 +16,16 @@ use std::cell::RefCell;
 use std::collections::{BTreeMap, VecDeque};
 use std::rc::Rc;

+use dfir_rs::scheduled::graph::Dfir;
+use dfir_rs::scheduled::SubgraphId;
 use get_size2::GetSize;
-use hydroflow::scheduled::graph::Hydroflow;
-use hydroflow::scheduled::SubgraphId;

 use crate::compute::types::ErrCollector;
 use crate::repr::{self, Timestamp};
 use crate::utils::{ArrangeHandler, Arrangement};

 /// input/output of a dataflow
-/// One `ComputeState` manage the input/output/schedule of one `Hydroflow`
+/// One `ComputeState` manage the input/output/schedule of one `Dfir`
 #[derive(Debug, Default)]
 pub struct DataflowState {
     /// it is important to use a deque to maintain the order of subgraph here
@@ -38,7 +38,7 @@ pub struct DataflowState {
     /// Which means it's also the current time in temporal filter to get current correct result
     as_of: Rc<RefCell<Timestamp>>,
     /// error collector local to this `ComputeState`,
-    /// useful for distinguishing errors from different `Hydroflow`
+    /// useful for distinguishing errors from different `Dfir`
     err_collector: ErrCollector,
     /// save all used arrange in this dataflow, since usually there is no delete operation
     /// we can just keep track of all used arrange and schedule subgraph when they need to be updated
@@ -65,7 +65,7 @@ impl DataflowState {
     /// schedule all subgraph that need to run with time <= `as_of` and run_available()
     ///
     /// return true if any subgraph actually executed
-    pub fn run_available_with_schedule(&mut self, df: &mut Hydroflow) -> bool {
+    pub fn run_available_with_schedule(&mut self, df: &mut Dfir) -> bool {
         // first split keys <= as_of into another map
         let mut before = self
             .schedule_subgraph
@@ -18,10 +18,10 @@ use std::rc::Rc;
 use std::sync::Arc;

 use common_error::ext::ErrorExt;
-use hydroflow::scheduled::graph::Hydroflow;
-use hydroflow::scheduled::handoff::TeeingHandoff;
-use hydroflow::scheduled::port::RecvPort;
-use hydroflow::scheduled::SubgraphId;
+use dfir_rs::scheduled::graph::Dfir;
+use dfir_rs::scheduled::handoff::TeeingHandoff;
+use dfir_rs::scheduled::port::RecvPort;
+use dfir_rs::scheduled::SubgraphId;
 use itertools::Itertools;
 use tokio::sync::Mutex;

@@ -46,7 +46,7 @@ impl<T: 'static + Clone> Collection<T> {
     /// clone a collection, require a mutable reference to the hydroflow instance
     ///
     /// Note: need to be the same hydroflow instance that this collection is created from
-    pub fn clone(&self, df: &mut Hydroflow) -> Self {
+    pub fn clone(&self, df: &mut Dfir) -> Self {
         Collection {
             stream: self.stream.tee(df),
         }
@@ -151,7 +151,7 @@ impl<T: 'static> CollectionBundle<T> {
 }

 impl<T: 'static + Clone> CollectionBundle<T> {
-    pub fn clone(&self, df: &mut Hydroflow) -> Self {
+    pub fn clone(&self, df: &mut Dfir) -> Self {
         Self {
             collection: self.collection.clone(df),
             arranged: self
@@ -25,7 +25,6 @@ use datafusion::config::ConfigOptions;
 use datafusion::error::DataFusionError;
 use datafusion::functions_aggregate::count::count_udaf;
 use datafusion::functions_aggregate::sum::sum_udaf;
-use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
 use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
 use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
 use datafusion::optimizer::optimize_projections::OptimizeProjections;
@@ -42,6 +41,7 @@ use datafusion_expr::{
     BinaryExpr, ColumnarValue, Expr, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl,
     Signature, TypeSignature, Volatility,
 };
+use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule;
 use query::parser::QueryLanguageParser;
 use query::query_engine::DefaultSerializer;
 use query::QueryEngine;
@@ -61,9 +61,9 @@ pub async fn apply_df_optimizer(
 ) -> Result<datafusion_expr::LogicalPlan, Error> {
     let cfg = ConfigOptions::new();
     let analyzer = Analyzer::with_rules(vec![
-        Arc::new(CountWildcardRule::new()),
-        Arc::new(AvgExpandRule::new()),
-        Arc::new(TumbleExpandRule::new()),
+        Arc::new(CountWildcardToTimeIndexRule),
+        Arc::new(AvgExpandRule),
+        Arc::new(TumbleExpandRule),
         Arc::new(CheckGroupByRule::new()),
         Arc::new(TypeCoercion::new()),
     ]);
@@ -128,13 +128,7 @@ pub async fn sql_to_flow_plan(
 }

 #[derive(Debug)]
-struct AvgExpandRule {}
+struct AvgExpandRule;

-impl AvgExpandRule {
-    pub fn new() -> Self {
-        Self {}
-    }
-}

 impl AnalyzerRule for AvgExpandRule {
     fn analyze(
@@ -331,13 +325,7 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> {

 /// expand tumble in aggr expr to tumble_start and tumble_end with column name like `window_start`
 #[derive(Debug)]
-struct TumbleExpandRule {}
+struct TumbleExpandRule;

-impl TumbleExpandRule {
-    pub fn new() -> Self {
-        Self {}
-    }
-}

 impl AnalyzerRule for TumbleExpandRule {
     fn analyze(
@@ -46,6 +46,12 @@ pub enum Error {
         location: Location,
     },
 
+    #[snafu(display("Flow engine is still recovering"))]
+    FlowNotRecovered {
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Error encountered while creating flow: {sql}"))]
     CreateFlow {
         sql: String,
@@ -61,6 +67,16 @@ pub enum Error {
         location: Location,
     },
 
+    #[snafu(display(
+        "No available frontend found after timeout: {timeout:?}, context: {context}"
+    ))]
+    NoAvailableFrontend {
+        timeout: std::time::Duration,
+        context: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("External error"))]
     External {
         source: BoxedError,
@@ -296,12 +312,14 @@ impl ErrorExt for Error {
             Self::Eval { .. }
             | Self::JoinTask { .. }
             | Self::Datafusion { .. }
-            | Self::InsertIntoFlow { .. } => StatusCode::Internal,
+            | Self::InsertIntoFlow { .. }
+            | Self::NoAvailableFrontend { .. }
+            | Self::FlowNotRecovered { .. } => StatusCode::Internal,
             Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
             Self::TableNotFound { .. }
             | Self::TableNotFoundMeta { .. }
-            | Self::FlowNotFound { .. }
             | Self::ListFlows { .. } => StatusCode::TableNotFound,
+            Self::FlowNotFound { .. } => StatusCode::FlowNotFound,
             Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
             Self::CreateFlow { .. } | Self::Arrow { .. } | Self::Time { .. } => {
                 StatusCode::EngineExecuteQuery
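For readers unfamiliar with snafu, here is a minimal standalone sketch of how variants like the two added above are raised. The variant names and display strings come from the hunk; the `#[snafu(implicit)] location` field, the real crate types, and the status-code mapping are omitted, so this is an illustration rather than the project's actual error module.

use std::time::Duration;

use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Flow engine is still recovering"))]
    FlowNotRecovered,

    #[snafu(display("No available frontend found after timeout: {timeout:?}, context: {context}"))]
    NoAvailableFrontend { timeout: Duration, context: String },
}

// `ensure!` bails out with the selector's error when the condition is false.
fn check_recovered(recovered: bool) -> Result<(), Error> {
    ensure!(recovered, FlowNotRecoveredSnafu);
    Ok(())
}

fn main() {
    println!("{}", check_recovered(false).unwrap_err());

    // Selectors of source-less variants can also be built directly.
    let err = NoAvailableFrontendSnafu {
        timeout: Duration::from_secs(30),
        context: "list frontends",
    }
    .build();
    println!("{err}");
}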
@@ -21,7 +21,7 @@ use common_error::ext::BoxedError;
 use datatypes::prelude::{ConcreteDataType, DataType};
 use datatypes::value::Value;
 use datatypes::vectors::{BooleanVector, Helper, VectorRef};
-use hydroflow::lattices::cc_traits::Iter;
+use dfir_rs::lattices::cc_traits::Iter;
 use itertools::Itertools;
 use snafu::{ensure, OptionExt, ResultExt};
 
@@ -60,7 +60,7 @@ pub enum GenericFn {
     Mul,
     Div,
     Mod,
-    // varadic func
+    // variadic func
     And,
     Or,
     // unmaterized func
@@ -43,7 +43,7 @@ mod utils;
 #[cfg(test)]
 mod test_utils;
 
-pub use adapter::{FlowConfig, FlowStreamingEngine, FlowWorkerManagerRef, FlownodeOptions};
+pub use adapter::{FlowConfig, FlowStreamingEngineRef, FlownodeOptions, StreamingEngine};
 pub use batching_mode::frontend_client::{FrontendClient, GrpcQueryHandlerWithBoxedError};
 pub(crate) use engine::{CreateFlowArgs, FlowId, TableName};
 pub use error::{Error, Result};
@@ -43,7 +43,7 @@ use servers::error::{StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
 use servers::http::HttpServerBuilder;
 use servers::metrics_handler::MetricsHandler;
 use servers::server::{ServerHandler, ServerHandlers};
-use session::context::{QueryContextBuilder, QueryContextRef};
+use session::context::QueryContextRef;
 use snafu::{OptionExt, ResultExt};
 use tokio::net::TcpListener;
 use tokio::sync::{broadcast, oneshot, Mutex};
@@ -52,24 +52,23 @@ use tonic::transport::server::TcpIncoming;
 use tonic::{Request, Response, Status};
 
 use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef};
-use crate::adapter::{create_worker, FlowWorkerManagerRef};
+use crate::adapter::{create_worker, FlowStreamingEngineRef};
 use crate::batching_mode::engine::BatchingEngine;
-use crate::engine::FlowEngine;
 use crate::error::{
-    to_status_with_last_err, CacheRequiredSnafu, CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu,
-    ListFlowsSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
+    to_status_with_last_err, CacheRequiredSnafu, ExternalSnafu, ListFlowsSnafu, ParseAddrSnafu,
+    ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
 };
 use crate::heartbeat::HeartbeatTask;
 use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
 use crate::transform::register_function_to_query_engine;
 use crate::utils::{SizeReportSender, StateReportHandler};
-use crate::{CreateFlowArgs, Error, FlowStreamingEngine, FlownodeOptions, FrontendClient};
+use crate::{Error, FlownodeOptions, FrontendClient, StreamingEngine};
 
 pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
 
 /// wrapping flow node manager to avoid orphan rule with Arc<...>
 #[derive(Clone)]
 pub struct FlowService {
-    /// TODO(discord9): replace with dual engine
     pub dual_engine: FlowDualEngineRef,
 }
 
@@ -173,6 +172,8 @@ impl FlownodeServer {
     }
 
     /// Start the background task for streaming computation.
+    ///
+    /// Should be called only after heartbeat is establish, hence can get cluster info
     async fn start_workers(&self) -> Result<(), Error> {
         let manager_ref = self.inner.flow_service.dual_engine.clone();
         let handle = manager_ref
@@ -396,100 +397,12 @@ impl FlownodeBuilder {
         Ok(instance)
     }
 
-    /// recover all flow tasks in this flownode in distributed mode(nodeid is Some(<num>))
-    ///
-    /// or recover all existing flow tasks if in standalone mode(nodeid is None)
-    ///
-    /// TODO(discord9): persistent flow tasks with internal state
-    async fn recover_flows(&self, manager: &FlowDualEngine) -> Result<usize, Error> {
-        let nodeid = self.opts.node_id;
-        let to_be_recovered: Vec<_> = if let Some(nodeid) = nodeid {
-            let to_be_recover = self
-                .flow_metadata_manager
-                .flownode_flow_manager()
-                .flows(nodeid)
-                .try_collect::<Vec<_>>()
-                .await
-                .context(ListFlowsSnafu { id: Some(nodeid) })?;
-            to_be_recover.into_iter().map(|(id, _)| id).collect()
-        } else {
-            let all_catalogs = self
-                .catalog_manager
-                .catalog_names()
-                .await
-                .map_err(BoxedError::new)
-                .context(ExternalSnafu)?;
-            let mut all_flow_ids = vec![];
-            for catalog in all_catalogs {
-                let flows = self
-                    .flow_metadata_manager
-                    .flow_name_manager()
-                    .flow_names(&catalog)
-                    .await
-                    .try_collect::<Vec<_>>()
-                    .await
-                    .map_err(BoxedError::new)
-                    .context(ExternalSnafu)?;
-
-                all_flow_ids.extend(flows.into_iter().map(|(_, id)| id.flow_id()));
-            }
-            all_flow_ids
-        };
-        let cnt = to_be_recovered.len();
-
-        // TODO(discord9): recover in parallel
-        for flow_id in to_be_recovered {
-            let info = self
-                .flow_metadata_manager
-                .flow_info_manager()
-                .get(flow_id)
-                .await
-                .map_err(BoxedError::new)
-                .context(ExternalSnafu)?
-                .context(FlowNotFoundSnafu { id: flow_id })?;
-
-            let sink_table_name = [
-                info.sink_table_name().catalog_name.clone(),
-                info.sink_table_name().schema_name.clone(),
-                info.sink_table_name().table_name.clone(),
-            ];
-            let args = CreateFlowArgs {
-                flow_id: flow_id as _,
-                sink_table_name,
-                source_table_ids: info.source_table_ids().to_vec(),
-                // because recover should only happen on restart the `create_if_not_exists` and `or_replace` can be arbitrary value(since flow doesn't exist)
-                // but for the sake of consistency and to make sure recover of flow actually happen, we set both to true
-                // (which is also fine since checks for not allow both to be true is on metasrv and we already pass that)
-                create_if_not_exists: true,
-                or_replace: true,
-                expire_after: info.expire_after(),
-                comment: Some(info.comment().clone()),
-                sql: info.raw_sql().clone(),
-                flow_options: info.options().clone(),
-                query_ctx: Some(
-                    QueryContextBuilder::default()
-                        .current_catalog(info.catalog_name().clone())
-                        .build(),
-                ),
-            };
-            manager
-                .create_flow(args)
-                .await
-                .map_err(BoxedError::new)
-                .with_context(|_| CreateFlowSnafu {
-                    sql: info.raw_sql().clone(),
-                })?;
-        }
-
-        Ok(cnt)
-    }
-
     /// build [`FlowWorkerManager`], note this doesn't take ownership of `self`,
     /// nor does it actually start running the worker.
     async fn build_manager(
         &mut self,
         query_engine: Arc<dyn QueryEngine>,
-    ) -> Result<FlowStreamingEngine, Error> {
+    ) -> Result<StreamingEngine, Error> {
         let table_meta = self.table_meta.clone();
 
         register_function_to_query_engine(&query_engine);
@@ -498,7 +411,7 @@ impl FlownodeBuilder {
 
         let node_id = self.opts.node_id.map(|id| id as u32);
 
-        let mut man = FlowStreamingEngine::new(node_id, query_engine, table_meta);
+        let mut man = StreamingEngine::new(node_id, query_engine, table_meta);
         for worker_id in 0..num_workers {
             let (tx, rx) = oneshot::channel();
 
@@ -605,7 +518,7 @@ impl FrontendInvoker {
     }
 
     pub async fn build_from(
-        flow_worker_manager: FlowWorkerManagerRef,
+        flow_streaming_engine: FlowStreamingEngineRef,
        catalog_manager: CatalogManagerRef,
        kv_backend: KvBackendRef,
        layered_cache_registry: LayeredCacheRegistryRef,
@@ -640,7 +553,7 @@ impl FrontendInvoker {
            node_manager.clone(),
        ));
 
-        let query_engine = flow_worker_manager.query_engine.clone();
+        let query_engine = flow_streaming_engine.query_engine.clone();
 
        let statement_executor = Arc::new(StatementExecutor::new(
            catalog_manager.clone(),
@@ -668,7 +581,7 @@ impl FrontendInvoker {
            .start_timer();
 
        self.inserter
-            .handle_row_inserts(requests, ctx, &self.statement_executor)
+            .handle_row_inserts(requests, ctx, &self.statement_executor, false, false)
            .await
            .map_err(BoxedError::new)
            .context(common_frontend::error::ExternalSnafu)
@@ -72,7 +72,10 @@ impl GrpcQueryHandler for Instance {
 
         let output = match request {
             Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
-            Request::RowInserts(requests) => self.handle_row_inserts(requests, ctx.clone()).await?,
+            Request::RowInserts(requests) => {
+                self.handle_row_inserts(requests, ctx.clone(), false, false)
+                    .await?
+            }
             Request::Deletes(requests) => self.handle_deletes(requests, ctx.clone()).await?,
             Request::RowDeletes(requests) => self.handle_row_deletes(requests, ctx.clone()).await?,
             Request::Query(query_request) => {
@@ -407,9 +410,17 @@ impl Instance {
         &self,
         requests: RowInsertRequests,
         ctx: QueryContextRef,
+        accommodate_existing_schema: bool,
+        is_single_value: bool,
     ) -> Result<Output> {
         self.inserter
-            .handle_row_inserts(requests, ctx, self.statement_executor.as_ref())
+            .handle_row_inserts(
+                requests,
+                ctx,
+                self.statement_executor.as_ref(),
+                accommodate_existing_schema,
+                is_single_value,
+            )
             .await
             .context(TableOperationSnafu)
     }
@@ -421,7 +432,14 @@ impl Instance {
         ctx: QueryContextRef,
     ) -> Result<Output> {
         self.inserter
-            .handle_last_non_null_inserts(requests, ctx, self.statement_executor.as_ref())
+            .handle_last_non_null_inserts(
+                requests,
+                ctx,
+                self.statement_executor.as_ref(),
+                true,
+                // Influx protocol may writes multiple fields (values).
+                false,
+            )
             .await
             .context(TableOperationSnafu)
     }
@@ -52,8 +52,9 @@ impl OpentsdbProtocolHandler for Instance {
             None
         };
 
+        // OpenTSDB is single value.
         let output = self
-            .handle_row_inserts(requests, ctx)
+            .handle_row_inserts(requests, ctx, true, true)
             .await
             .map_err(BoxedError::new)
             .context(servers::error::ExecuteGrpcQuerySnafu)?;
@@ -63,7 +63,7 @@ impl OpenTelemetryProtocolHandler for Instance {
             None
         };
 
-        self.handle_row_inserts(requests, ctx)
+        self.handle_row_inserts(requests, ctx, false, false)
             .await
             .map_err(BoxedError::new)
             .context(error::ExecuteGrpcQuerySnafu)
@@ -195,7 +195,7 @@ impl PromStoreProtocolHandler for Instance {
                 .map_err(BoxedError::new)
                 .context(error::ExecuteGrpcQuerySnafu)?
         } else {
-            self.handle_row_inserts(request, ctx.clone())
+            self.handle_row_inserts(request, ctx.clone(), true, true)
                 .await
                 .map_err(BoxedError::new)
                 .context(error::ExecuteGrpcQuerySnafu)?
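The hunks above all thread the same two new boolean arguments through `handle_row_inserts`. A hypothetical summary of the flag pairs passed at each call site, written as a standalone sketch (the enum and helper are illustrative only, not part of the codebase):

#[derive(Debug)]
enum WriteProtocol {
    Grpc,
    OpenTelemetry,
    OpenTsdb,
    PromRemoteWrite,
}

// Returns (accommodate_existing_schema, is_single_value) as seen in the diffs above.
fn insert_flags(protocol: &WriteProtocol) -> (bool, bool) {
    match protocol {
        WriteProtocol::Grpc | WriteProtocol::OpenTelemetry => (false, false),
        // Single-value protocols adapt to an existing table schema and carry one value per series.
        WriteProtocol::OpenTsdb | WriteProtocol::PromRemoteWrite => (true, true),
    }
}

fn main() {
    for p in [
        WriteProtocol::Grpc,
        WriteProtocol::OpenTelemetry,
        WriteProtocol::OpenTsdb,
        WriteProtocol::PromRemoteWrite,
    ] {
        println!("{p:?} -> {:?}", insert_flags(&p));
    }
}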
@@ -46,7 +46,11 @@ pub struct ChineseTokenizer;
 
 impl Tokenizer for ChineseTokenizer {
     fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str> {
-        JIEBA.cut(text, false)
+        if text.is_ascii() {
+            EnglishTokenizer {}.tokenize(text)
+        } else {
+            JIEBA.cut(text, false)
+        }
     }
 }
 
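A self-contained sketch of the fast path added above: purely ASCII input skips jieba entirely. The whitespace/punctuation split stands in for the real EnglishTokenizer, and the non-ASCII branch stands in for JIEBA.cut(text, false), so this only mirrors the control flow.

fn tokenize(text: &str) -> Vec<&str> {
    if text.is_ascii() {
        // Cheap English-style tokenization for ASCII-only text.
        text.split(|c: char| !c.is_ascii_alphanumeric())
            .filter(|s| !s.is_empty())
            .collect()
    } else {
        // Placeholder for the jieba-based segmentation branch.
        vec![text]
    }
}

fn main() {
    assert_eq!(tokenize("hello, world"), vec!["hello", "world"]);
    assert_eq!(tokenize("你好世界"), vec!["你好世界"]);
}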
@@ -182,6 +182,14 @@ impl ClientManager {
     }
 }
 
+#[cfg(test)]
+impl ClientManager {
+    /// Returns the controller client.
+    pub(crate) fn controller_client(&self) -> rskafka::client::controller::ControllerClient {
+        self.client.controller_client().unwrap()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use common_wal::test_util::run_test_with_kafka_wal;
@@ -552,6 +552,14 @@ mod tests {
             .collect()
     }
 
+    async fn prepare_topic(logstore: &KafkaLogStore, topic_name: &str) {
+        let controller_client = logstore.client_manager.controller_client();
+        controller_client
+            .create_topic(topic_name.to_string(), 1, 1, 5000)
+            .await
+            .unwrap();
+    }
+
     #[tokio::test]
     async fn test_append_batch_basic() {
         common_telemetry::init_default_ut_logging();
@@ -573,7 +581,9 @@ mod tests {
         };
         let logstore = KafkaLogStore::try_new(&config, None).await.unwrap();
         let topic_name = uuid::Uuid::new_v4().to_string();
+        prepare_topic(&logstore, &topic_name).await;
         let provider = Provider::kafka_provider(topic_name);
 
         let region_entries = (0..5)
             .map(|i| {
                 let region_id = RegionId::new(1, i);
@@ -647,6 +657,7 @@ mod tests {
         };
         let logstore = KafkaLogStore::try_new(&config, None).await.unwrap();
         let topic_name = uuid::Uuid::new_v4().to_string();
+        prepare_topic(&logstore, &topic_name).await;
         let provider = Provider::kafka_provider(topic_name);
         let region_entries = (0..5)
             .map(|i| {
@@ -66,10 +66,12 @@ use crate::election::postgres::PgElection;
 #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
 use crate::election::CANDIDATE_LEASE_SECS;
 use crate::metasrv::builder::MetasrvBuilder;
-use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
+use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectTarget, SelectorRef};
+use crate::node_excluder::NodeExcluderRef;
 use crate::selector::lease_based::LeaseBasedSelector;
 use crate::selector::load_based::LoadBasedSelector;
 use crate::selector::round_robin::RoundRobinSelector;
+use crate::selector::weight_compute::RegionNumsBasedWeightCompute;
 use crate::selector::SelectorType;
 use crate::service::admin;
 use crate::{error, Result};
@@ -294,14 +296,25 @@ pub async fn metasrv_builder(
 
     let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
 
+    let node_excluder = plugins
+        .get::<NodeExcluderRef>()
+        .unwrap_or_else(|| Arc::new(Vec::new()) as NodeExcluderRef);
     let selector = if let Some(selector) = plugins.get::<SelectorRef>() {
         info!("Using selector from plugins");
         selector
     } else {
         let selector = match opts.selector {
-            SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
-            SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
-            SelectorType::RoundRobin => Arc::new(RoundRobinSelector::default()) as SelectorRef,
+            SelectorType::LoadBased => Arc::new(LoadBasedSelector::new(
+                RegionNumsBasedWeightCompute,
+                node_excluder,
+            )) as SelectorRef,
+            SelectorType::LeaseBased => {
+                Arc::new(LeaseBasedSelector::new(node_excluder)) as SelectorRef
+            }
+            SelectorType::RoundRobin => Arc::new(RoundRobinSelector::new(
+                SelectTarget::Datanode,
+                node_excluder,
+            )) as SelectorRef,
         };
         info!(
             "Using selector from options, selector type: {}",
@@ -31,6 +31,7 @@ pub mod metasrv;
 pub mod metrics;
 #[cfg(feature = "mock")]
 pub mod mocks;
+pub mod node_excluder;
 pub mod procedure;
 pub mod pubsub;
 pub mod region;
@@ -14,7 +14,7 @@
 
 pub mod builder;
 
-use std::fmt::Display;
+use std::fmt::{self, Display};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::Duration;
@@ -96,7 +96,7 @@ pub enum BackendImpl {
     MysqlStore,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Serialize, Deserialize)]
 #[serde(default)]
 pub struct MetasrvOptions {
     /// The address the server listens on.
@@ -111,6 +111,11 @@ pub struct MetasrvOptions {
     pub use_memory_store: bool,
     /// Whether to enable region failover.
     pub enable_region_failover: bool,
+    /// Whether to allow region failover on local WAL.
+    ///
+    /// If it's true, the region failover will be allowed even if the local WAL is used.
+    /// Note that this option is not recommended to be set to true, because it may lead to data loss during failover.
+    pub allow_region_failover_on_local_wal: bool,
     /// The HTTP server options.
     pub http: HttpOptions,
     /// The logging options.
@@ -161,6 +166,47 @@ pub struct MetasrvOptions {
     pub node_max_idle_time: Duration,
 }
 
+impl fmt::Debug for MetasrvOptions {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut debug_struct = f.debug_struct("MetasrvOptions");
+        debug_struct
+            .field("bind_addr", &self.bind_addr)
+            .field("server_addr", &self.server_addr)
+            .field("store_addrs", &self.sanitize_store_addrs())
+            .field("selector", &self.selector)
+            .field("use_memory_store", &self.use_memory_store)
+            .field("enable_region_failover", &self.enable_region_failover)
+            .field(
+                "allow_region_failover_on_local_wal",
+                &self.allow_region_failover_on_local_wal,
+            )
+            .field("http", &self.http)
+            .field("logging", &self.logging)
+            .field("procedure", &self.procedure)
+            .field("failure_detector", &self.failure_detector)
+            .field("datanode", &self.datanode)
+            .field("enable_telemetry", &self.enable_telemetry)
+            .field("data_home", &self.data_home)
+            .field("wal", &self.wal)
+            .field("export_metrics", &self.export_metrics)
+            .field("store_key_prefix", &self.store_key_prefix)
+            .field("max_txn_ops", &self.max_txn_ops)
+            .field("flush_stats_factor", &self.flush_stats_factor)
+            .field("tracing", &self.tracing)
+            .field("backend", &self.backend);
+
+        #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
+        debug_struct.field("meta_table_name", &self.meta_table_name);
+
+        #[cfg(feature = "pg_kvbackend")]
+        debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);
+
+        debug_struct
+            .field("node_max_idle_time", &self.node_max_idle_time)
+            .finish()
+    }
+}
+
 const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
 
 impl Default for MetasrvOptions {
@@ -173,6 +219,7 @@ impl Default for MetasrvOptions {
             selector: SelectorType::default(),
             use_memory_store: false,
             enable_region_failover: false,
+            allow_region_failover_on_local_wal: false,
             http: HttpOptions::default(),
             logging: LoggingOptions {
                 dir: format!("{METASRV_HOME}/logs"),
@@ -243,6 +290,13 @@ impl MetasrvOptions {
             common_telemetry::debug!("detect local IP is not supported on Android");
         }
     }
+
+    fn sanitize_store_addrs(&self) -> Vec<String> {
+        self.store_addrs
+            .iter()
+            .map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
+            .collect()
+    }
 }
 
 pub struct MetasrvInfo {
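The hand-written Debug impl above exists so that `store_addrs` is run through a sanitizer before it can reach logs. A standalone sketch of the same idea; the `sanitize` helper here is a stand-in for common_meta::kv_backend::util::sanitize_connection_string and only illustrates the "strip credentials before Debug formatting" pattern:

use std::fmt;

struct Options {
    server_addr: String,
    store_addrs: Vec<String>,
}

// "user:pass@host:port" -> "host:port"; anything without credentials passes through.
fn sanitize(addr: &str) -> String {
    match addr.split_once('@') {
        Some((_credentials, rest)) => rest.to_string(),
        None => addr.to_string(),
    }
}

impl fmt::Debug for Options {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Options")
            .field("server_addr", &self.server_addr)
            .field(
                "store_addrs",
                &self.store_addrs.iter().map(|a| sanitize(a)).collect::<Vec<_>>(),
            )
            .finish()
    }
}

fn main() {
    let opts = Options {
        server_addr: "127.0.0.1:3002".into(),
        store_addrs: vec!["user:secret@127.0.0.1:5432".into()],
    };
    // Prints the store address without the credentials.
    println!("{opts:?}");
}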
@@ -40,7 +40,8 @@ use common_meta::state_store::KvStateStore;
 use common_meta::wal_options_allocator::{build_kafka_client, build_wal_options_allocator};
 use common_procedure::local::{LocalManager, ManagerConfig};
 use common_procedure::ProcedureManagerRef;
-use snafu::ResultExt;
+use common_telemetry::warn;
+use snafu::{ensure, ResultExt};
 
 use crate::cache_invalidator::MetasrvCacheInvalidator;
 use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
@@ -190,7 +191,7 @@ impl MetasrvBuilder {
 
         let meta_peer_client = meta_peer_client
             .unwrap_or_else(|| build_default_meta_peer_client(&election, &in_memory));
-        let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector));
+        let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector::default()));
         let pushers = Pushers::default();
         let mailbox = build_mailbox(&kv_backend, &pushers);
         let procedure_manager = build_procedure_manager(&options, &kv_backend);
@@ -234,13 +235,17 @@ impl MetasrvBuilder {
             ))
         });
 
+        let flow_selector = Arc::new(RoundRobinSelector::new(
+            SelectTarget::Flownode,
+            Arc::new(Vec::new()),
+        )) as SelectorRef;
+
         let flow_metadata_allocator = {
             // for now flownode just use round-robin selector
-            let flow_selector = RoundRobinSelector::new(SelectTarget::Flownode);
             let flow_selector_ctx = selector_ctx.clone();
             let peer_allocator = Arc::new(FlowPeerAllocator::new(
                 flow_selector_ctx,
-                Arc::new(flow_selector),
+                flow_selector.clone(),
             ));
             let seq = Arc::new(
                 SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
@@ -272,18 +277,25 @@ impl MetasrvBuilder {
             },
         ));
         let peer_lookup_service = Arc::new(MetaPeerLookupService::new(meta_peer_client.clone()));
 
         if !is_remote_wal && options.enable_region_failover {
-            return error::UnexpectedSnafu {
-                violated: "Region failover is not supported in the local WAL implementation!",
+            ensure!(
+                options.allow_region_failover_on_local_wal,
+                error::UnexpectedSnafu {
+                    violated: "Region failover is not supported in the local WAL implementation!
+                    If you want to enable region failover for local WAL, please set `allow_region_failover_on_local_wal` to true.",
+                }
+            );
+            if options.allow_region_failover_on_local_wal {
+                warn!("Region failover is force enabled in the local WAL implementation! This may lead to data loss during failover!");
             }
-            .fail();
         }
 
         let (tx, rx) = RegionSupervisor::channel();
         let (region_failure_detector_controller, region_supervisor_ticker): (
             RegionFailureDetectorControllerRef,
             Option<std::sync::Arc<RegionSupervisorTicker>>,
-        ) = if options.enable_region_failover && is_remote_wal {
+        ) = if options.enable_region_failover {
             (
                 Arc::new(RegionFailureDetectorControl::new(tx.clone())) as _,
                 Some(Arc::new(RegionSupervisorTicker::new(
@@ -309,7 +321,7 @@ impl MetasrvBuilder {
         ));
         region_migration_manager.try_start()?;
 
-        let region_failover_handler = if options.enable_region_failover && is_remote_wal {
+        let region_failover_handler = if options.enable_region_failover {
             let region_supervisor = RegionSupervisor::new(
                 rx,
                 options.failure_detector,
@@ -353,7 +365,7 @@ impl MetasrvBuilder {
             let (tx, rx) = WalPruneManager::channel();
             // Safety: Must be remote WAL.
             let remote_wal_options = options.wal.remote_wal_options().unwrap();
-            let kafka_client = build_kafka_client(remote_wal_options)
+            let kafka_client = build_kafka_client(&remote_wal_options.connection)
                 .await
                 .context(error::BuildKafkaClientSnafu)?;
             let wal_prune_context = WalPruneContext {
@@ -420,7 +432,7 @@ impl MetasrvBuilder {
             meta_peer_client: meta_peer_client.clone(),
             selector,
             // TODO(jeremy): We do not allow configuring the flow selector.
-            flow_selector: Arc::new(RoundRobinSelector::new(SelectTarget::Flownode)),
+            flow_selector,
             handler_group: RwLock::new(None),
             handler_group_builder: Mutex::new(Some(handler_group_builder)),
             election,
@@ -71,4 +71,13 @@ lazy_static! {
     /// The remote WAL prune execute counter.
     pub static ref METRIC_META_REMOTE_WAL_PRUNE_EXECUTE: IntCounterVec =
         register_int_counter_vec!("greptime_meta_remote_wal_prune_execute", "meta remote wal prune execute", &["topic_name"]).unwrap();
+    /// The migration stage elapsed histogram.
+    pub static ref METRIC_META_REGION_MIGRATION_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
+        "greptime_meta_region_migration_stage_elapsed",
+        "meta region migration stage elapsed",
+        &["stage"],
+        // 0.01 ~ 1000
+        exponential_buckets(0.01, 10.0, 7).unwrap(),
+    )
+    .unwrap();
 }
src/meta-srv/src/node_excluder.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use common_meta::DatanodeId;
+
+pub type NodeExcluderRef = Arc<dyn NodeExcluder>;
+
+/// [NodeExcluder] is used to help decide whether some nodes should be excluded (out of consideration)
+/// in certain situations. For example, in some node selectors.
+pub trait NodeExcluder: Send + Sync {
+    /// Returns the excluded datanode ids.
+    fn excluded_datanode_ids(&self) -> &Vec<DatanodeId>;
+}
+
+impl NodeExcluder for Vec<DatanodeId> {
+    fn excluded_datanode_ids(&self) -> &Vec<DatanodeId> {
+        self
+    }
+}
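A hedged sketch of how a custom implementation of the trait above could look. DatanodeId is aliased locally to keep the example self-contained, and the "maintenance list" excluder is hypothetical; the real selectors would receive it as a NodeExcluderRef (for instance through the plugin lookup shown in the bootstrap hunk earlier).

use std::sync::Arc;

type DatanodeId = u64;

pub trait NodeExcluder: Send + Sync {
    fn excluded_datanode_ids(&self) -> &Vec<DatanodeId>;
}

// Excludes a fixed set of datanodes, e.g. nodes undergoing maintenance.
struct MaintenanceExcluder {
    ids: Vec<DatanodeId>,
}

impl NodeExcluder for MaintenanceExcluder {
    fn excluded_datanode_ids(&self) -> &Vec<DatanodeId> {
        &self.ids
    }
}

fn main() {
    let excluder: Arc<dyn NodeExcluder> = Arc::new(MaintenanceExcluder { ids: vec![2, 5] });
    // A selector would skip these ids when picking datanodes for new regions.
    assert!(excluder.excluded_datanode_ids().contains(&5));
}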
@@ -25,7 +25,7 @@ pub(crate) mod update_metadata;
 pub(crate) mod upgrade_candidate_region;
 
 use std::any::Any;
-use std::fmt::Debug;
+use std::fmt::{Debug, Display};
 use std::time::Duration;
 
 use common_error::ext::BoxedError;
@@ -43,7 +43,7 @@ use common_procedure::error::{
     Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
 };
 use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status, StringKey};
-use common_telemetry::info;
+use common_telemetry::{error, info};
 use manager::RegionMigrationProcedureGuard;
 pub use manager::{
     RegionMigrationManagerRef, RegionMigrationProcedureTask, RegionMigrationProcedureTracker,
@@ -55,7 +55,10 @@ use tokio::time::Instant;
 
 use self::migration_start::RegionMigrationStart;
 use crate::error::{self, Result};
-use crate::metrics::{METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE};
+use crate::metrics::{
+    METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE,
+    METRIC_META_REGION_MIGRATION_STAGE_ELAPSED,
+};
 use crate::service::mailbox::MailboxRef;
 
 /// The default timeout for region migration.
@@ -103,6 +106,82 @@ impl PersistentContext {
     }
 }
 
+/// Metrics of region migration.
+#[derive(Debug, Clone, Default)]
+pub struct Metrics {
+    /// Elapsed time of downgrading region and upgrading region.
+    operations_elapsed: Duration,
+    /// Elapsed time of downgrading leader region.
+    downgrade_leader_region_elapsed: Duration,
+    /// Elapsed time of open candidate region.
+    open_candidate_region_elapsed: Duration,
+    /// Elapsed time of upgrade candidate region.
+    upgrade_candidate_region_elapsed: Duration,
+}
+
+impl Display for Metrics {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "operations_elapsed: {:?}, downgrade_leader_region_elapsed: {:?}, open_candidate_region_elapsed: {:?}, upgrade_candidate_region_elapsed: {:?}",
+            self.operations_elapsed,
+            self.downgrade_leader_region_elapsed,
+            self.open_candidate_region_elapsed,
+            self.upgrade_candidate_region_elapsed
+        )
+    }
+}
+
+impl Metrics {
+    /// Updates the elapsed time of downgrading region and upgrading region.
+    pub fn update_operations_elapsed(&mut self, elapsed: Duration) {
+        self.operations_elapsed += elapsed;
+    }
+
+    /// Updates the elapsed time of downgrading leader region.
+    pub fn update_downgrade_leader_region_elapsed(&mut self, elapsed: Duration) {
+        self.downgrade_leader_region_elapsed += elapsed;
+    }
+
+    /// Updates the elapsed time of open candidate region.
+    pub fn update_open_candidate_region_elapsed(&mut self, elapsed: Duration) {
+        self.open_candidate_region_elapsed += elapsed;
+    }
+
+    /// Updates the elapsed time of upgrade candidate region.
+    pub fn update_upgrade_candidate_region_elapsed(&mut self, elapsed: Duration) {
+        self.upgrade_candidate_region_elapsed += elapsed;
+    }
+}
+
+impl Drop for Metrics {
+    fn drop(&mut self) {
+        if !self.operations_elapsed.is_zero() {
+            METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
+                .with_label_values(&["operations"])
+                .observe(self.operations_elapsed.as_secs_f64());
+        }
+
+        if !self.downgrade_leader_region_elapsed.is_zero() {
+            METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
+                .with_label_values(&["downgrade_leader_region"])
+                .observe(self.downgrade_leader_region_elapsed.as_secs_f64());
+        }
+
+        if !self.open_candidate_region_elapsed.is_zero() {
+            METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
+                .with_label_values(&["open_candidate_region"])
+                .observe(self.open_candidate_region_elapsed.as_secs_f64());
+        }
+
+        if !self.upgrade_candidate_region_elapsed.is_zero() {
+            METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
+                .with_label_values(&["upgrade_candidate_region"])
+                .observe(self.upgrade_candidate_region_elapsed.as_secs_f64());
+        }
+    }
+}
+
 /// It's shared in each step and available in executing (including retrying).
 ///
 /// It will be dropped if the procedure runner crashes.
@@ -132,8 +211,8 @@ pub struct VolatileContext {
     leader_region_last_entry_id: Option<u64>,
     /// The last_entry_id of leader metadata region (Only used for metric engine).
     leader_region_metadata_last_entry_id: Option<u64>,
-    /// Elapsed time of downgrading region and upgrading region.
-    operations_elapsed: Duration,
+    /// Metrics of region migration.
+    metrics: Metrics,
 }
 
 impl VolatileContext {
@@ -231,12 +310,35 @@ impl Context {
     pub fn next_operation_timeout(&self) -> Option<Duration> {
         self.persistent_ctx
             .timeout
-            .checked_sub(self.volatile_ctx.operations_elapsed)
+            .checked_sub(self.volatile_ctx.metrics.operations_elapsed)
     }
 
     /// Updates operations elapsed.
     pub fn update_operations_elapsed(&mut self, instant: Instant) {
-        self.volatile_ctx.operations_elapsed += instant.elapsed();
+        self.volatile_ctx
+            .metrics
+            .update_operations_elapsed(instant.elapsed());
+    }
+
+    /// Updates the elapsed time of downgrading leader region.
+    pub fn update_downgrade_leader_region_elapsed(&mut self, instant: Instant) {
+        self.volatile_ctx
+            .metrics
+            .update_downgrade_leader_region_elapsed(instant.elapsed());
+    }
+
+    /// Updates the elapsed time of open candidate region.
+    pub fn update_open_candidate_region_elapsed(&mut self, instant: Instant) {
+        self.volatile_ctx
+            .metrics
+            .update_open_candidate_region_elapsed(instant.elapsed());
+    }
+
+    /// Updates the elapsed time of upgrade candidate region.
+    pub fn update_upgrade_candidate_region_elapsed(&mut self, instant: Instant) {
+        self.volatile_ctx
+            .metrics
+            .update_upgrade_candidate_region_elapsed(instant.elapsed());
    }
 
     /// Returns address of meta server.
@@ -550,6 +652,14 @@ impl Procedure for RegionMigrationProcedure {
                     .inc();
                 ProcedureError::retry_later(e)
             } else {
+                error!(
+                    e;
+                    "Region migration procedure failed, region_id: {}, from_peer: {}, to_peer: {}, {}",
+                    self.context.region_id(),
+                    self.context.persistent_ctx.from_peer,
+                    self.context.persistent_ctx.to_peer,
+                    self.context.volatile_ctx.metrics,
+                );
                 METRIC_META_REGION_MIGRATION_ERROR
                     .with_label_values(&[name, "external"])
                     .inc();
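The Metrics struct introduced above follows an accumulate-then-flush-on-Drop pattern: each migration stage adds wall-clock time into a plain field, and the histogram observation happens exactly once when the context is dropped, so every exit path (success, abort, failure) is covered. A minimal standalone sketch of that pattern; the `observe` function is only a stand-in for METRIC_META_REGION_MIGRATION_STAGE_ELAPSED.with_label_values(...).observe(...).

use std::time::{Duration, Instant};

#[derive(Default)]
struct StageMetrics {
    open_candidate_region_elapsed: Duration,
}

impl StageMetrics {
    fn update_open_candidate_region_elapsed(&mut self, elapsed: Duration) {
        self.open_candidate_region_elapsed += elapsed;
    }
}

impl Drop for StageMetrics {
    fn drop(&mut self) {
        // Flush to the metrics sink once, and only if the stage actually ran.
        if !self.open_candidate_region_elapsed.is_zero() {
            observe("open_candidate_region", self.open_candidate_region_elapsed.as_secs_f64());
        }
    }
}

fn observe(stage: &str, seconds: f64) {
    println!("stage={stage} elapsed={seconds}s");
}

fn main() {
    let mut metrics = StageMetrics::default();
    let now = Instant::now();
    std::thread::sleep(Duration::from_millis(10));
    metrics.update_open_candidate_region_elapsed(now.elapsed());
    // The observation is emitted here, when `metrics` goes out of scope.
}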
@@ -46,7 +46,13 @@ impl State for CloseDowngradedRegion {
             let region_id = ctx.region_id();
             warn!(err; "Failed to close downgraded leader region: {region_id} on datanode {:?}", downgrade_leader_datanode);
         }
+        info!(
+            "Region migration is finished: region_id: {}, from_peer: {}, to_peer: {}, {}",
+            ctx.region_id(),
+            ctx.persistent_ctx.from_peer,
+            ctx.persistent_ctx.to_peer,
+            ctx.volatile_ctx.metrics,
+        );
         Ok((Box::new(RegionMigrationEnd), Status::done()))
     }
 
@@ -54,6 +54,7 @@ impl Default for DowngradeLeaderRegion {
 #[typetag::serde]
 impl State for DowngradeLeaderRegion {
     async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
+        let now = Instant::now();
         // Ensures the `leader_region_lease_deadline` must exist after recovering.
         ctx.volatile_ctx
             .set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
@@ -77,6 +78,7 @@ impl State for DowngradeLeaderRegion {
                 }
             }
         }
+        ctx.update_downgrade_leader_region_elapsed(now);
 
         Ok((
             Box::new(UpgradeCandidateRegion::default()),
@@ -348,7 +350,8 @@ mod tests {
         let env = TestingEnv::new();
         let mut ctx = env.context_factory().new_context(persistent_context);
         prepare_table_metadata(&ctx, HashMap::default()).await;
-        ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+        ctx.volatile_ctx.metrics.operations_elapsed =
+            ctx.persistent_ctx.timeout + Duration::from_secs(1);
 
         let err = state.downgrade_region(&mut ctx).await.unwrap_err();
 
@@ -591,7 +594,8 @@ mod tests {
         let mut ctx = env.context_factory().new_context(persistent_context);
         let mailbox_ctx = env.mailbox_context();
         let mailbox = mailbox_ctx.mailbox().clone();
-        ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+        ctx.volatile_ctx.metrics.operations_elapsed =
+            ctx.persistent_ctx.timeout + Duration::from_secs(1);
 
         let (tx, rx) = tokio::sync::mpsc::channel(1);
         mailbox_ctx
@@ -15,6 +15,7 @@
 use std::any::Any;
 
 use common_procedure::Status;
+use common_telemetry::warn;
 use serde::{Deserialize, Serialize};
 
 use crate::error::{self, Result};
@@ -37,7 +38,15 @@ impl RegionMigrationAbort {
 #[async_trait::async_trait]
 #[typetag::serde]
 impl State for RegionMigrationAbort {
-    async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
+    async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
+        warn!(
+            "Region migration is aborted: {}, region_id: {}, from_peer: {}, to_peer: {}, {}",
+            self.reason,
+            ctx.region_id(),
+            ctx.persistent_ctx.from_peer,
+            ctx.persistent_ctx.to_peer,
+            ctx.volatile_ctx.metrics,
+        );
         error::MigrationAbortSnafu {
             reason: &self.reason,
         }
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 use std::any::Any;
-use std::time::{Duration, Instant};
+use std::time::Duration;
 
 use api::v1::meta::MailboxMessage;
 use common_meta::distributed_time_constants::REGION_LEASE_SECS;
@@ -24,6 +24,7 @@ use common_procedure::Status;
 use common_telemetry::info;
 use serde::{Deserialize, Serialize};
 use snafu::{OptionExt, ResultExt};
+use tokio::time::Instant;
 
 use crate::error::{self, Result};
 use crate::handler::HeartbeatMailbox;
@@ -42,7 +43,9 @@ pub struct OpenCandidateRegion;
 impl State for OpenCandidateRegion {
     async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
         let instruction = self.build_open_region_instruction(ctx).await?;
+        let now = Instant::now();
         self.open_candidate_region(ctx, instruction).await?;
+        ctx.update_open_candidate_region_elapsed(now);
 
         Ok((
             Box::new(UpdateMetadata::Downgrade),
@@ -54,9 +54,12 @@ impl Default for UpgradeCandidateRegion {
 #[typetag::serde]
 impl State for UpgradeCandidateRegion {
     async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
+        let now = Instant::now();
         if self.upgrade_region_with_retry(ctx).await {
+            ctx.update_upgrade_candidate_region_elapsed(now);
             Ok((Box::new(UpdateMetadata::Upgrade), Status::executing(false)))
         } else {
+            ctx.update_upgrade_candidate_region_elapsed(now);
             Ok((Box::new(UpdateMetadata::Rollback), Status::executing(false)))
         }
     }
@@ -288,7 +291,8 @@ mod tests {
         let persistent_context = new_persistent_context();
         let env = TestingEnv::new();
         let mut ctx = env.context_factory().new_context(persistent_context);
-        ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+        ctx.volatile_ctx.metrics.operations_elapsed =
+            ctx.persistent_ctx.timeout + Duration::from_secs(1);
 
         let err = state.upgrade_region(&ctx).await.unwrap_err();
 
@@ -558,7 +562,8 @@ mod tests {
         let mut ctx = env.context_factory().new_context(persistent_context);
         let mailbox_ctx = env.mailbox_context();
         let mailbox = mailbox_ctx.mailbox().clone();
-        ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+        ctx.volatile_ctx.metrics.operations_elapsed =
+            ctx.persistent_ctx.timeout + Duration::from_secs(1);
 
         let (tx, rx) = tokio::sync::mpsc::channel(1);
         mailbox_ctx
@@ -52,7 +52,7 @@ use crate::Result;
 
 pub type KafkaClientRef = Arc<Client>;
 
-const DELETE_RECORDS_TIMEOUT: Duration = Duration::from_secs(1);
+const DELETE_RECORDS_TIMEOUT: Duration = Duration::from_secs(5);
 
 /// The state of WAL pruning.
 #[derive(Debug, Serialize, Deserialize)]
@@ -335,22 +335,21 @@ impl WalPruneProcedure {
         })?;
         partition_client
             .delete_records(
-                (self.data.prunable_entry_id + 1) as i64,
+                // notice here no "+1" is needed because the offset arg is exclusive, and it's defensive programming just in case somewhere else have a off by one error, see https://kafka.apache.org/36/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#endOffsets(java.util.Collection) which we use to get the end offset from high watermark
+                self.data.prunable_entry_id as i64,
                 DELETE_RECORDS_TIMEOUT.as_millis() as i32,
             )
             .await
             .context(DeleteRecordsSnafu {
                 topic: &self.data.topic,
                 partition: DEFAULT_PARTITION,
-                offset: (self.data.prunable_entry_id + 1),
+                offset: self.data.prunable_entry_id,
             })
             .map_err(BoxedError::new)
             .with_context(|_| error::RetryLaterWithSourceSnafu {
                 reason: format!(
                     "Failed to delete records for topic: {}, partition: {}, offset: {}",
-                    self.data.topic,
-                    DEFAULT_PARTITION,
-                    self.data.prunable_entry_id + 1
+                    self.data.topic, DEFAULT_PARTITION, self.data.prunable_entry_id
                 ),
             })?;
         info!(
@@ -559,6 +558,7 @@ mod tests {
         topic_name = format!("test_procedure_execution-{}", topic_name);
         let mut env = TestEnv::new();
         let context = env.build_wal_prune_context(broker_endpoints).await;
+        TestEnv::prepare_topic(&context.client, &topic_name).await;
         let mut procedure = WalPruneProcedure::new(topic_name.clone(), context, 10, None);
 
         // Before any data in kvbackend is mocked, should return a retryable error.
@@ -605,19 +605,19 @@ mod tests {
         // Step 3: Test `on_prune`.
         let status = procedure.on_prune().await.unwrap();
         assert_matches!(status, Status::Done { output: None });
-        // Check if the entry ids after `prunable_entry_id` still exist.
-        check_entry_id_existence(
-            procedure.context.client.clone(),
-            &topic_name,
-            procedure.data.prunable_entry_id as i64 + 1,
-            true,
-        )
-        .await;
-        // Check if the entry s before `prunable_entry_id` are deleted.
+        // Check if the entry ids after(include) `prunable_entry_id` still exist.
         check_entry_id_existence(
             procedure.context.client.clone(),
             &topic_name,
             procedure.data.prunable_entry_id as i64,
+            true,
+        )
+        .await;
+        // Check if the entry ids before `prunable_entry_id` are deleted.
+        check_entry_id_existence(
+            procedure.context.client.clone(),
+            &topic_name,
+            procedure.data.prunable_entry_id as i64 - 1,
             false,
         )
         .await;
@@ -78,7 +78,7 @@ impl TestEnv {
|
|||||||
kafka_topic,
|
kafka_topic,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
Arc::new(build_kafka_client(&config).await.unwrap())
|
Arc::new(build_kafka_client(&config.connection).await.unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn build_wal_prune_context(&self, broker_endpoints: Vec<String>) -> WalPruneContext {
|
pub async fn build_wal_prune_context(&self, broker_endpoints: Vec<String>) -> WalPruneContext {
|
||||||
@@ -91,4 +91,12 @@ impl TestEnv {
|
|||||||
mailbox: self.mailbox.mailbox().clone(),
|
mailbox: self.mailbox.mailbox().clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn prepare_topic(client: &Arc<Client>, topic_name: &str) {
|
||||||
|
let controller_client = client.controller_client().unwrap();
|
||||||
|
controller_client
|
||||||
|
.create_topic(topic_name.to_string(), 1, 1, 5000)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use std::collections::HashSet;
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -25,7 +25,7 @@ use common_meta::leadership_notifier::LeadershipChangeListener;
|
|||||||
use common_meta::peer::PeerLookupServiceRef;
|
use common_meta::peer::PeerLookupServiceRef;
|
||||||
use common_meta::DatanodeId;
|
use common_meta::DatanodeId;
|
||||||
use common_runtime::JoinHandle;
|
use common_runtime::JoinHandle;
|
||||||
use common_telemetry::{error, info, warn};
|
use common_telemetry::{debug, error, info, warn};
|
||||||
use common_time::util::current_time_millis;
|
use common_time::util::current_time_millis;
|
||||||
use error::Error::{LeaderPeerChanged, MigrationRunning, TableRouteNotFound};
|
use error::Error::{LeaderPeerChanged, MigrationRunning, TableRouteNotFound};
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
@@ -208,6 +208,8 @@ pub const DEFAULT_TICK_INTERVAL: Duration = Duration::from_secs(1);
|
|||||||
pub struct RegionSupervisor {
|
pub struct RegionSupervisor {
|
||||||
/// Used to detect the failure of regions.
|
/// Used to detect the failure of regions.
|
||||||
failure_detector: RegionFailureDetector,
|
failure_detector: RegionFailureDetector,
|
||||||
|
/// Tracks the number of failovers for each region.
|
||||||
|
failover_counts: HashMap<DetectingRegion, u32>,
|
||||||
/// Receives [Event]s.
|
/// Receives [Event]s.
|
||||||
receiver: Receiver<Event>,
|
receiver: Receiver<Event>,
|
||||||
/// The context of [`SelectorRef`]
|
/// The context of [`SelectorRef`]
|
||||||
@@ -293,6 +295,7 @@ impl RegionSupervisor {
|
|||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
failure_detector: RegionFailureDetector::new(options),
|
failure_detector: RegionFailureDetector::new(options),
|
||||||
|
failover_counts: HashMap::new(),
|
||||||
receiver: event_receiver,
|
receiver: event_receiver,
|
||||||
selector_context,
|
selector_context,
|
||||||
selector,
|
selector,
|
||||||
@@ -336,13 +339,14 @@ impl RegionSupervisor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
|
async fn deregister_failure_detectors(&mut self, detecting_regions: Vec<DetectingRegion>) {
|
||||||
for region in detecting_regions {
|
for region in detecting_regions {
|
||||||
self.failure_detector.remove(®ion)
|
self.failure_detector.remove(®ion);
|
||||||
|
self.failover_counts.remove(®ion);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
|
async fn handle_region_failures(&mut self, mut regions: Vec<(DatanodeId, RegionId)>) {
|
||||||
if regions.is_empty() {
|
if regions.is_empty() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
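
The hunks above keep the new failover_counts map in lockstep with the failure detector: when a region is deregistered, its retry counter is dropped too, so a later failure starts counting from one again. A small sketch of that bookkeeping pattern using only standard-library types; Supervisor, tracked, and the (u64, u64) region key are illustrative stand-ins for the real RegionSupervisor, RegionFailureDetector, and DetectingRegion:

use std::collections::{HashMap, HashSet};

type Region = (u64, u64); // (datanode id, region id) -- simplified stand-in

/// Simplified supervisor keeping two pieces of per-region state in sync.
struct Supervisor {
    tracked: HashSet<Region>,              // stands in for the failure detector
    failover_counts: HashMap<Region, u32>, // stands in for the new counter map
}

impl Supervisor {
    /// Remove the detector entry and the retry counter together, so a
    /// re-registered region starts counting failovers from scratch.
    fn deregister(&mut self, regions: Vec<Region>) {
        for region in regions {
            self.tracked.remove(&region);
            self.failover_counts.remove(&region);
        }
    }
}

fn main() {
    let region = (1, 42);
    let mut s = Supervisor {
        tracked: HashSet::from([region]),
        failover_counts: HashMap::from([(region, 3)]),
    };
    s.deregister(vec![region]);
    assert!(!s.tracked.contains(&region));
    assert!(!s.failover_counts.contains_key(&region));
}
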
@@ -365,8 +369,7 @@ impl RegionSupervisor {
             .collect::<Vec<_>>();
 
         for (datanode_id, region_id) in migrating_regions {
-            self.failure_detector.remove(&(datanode_id, region_id));
-            warn!(
+            debug!(
                 "Removed region failover for region: {region_id}, datanode: {datanode_id} because it's migrating"
             );
         }
@@ -386,7 +389,12 @@ impl RegionSupervisor {
             .context(error::MaintenanceModeManagerSnafu)
     }
 
-    async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
+    async fn do_failover(&mut self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
+        let count = *self
+            .failover_counts
+            .entry((datanode_id, region_id))
+            .and_modify(|count| *count += 1)
+            .or_insert(1);
         let from_peer = self
             .peer_lookup
             .datanode(datanode_id)
@@ -415,11 +423,14 @@ impl RegionSupervisor {
             );
             return Ok(());
         }
+        info!(
+            "Failover for region: {region_id}, from_peer: {from_peer}, to_peer: {to_peer}, tries: {count}"
+        );
         let task = RegionMigrationProcedureTask {
             region_id,
             from_peer,
             to_peer,
-            timeout: DEFAULT_REGION_MIGRATION_TIMEOUT,
+            timeout: DEFAULT_REGION_MIGRATION_TIMEOUT * count,
         };
 
         if let Err(err) = self.region_migration_manager.submit_procedure(task).await {
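
Together, the last two hunks implement a simple retry policy: each failover attempt for a region bumps a per-region counter through the map's entry API, and the migration timeout grows linearly with that count. A standalone sketch of the pattern; the constant's value is a placeholder, and the real code builds a RegionMigrationProcedureTask rather than returning a Duration:

use std::collections::HashMap;
use std::time::Duration;

// Placeholder value; the actual constant lives in the metasrv crate.
const DEFAULT_REGION_MIGRATION_TIMEOUT: Duration = Duration::from_secs(10);

/// Increment the per-region failover counter and return the timeout for
/// this attempt, scaled linearly by the number of tries so far.
fn next_attempt_timeout(
    counts: &mut HashMap<(u64, u64), u32>,
    datanode_id: u64,
    region_id: u64,
) -> Duration {
    let count = *counts
        .entry((datanode_id, region_id))
        .and_modify(|c| *c += 1)
        .or_insert(1);
    DEFAULT_REGION_MIGRATION_TIMEOUT * count
}

fn main() {
    let mut counts = HashMap::new();
    // Each retry of the same region gets a proportionally longer timeout.
    assert_eq!(next_attempt_timeout(&mut counts, 1, 42), Duration::from_secs(10));
    assert_eq!(next_attempt_timeout(&mut counts, 1, 42), Duration::from_secs(20));
    assert_eq!(next_attempt_timeout(&mut counts, 1, 42), Duration::from_secs(30));
}
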
@@ -433,7 +444,8 @@ impl RegionSupervisor {
                 Ok(())
             }
             TableRouteNotFound { .. } => {
-                self.failure_detector.remove(&(datanode_id, region_id));
+                self.deregister_failure_detectors(vec![(datanode_id, region_id)])
+                    .await;
                 info!(
                     "Table route is not found, the table is dropped, removed failover detector for region: {}, datanode: {}",
                     region_id, datanode_id
@@ -441,7 +453,8 @@ impl RegionSupervisor {
                 Ok(())
             }
             LeaderPeerChanged { .. } => {
-                self.failure_detector.remove(&(datanode_id, region_id));
+                self.deregister_failure_detectors(vec![(datanode_id, region_id)])
+                    .await;
                 info!(
                     "Region's leader peer changed, removed failover detector for region: {}, datanode: {}",
                     region_id, datanode_id
@@ -18,7 +18,7 @@ pub mod load_based;
 pub mod round_robin;
 #[cfg(test)]
 pub(crate) mod test_utils;
-mod weight_compute;
+pub mod weight_compute;
 pub mod weighted_choose;
 use std::collections::HashSet;
 
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashSet;
+use std::sync::Arc;
+
 use common_meta::peer::Peer;
 
 use crate::error::Result;
 use crate::lease;
 use crate::metasrv::SelectorContext;
+use crate::node_excluder::NodeExcluderRef;
 use crate::selector::common::{choose_items, filter_out_excluded_peers};
 use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
 use crate::selector::{Selector, SelectorOptions};
 
 /// Select all alive datanodes based using a random weighted choose.
-pub struct LeaseBasedSelector;
+pub struct LeaseBasedSelector {
+    node_excluder: NodeExcluderRef,
+}
+
+impl LeaseBasedSelector {
+    pub fn new(node_excluder: NodeExcluderRef) -> Self {
+        Self { node_excluder }
+    }
+}
+
+impl Default for LeaseBasedSelector {
+    fn default() -> Self {
+        Self {
+            node_excluder: Arc::new(Vec::new()),
+        }
+    }
+}
 
 #[async_trait::async_trait]
 impl Selector for LeaseBasedSelector {
@@ -47,7 +67,14 @@ impl Selector for LeaseBasedSelector {
             .collect();
 
         // 3. choose peers by weight_array.
-        filter_out_excluded_peers(&mut weight_array, &opts.exclude_peer_ids);
+        let mut exclude_peer_ids = self
+            .node_excluder
+            .excluded_datanode_ids()
+            .iter()
+            .cloned()
+            .collect::<HashSet<_>>();
+        exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
+        filter_out_excluded_peers(&mut weight_array, &exclude_peer_ids);
         let mut weighted_choose = RandomWeightedChoose::new(weight_array);
         let selected = choose_items(&opts, &mut weighted_choose)?;
 
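
The selector change above follows a merge-and-filter pattern: the statically configured exclusion list from the node excluder is combined with the per-request exclude_peer_ids, and the union is applied once when filtering candidate peers. A self-contained sketch of that step with plain integers and a local stand-in for the weighted items (all names below are illustrative):

use std::collections::HashSet;

/// Stand-in for a weighted candidate peer.
#[derive(Debug, PartialEq)]
struct Candidate {
    peer_id: u64,
    weight: f64,
}

/// Drop every candidate whose peer id is in the exclusion set.
fn filter_out_excluded(candidates: &mut Vec<Candidate>, excluded: &HashSet<u64>) {
    candidates.retain(|c| !excluded.contains(&c.peer_id));
}

fn main() {
    // Statically excluded datanodes (e.g. from configuration) ...
    let statically_excluded = vec![2u64, 5];
    // ... merged with the per-request exclusions.
    let request_excluded = vec![7u64];

    let mut excluded: HashSet<u64> = statically_excluded.iter().cloned().collect();
    excluded.extend(request_excluded.iter());

    let mut candidates = vec![
        Candidate { peer_id: 1, weight: 1.0 },
        Candidate { peer_id: 2, weight: 1.0 },
        Candidate { peer_id: 7, weight: 1.0 },
    ];
    filter_out_excluded(&mut candidates, &excluded);
    assert_eq!(candidates.len(), 1);
    assert_eq!(candidates[0].peer_id, 1);
}
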
Some files were not shown because too many files have changed in this diff.