Compare commits


20 Commits

Author SHA1 Message Date
discord9
ed676d97c7 refactor: rename FlowWorkerManager to FlowStreamingEngine 2025-04-23 11:36:30 +08:00
discord9
14b2badded chore: better error variant 2025-04-22 19:52:19 +08:00
discord9
3626a50395 chore: use better error variant 2025-04-22 17:30:24 +08:00
discord9
0d0dad4ba2 chore: update docs 2025-04-22 17:09:42 +08:00
discord9
ae00e28b2a refactor: per review partially 2025-04-22 17:09:42 +08:00
discord9
92d2fafb33 chore: per review rename args 2025-04-22 17:09:42 +08:00
discord9
30b3600597 chore: per review 2025-04-22 17:09:42 +08:00
discord9
87f1a8c622 refactor: per review 2025-04-22 17:09:42 +08:00
discord9
8e815fc385 chore: add comments per review 2025-04-22 17:09:42 +08:00
discord9
ca46bd04ee chore: better logging 2025-04-22 17:09:42 +08:00
discord9
d32ade7399 fix: query without time window also clean dirty time window 2025-04-22 17:09:42 +08:00
discord9
b4aa0c8b8b refactor: per review 2025-04-22 17:09:42 +08:00
discord9
e647559d27 refactor: AddAutoColumnRewriter check for Projection 2025-04-22 17:09:42 +08:00
discord9
d2c4767d41 docs: explain nodeid use in check task 2025-04-22 17:09:42 +08:00
discord9
82cee11eea test: add align time window test 2025-04-22 17:09:42 +08:00
discord9
6d0470c3fb feat: flush_flow flush all ranges now 2025-04-22 17:09:42 +08:00
discord9
47a267e29c fix: add locks for create/drop flow&docs: update docs 2025-04-22 17:09:42 +08:00
discord9
fa13d06fc6 chore: update proto to main branch 2025-04-22 17:09:42 +08:00
discord9
26d9517c3e chore: update proto 2025-04-22 17:09:42 +08:00
discord9
a7da9af5de feat: use flow batching engine
broken: try using logical plan

fix: use dummy catalog for logical plan

fix: insert plan exec&sqlness grpc addr

feat: use frontend instance in flownode in standalone

feat: flow type in metasrv&fix: flush flow out of sync& column name alias

tests: sqlness update

tests: sqlness flow rebuild update

chore: per review

refactor: keep chnl mgr

refactor: use catalog mgr for get table

tests: use valid sql

fix: add more check

refactor: put flow type determine to frontend
2025-04-22 17:09:42 +08:00
130 changed files with 5286 additions and 7617 deletions

15
.coderabbit.yaml Normal file
View File

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true

View File

@@ -1,37 +0,0 @@
#!/bin/bash
DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci
# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_dev_builder_version
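
For context, the removed helper took the dev-builder image tag as its only positional argument (the workflow step further down passed the release job's version output). A minimal invocation sketch, with a hypothetical tag value:

    # path matches the workflow step below; the tag value here is made up
    ./.github/scripts/update-dev-builder-version.sh 2025-04-22-abcdef1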

View File

@@ -24,19 +24,11 @@ on:
description: Release dev-builder-android image
required: false
default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false
jobs:
release-dev-builder-images:
name: Release dev builder images
# The jobs are triggered by the following events:
# 1. Manually triggered workflow_dispatch event
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-latest
outputs:
version: ${{ steps.set-version.outputs.version }}
@@ -65,9 +57,9 @@ jobs:
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
@@ -93,7 +85,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -114,7 +106,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -135,7 +127,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -170,7 +162,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -184,7 +176,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -198,7 +190,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -209,24 +201,3 @@ jobs:
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
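
With the github.event_name == 'push' conditions dropped, these image builds now run only when the workflow is dispatched by hand. A sketch of such a dispatch with the GitHub CLI, assuming a workflow file name of dev-builder.yml (the actual file name is not visible in this diff):

    # input names come from the workflow above; the workflow file name is an assumption
    gh workflow run dev-builder.yml \
      -f release_dev_builder_ubuntu_image=true \
      -f release_dev_builder_centos_image=false \
      -f release_dev_builder_android_image=false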

233
Cargo.lock generated
View File

@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-base",
"common-decimal",
@@ -915,7 +915,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -1537,7 +1537,7 @@ dependencies = [
[[package]]
name = "cache"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"catalog",
"common-error",
@@ -1561,7 +1561,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arrow 54.2.1",
@@ -1619,9 +1619,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.2.20"
version = "1.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
dependencies = [
"jobserver",
"libc",
@@ -1874,7 +1874,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
[[package]]
name = "cli"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"auth",
@@ -1917,7 +1917,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tempfile",
"tokio",
@@ -1926,7 +1926,7 @@ dependencies = [
[[package]]
name = "client"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arc-swap",
@@ -1955,7 +1955,7 @@ dependencies = [
"rand 0.9.0",
"serde_json",
"snafu 0.8.5",
"substrait 0.14.2",
"substrait 0.14.0",
"substrait 0.37.3",
"tokio",
"tokio-stream",
@@ -1996,7 +1996,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"auth",
@@ -2056,7 +2056,7 @@ dependencies = [
"similar-asserts",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"temp-env",
"tempfile",
@@ -2102,7 +2102,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"anymap2",
"async-trait",
@@ -2124,11 +2124,11 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.14.2"
version = "0.14.0"
[[package]]
name = "common-config"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-base",
"common-error",
@@ -2153,7 +2153,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arrow 54.2.1",
"arrow-schema 54.3.1",
@@ -2190,7 +2190,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"bigdecimal 0.4.8",
"common-error",
@@ -2203,7 +2203,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-macro",
"http 1.1.0",
@@ -2214,7 +2214,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"common-error",
@@ -2224,7 +2224,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -2277,7 +2277,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"common-runtime",
@@ -2294,7 +2294,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arrow-flight",
@@ -2325,7 +2325,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"common-base",
@@ -2344,7 +2344,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arc-swap",
"common-query",
@@ -2358,7 +2358,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-error",
"common-macro",
@@ -2371,7 +2371,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"anymap2",
"api",
@@ -2432,7 +2432,7 @@ dependencies = [
[[package]]
name = "common-options"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2441,11 +2441,11 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "0.14.2"
version = "0.14.0"
[[package]]
name = "common-pprof"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-error",
"common-macro",
@@ -2457,7 +2457,7 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-stream",
"async-trait",
@@ -2484,7 +2484,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -2493,7 +2493,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -2510,7 +2510,7 @@ dependencies = [
"futures-util",
"serde",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"sqlparser_derive 0.1.1",
"statrs",
"store-api",
@@ -2519,7 +2519,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arc-swap",
"common-error",
@@ -2539,7 +2539,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -2569,14 +2569,14 @@ dependencies = [
[[package]]
name = "common-session"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"strum 0.27.1",
]
[[package]]
name = "common-telemetry"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"atty",
"backtrace",
@@ -2604,7 +2604,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"client",
"common-query",
@@ -2616,7 +2616,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arrow 54.2.1",
"chrono",
@@ -2634,7 +2634,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"build-data",
"const_format",
@@ -2644,7 +2644,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"common-base",
"common-error",
@@ -2946,9 +2946,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
dependencies = [
"crossbeam-utils",
]
@@ -3117,7 +3117,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-array 54.2.1",
@@ -3168,7 +3168,7 @@ dependencies = [
[[package]]
name = "datafusion-catalog"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"async-trait",
@@ -3188,7 +3188,7 @@ dependencies = [
[[package]]
name = "datafusion-catalog-listing"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-schema 54.3.1",
@@ -3211,7 +3211,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3236,7 +3236,7 @@ dependencies = [
[[package]]
name = "datafusion-common-runtime"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"log",
"tokio",
@@ -3245,12 +3245,12 @@ dependencies = [
[[package]]
name = "datafusion-doc"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
[[package]]
name = "datafusion-execution"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"dashmap",
@@ -3268,7 +3268,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"chrono",
@@ -3288,7 +3288,7 @@ dependencies = [
[[package]]
name = "datafusion-expr-common"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"datafusion-common",
@@ -3299,7 +3299,7 @@ dependencies = [
[[package]]
name = "datafusion-functions"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-buffer 54.3.1",
@@ -3328,7 +3328,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3349,7 +3349,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate-common"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3361,7 +3361,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-nested"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-array 54.2.1",
@@ -3383,7 +3383,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-table"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"async-trait",
@@ -3398,7 +3398,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-window"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"datafusion-common",
"datafusion-doc",
@@ -3414,7 +3414,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-window-common"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"datafusion-common",
"datafusion-physical-expr-common",
@@ -3423,7 +3423,7 @@ dependencies = [
[[package]]
name = "datafusion-macros"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"datafusion-expr",
"quote",
@@ -3433,7 +3433,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"chrono",
@@ -3451,7 +3451,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3474,7 +3474,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr-common"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3487,7 +3487,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-optimizer"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-schema 54.3.1",
@@ -3508,7 +3508,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-plan"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"ahash 0.8.11",
"arrow 54.2.1",
@@ -3538,7 +3538,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"arrow 54.2.1",
"arrow-array 54.2.1",
@@ -3556,7 +3556,7 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
version = "45.0.0"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
dependencies = [
"async-recursion",
"async-trait",
@@ -3572,7 +3572,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arrow-flight",
@@ -3624,7 +3624,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tokio",
"toml 0.8.19",
@@ -3633,7 +3633,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arrow 54.2.1",
"arrow-array 54.2.1",
@@ -3656,7 +3656,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"sqlparser_derive 0.1.1",
]
@@ -4259,7 +4259,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -4382,7 +4382,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "flow"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arrow 54.2.1",
@@ -4444,7 +4444,7 @@ dependencies = [
"snafu 0.8.5",
"store-api",
"strum 0.27.1",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tokio",
"tonic 0.12.3",
@@ -4499,7 +4499,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arc-swap",
@@ -4553,10 +4553,10 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"store-api",
"strfmt",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tokio",
"toml 0.8.19",
@@ -5795,7 +5795,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -6509,7 +6509,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]
[[package]]
@@ -6605,7 +6605,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "log-query"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"chrono",
"common-error",
@@ -6617,7 +6617,7 @@ dependencies = [
[[package]]
name = "log-store"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-stream",
"async-trait",
@@ -6911,7 +6911,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -6939,7 +6939,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -7029,7 +7029,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"aquamarine",
@@ -7041,14 +7041,12 @@ dependencies = [
"common-macro",
"common-query",
"common-recordbatch",
"common-runtime",
"common-telemetry",
"common-test-util",
"common-time",
"datafusion",
"datatypes",
"futures-util",
"humantime-serde",
"itertools 0.14.0",
"lazy_static",
"mito2",
@@ -7118,7 +7116,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"aquamarine",
@@ -7824,7 +7822,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"anyhow",
"bytes",
@@ -8119,7 +8117,7 @@ dependencies = [
[[package]]
name = "operator"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8166,9 +8164,9 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tokio",
"tokio-util",
@@ -8423,7 +8421,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -8443,7 +8441,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"store-api",
"table",
]
@@ -8705,7 +8703,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8847,7 +8845,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"auth",
"clap 4.5.19",
@@ -9127,7 +9125,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -9373,7 +9371,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-compression 0.4.13",
"async-trait",
@@ -9414,7 +9412,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -9477,10 +9475,10 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"statrs",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tokio",
"tokio-stream",
@@ -10005,14 +10003,15 @@ dependencies = [
[[package]]
name = "ring"
version = "0.17.14"
version = "0.17.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
dependencies = [
"cc",
"cfg-if",
"getrandom 0.2.15",
"libc",
"spin",
"untrusted",
"windows-sys 0.52.0",
]
@@ -10830,7 +10829,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -10950,7 +10949,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arc-swap",
@@ -11275,7 +11274,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"chrono",
@@ -11303,7 +11302,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"sqlparser_derive 0.1.1",
"store-api",
"table",
@@ -11330,7 +11329,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -11372,7 +11371,7 @@ dependencies = [
[[package]]
name = "sqlparser"
version = "0.54.0"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
dependencies = [
"lazy_static",
"log",
@@ -11380,7 +11379,7 @@ dependencies = [
"regex",
"serde",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
]
[[package]]
@@ -11408,7 +11407,7 @@ dependencies = [
[[package]]
name = "sqlparser_derive"
version = "0.3.0"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
dependencies = [
"proc-macro2",
"quote",
@@ -11649,7 +11648,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"aquamarine",
@@ -11798,7 +11797,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"async-trait",
"bytes",
@@ -11978,7 +11977,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"async-trait",
@@ -12229,7 +12228,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"arbitrary",
"async-trait",
@@ -12263,7 +12262,7 @@ dependencies = [
"serde_yaml",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
"sqlx",
"store-api",
"strum 0.27.1",
@@ -12273,7 +12272,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.14.2"
version = "0.14.0"
dependencies = [
"api",
"arrow-flight",
@@ -12340,7 +12339,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.14.2",
"substrait 0.14.0",
"table",
"tempfile",
"time",

View File

@@ -68,7 +68,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.14.2"
version = "0.14.0"
edition = "2021"
license = "Apache-2.0"
@@ -112,15 +112,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
@@ -162,7 +162,7 @@ paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.5.1", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"
@@ -191,7 +191,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"

View File

@@ -319,7 +319,6 @@
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |

View File

@@ -50,10 +50,6 @@ use_memory_store = false
## - Using shared storage (e.g., s3).
enable_region_failover = false
## Whether to allow region failover on local WAL.
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
allow_region_failover_on_local_wal = false
## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"

View File

@@ -4,21 +4,15 @@
This repository maintains the Grafana dashboards for GreptimeDB. It has two types of dashboards:
- `cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
- `standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
- `cluster/`: The dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
- `standalone/`: The dashboard for the standalone GreptimeDB instance. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
With the rapid development of GreptimeDB, the metrics may change; please feel free to submit your feedback and/or contributions to this dashboard 🤗
**NOTE**:
- The Grafana version should be greater than 9.0.
- If you want to modify the dashboards, you only need to modify the `cluster/dashboard.json` and run the `make dashboards` command to generate the `standalone/dashboard.json` and other related files.
To maintain the dashboards easily, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
To maintain the dashboards, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
- `cluster/dashboard.yaml`: The intermediate dashboard for the GreptimeDB cluster.
- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
## Data Sources
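
Per the README above, edits are made only to cluster/dashboard.json and the standalone variant plus the markdown docs are regenerated. A minimal sketch of that flow, assuming it is run from the directory that contains the Makefile with the dashboards target:

    # edit cluster/dashboard.json first, then regenerate the derived files
    make dashboards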

File diff suppressed because it is too large

View File

@@ -1,97 +1,96 @@
# Overview
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
# Ingestion
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
# Queries
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
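Any of these panel expressions can also be evaluated outside Grafana by sending them to the Prometheus HTTP API. Below is a minimal sketch for the HTTP P99 panel, assuming a Prometheus server at `http://localhost:9090`, the Python `requests` package, and the `$frontend` / `$__rate_interval` dashboard variables substituted by hand with a concrete matcher and a `5m` window (all of these are placeholders for your own deployment):

```python
import requests

PROM = "http://localhost:9090"  # assumed Prometheus address

# The HTTP P99 panel expression with the dashboard variables filled in manually.
query = (
    'histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) '
    '(rate(greptime_servers_http_requests_elapsed_bucket{instance=~"frontend.*",'
    'path!~"/health|/metrics"}[5m])))'
)

resp = requests.get(f"{PROM}/api/v1/query", params={"query": query}, timeout=10)
resp.raise_for_status()
for series in resp.json()["data"]["result"]:
    labels = series["metric"]
    _, value = series["value"]
    print(f'{labels.get("instance")} {labels.get("path")} p99 = {float(value):.3f}s')
```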
# Frontend to Datanode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
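All of the P99 panels in this group are built from cumulative `_bucket` series via `histogram_quantile()`, which finds the bucket the requested rank falls into and interpolates linearly inside it. The following is a rough Python sketch of that calculation on invented bucket counts; it skips the `rate()` step and the edge cases Prometheus handles (empty buckets, NaN, ranks landing in the `+Inf` bucket are only approximated):

```python
import math

# Hypothetical cumulative counts for one *_elapsed_bucket histogram:
# (upper bound in seconds, cumulative observation count), last bucket is +Inf.
buckets = [(0.005, 900), (0.01, 1500), (0.05, 1900),
           (0.1, 1980), (0.5, 1999), (math.inf, 2000)]

def quantile(q: float, buckets) -> float:
    total = buckets[-1][1]
    rank = q * total
    lower_bound, lower_count = 0.0, 0
    for upper_bound, count in buckets:
        if count >= rank:
            if math.isinf(upper_bound):
                return lower_bound  # fall back to the largest finite bound
            # Linear interpolation inside the bucket that contains the rank.
            return lower_bound + (upper_bound - lower_bound) * (rank - lower_count) / (count - lower_count)
        lower_bound, lower_count = upper_bound, count
    return lower_bound

print(f"p99 ~= {quantile(0.99, buckets):.3f}s")   # ~= 0.100s for these sample counts
```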
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
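Since the traffic counter and the duration histogram carry the same `instance`/`pod`/`scheme`/`operation` labels (as the legend formats above suggest), dividing the traffic series by the matching QPS series, e.g. `rate(opendal_operation_bytes_sum[...]) / rate(opendal_operation_duration_seconds_count[...])`, gives an average payload size per object-store operation, which is often easier to reason about than raw byte throughput. A back-of-the-envelope check with invented rates:

```python
# Invented per-second rates for one (instance, scheme, operation) series pair.
bytes_per_second = 12_582_912.0   # rate(opendal_operation_bytes_sum[...])
ops_per_second = 96.0             # rate(opendal_operation_duration_seconds_count[...])

avg_payload_bytes = bytes_per_second / ops_per_second
print(f"~= {avg_payload_bytes / 1024:.0f} KiB per operation")   # ~= 128 KiB
```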
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `none` | `prometheus` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `none` | `prometheus` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `none` | `prometheus` | `__auto` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | -- | `prometheus` | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{code}}]` |

View File

@@ -426,6 +426,7 @@ groups:
- title: Write Stall per Instance
type: timeseries
description: Write Stall per Instance.
unit: decbytes
queries:
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})
datasource:
@@ -657,22 +658,13 @@ groups:
- title: Opendal traffic
type: timeseries
description: Total traffic as in bytes by instance and operation
unit: decbytes
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: OpenDAL errors per Instance
type: timeseries
description: OpenDAL error counts per Instance.
queries:
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
- title: Metasrv
panels:
- title: Region migration datanode
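The hunks above adjust the per-panel `unit` field by hand, which makes it easy for a panel to end up with a missing or mismatched unit. Below is a minimal lint sketch for that, assuming the dashboard source is a YAML file laid out as `groups:` → panels → `unit`, as the hunks suggest; the file name, the exact schema, and the PyYAML dependency are all assumptions:

```python
import sys
import yaml  # pip install pyyaml

# Assumed layout: groups -> [{title, panels: [{title, type, description, unit, queries}]}]
path = sys.argv[1] if len(sys.argv) > 1 else "dashboard.yaml"
with open(path) as f:
    dashboard = yaml.safe_load(f)

missing = [
    (group.get("title", "?"), panel.get("title", "?"))
    for group in dashboard.get("groups", [])
    for panel in group.get("panels", [])
    if not panel.get("unit")
]

for group_title, panel_title in missing:
    print(f"panel without unit: {group_title} / {panel_title}")
sys.exit(1 if missing else 0)
```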

File diff suppressed because it is too large

View File

@@ -1,97 +1,96 @@
# Overview
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
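The `mysql`-datasource stats in this overview are plain SQL against `information_schema`, so they can be reproduced from any MySQL-protocol client. A minimal sketch, assuming a GreptimeDB instance whose MySQL endpoint listens on `127.0.0.1:4002` with default credentials and the `pymysql` package installed (host, port, and auth are deployment-specific, and the deployment counts are collapsed into one `GROUP BY` here for brevity):

```python
import pymysql  # pip install pymysql

# Assumed local GreptimeDB MySQL endpoint; adjust host/port/user/password as needed.
conn = pymysql.connect(host="127.0.0.1", port=4002, user="root", password="")

with conn.cursor() as cur:
    cur.execute("SELECT SUM(disk_size) FROM information_schema.region_statistics")
    (total_disk_size,) = cur.fetchone()

    cur.execute(
        "SELECT peer_type, COUNT(*) FROM information_schema.cluster_info GROUP BY peer_type"
    )
    topology = cur.fetchall()

conn.close()

print(f"total storage size: {total_disk_size} bytes")
for peer_type, count in topology:
    print(f"{peer_type}: {count}")
```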
# Ingestion
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
# Queries
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_count{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_count{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
# Frontend to Datanode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |

View File

@@ -426,6 +426,7 @@ groups:
- title: Write Stall per Instance
type: timeseries
description: Write Stall per Instance.
unit: decbytes
queries:
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{})
datasource:
@@ -657,22 +658,13 @@ groups:
- title: Opendal traffic
type: timeseries
description: Total traffic as in bytes by instance and operation
unit: decbytes
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: OpenDAL errors per Instance
type: timeseries
description: OpenDAL error counts per Instance.
queries:
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
- title: Metasrv
panels:
- title: Region migration datanode

View File

@@ -2,7 +2,7 @@
CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/standalone}
DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35
DAC_IMAGE=ghcr.io/zyy17/dac:20250422-c9435ce
remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
@@ -10,15 +10,8 @@ remove_instance_filters() {
}
generate_intermediate_dashboards_and_docs() {
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json -o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml -m > $CLUSTER_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json -o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml -m > $STANDALONE_DASHBOARD_DIR/dashboard.md
}
remove_instance_filters

View File

@@ -36,8 +36,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
@@ -192,36 +192,6 @@ impl Database {
from_grpc_response(response)
}
/// Retry if connection fails, max_retries is the max number of retries, so the total wait time
/// is `max_retries * GRPC_CONN_TIMEOUT`
pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut retries = 0;
let request = self.to_rpc_request(request);
loop {
let raw_response = client.handle(request.clone()).await;
match (raw_response, retries < max_retries) {
(Ok(resp), _) => return from_grpc_response(resp.into_inner()),
(Err(err), true) => {
// determine if the error is retryable
if is_grpc_retryable(&err) {
// retry
retries += 1;
warn!("Retrying {} times with error = {:?}", retries, err);
continue;
}
}
(Err(err), false) => {
error!(
"Failed to send request to grpc handle after {} retries, error = {:?}",
retries, err
);
return Err(err.into());
}
}
}
}
#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
@@ -398,11 +368,6 @@ impl Database {
}
}
/// by grpc standard, only `Unavailable` is retryable, see: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc
pub fn is_grpc_retryable(err: &tonic::Status) -> bool {
matches!(err.code(), tonic::Code::Unavailable)
}
#[derive(Default, Debug, Clone)]
struct FlightContext {
auth_header: Option<AuthHeader>,

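The `handle_with_retry` / `is_grpc_retryable` lines in this hunk describe a simple retry contract: only gRPC `Unavailable` is treated as transient, and the loop gives up after `max_retries` attempts so the total wait stays bounded. A minimal, self-contained sketch of that policy (the function names and the fixed back-off below are illustrative, not this crate's actual API):

```rust
use std::time::Duration;

/// Per the gRPC status-code guidelines, only `Unavailable` is safe to retry blindly.
fn is_retryable(status: &tonic::Status) -> bool {
    matches!(status.code(), tonic::Code::Unavailable)
}

/// Call `call()` up to `max_retries + 1` times, retrying only transient errors.
async fn call_with_retry<T, Fut>(
    mut call: impl FnMut() -> Fut,
    max_retries: u32,
) -> Result<T, tonic::Status>
where
    Fut: std::future::Future<Output = Result<T, tonic::Status>>,
{
    let mut retries = 0;
    loop {
        match call().await {
            Ok(value) => return Ok(value),
            Err(status) if retries < max_retries && is_retryable(&status) => {
                retries += 1;
                // Illustrative fixed back-off; the real client leans on its
                // connection timeout rather than an explicit sleep.
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
            Err(status) => return Err(status),
        }
    }
}
```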
View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::time::Duration;
use async_trait::async_trait;
@@ -132,8 +131,8 @@ impl SubCommand {
}
}
#[derive(Default, Parser)]
pub struct StartCommand {
#[derive(Debug, Default, Parser)]
struct StartCommand {
/// The address to bind the gRPC server.
#[clap(long, alias = "bind-addr")]
rpc_bind_addr: Option<String>,
@@ -172,29 +171,8 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}
impl fmt::Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
.field("rpc_server_addr", &self.rpc_server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("config_file", &self.config_file)
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
.field("enable_region_failover", &self.enable_region_failover)
.field("http_addr", &self.http_addr)
.field("http_timeout", &self.http_timeout)
.field("env_prefix", &self.env_prefix)
.field("data_home", &self.data_home)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("backend", &self.backend)
.finish()
}
}
impl StartCommand {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
let mut opts = MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
@@ -206,15 +184,6 @@ impl StartCommand {
Ok(opts)
}
fn sanitize_store_addrs(&self) -> Option<Vec<String>> {
self.store_addrs.as_ref().map(|addrs| {
addrs
.iter()
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
.collect()
})
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
@@ -292,7 +261,7 @@ impl StartCommand {
Ok(())
}
pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(

View File

@@ -56,8 +56,8 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{
FlowConfig, FlownodeBuilder, FlownodeInstance, FlownodeOptions, FrontendClient,
FrontendInvoker, GrpcQueryHandlerWithBoxedError, StreamingEngine,
FlowConfig, FlowStreamingEngine, FlownodeBuilder, FlownodeInstance, FlownodeOptions,
FrontendClient, FrontendInvoker, GrpcQueryHandlerWithBoxedError,
};
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::instance::builder::FrontendBuilder;
@@ -544,9 +544,9 @@ impl StartCommand {
// set the ref to query for the local flow state
{
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
let flow_worker_manager = flownode.flow_engine().streaming_engine();
information_extension
.set_flow_streaming_engine(flow_streaming_engine)
.set_flow_worker_manager(flow_worker_manager)
.await;
}
@@ -615,10 +615,10 @@ impl StartCommand {
.replace(weak_grpc_handler);
// set the frontend invoker for flownode
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
let flow_worker_manager = flownode.flow_engine().streaming_engine();
// flow server need to be able to use frontend to write insert requests back
let invoker = FrontendInvoker::build_from(
flow_streaming_engine.clone(),
flow_worker_manager.clone(),
catalog_manager.clone(),
kv_backend.clone(),
layered_cache_registry.clone(),
@@ -627,7 +627,7 @@ impl StartCommand {
)
.await
.context(error::StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
flow_worker_manager.set_frontend_invoker(invoker).await;
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
@@ -703,7 +703,7 @@ pub struct StandaloneInformationExtension {
region_server: RegionServer,
procedure_manager: ProcedureManagerRef,
start_time_ms: u64,
flow_streaming_engine: RwLock<Option<Arc<StreamingEngine>>>,
flow_worker_manager: RwLock<Option<Arc<FlowStreamingEngine>>>,
}
impl StandaloneInformationExtension {
@@ -712,14 +712,14 @@ impl StandaloneInformationExtension {
region_server,
procedure_manager,
start_time_ms: common_time::util::current_time_millis() as u64,
flow_streaming_engine: RwLock::new(None),
flow_worker_manager: RwLock::new(None),
}
}
/// Set the flow streaming engine for the standalone instance.
pub async fn set_flow_streaming_engine(&self, flow_streaming_engine: Arc<StreamingEngine>) {
let mut guard = self.flow_streaming_engine.write().await;
*guard = Some(flow_streaming_engine);
/// Set the flow worker manager for the standalone instance.
pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowStreamingEngine>) {
let mut guard = self.flow_worker_manager.write().await;
*guard = Some(flow_worker_manager);
}
}
@@ -798,7 +798,7 @@ impl InformationExtension for StandaloneInformationExtension {
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
Ok(Some(
self.flow_streaming_engine
self.flow_worker_manager
.read()
.await
.as_ref()

View File

@@ -74,7 +74,6 @@ fn test_load_datanode_example_config() {
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
logging: LoggingOptions {
@@ -217,7 +216,6 @@ fn test_load_standalone_example_config() {
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
storage: StorageConfig {

View File

@@ -13,8 +13,10 @@
// limitations under the License.
use std::sync::Arc;
mod greatest;
mod to_unixtime;
use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::function_registry::FunctionRegistry;
@@ -24,5 +26,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction));
registry.register(Arc::new(GreatestFunction));
}
}

View File

@@ -0,0 +1,328 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self};
use common_query::error::{
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{
DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct GreatestFunction;
const NAME: &str = "greatest";
macro_rules! gt_time_types {
($ty: ident, $columns:expr) => {{
let column1 = $columns[0].to_arrow_array();
let column2 = $columns[1].to_arrow_array();
let column1 = column1.as_primitive::<$ty>();
let column2 = column2.as_primitive::<$ty>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
}};
}
impl Function for GreatestFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
ensure!(
input_types.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
input_types.len()
)
}
);
match &input_types[0] {
ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: input_types,
}
.fail(),
}
}
fn signature(&self) -> Signature {
Signature::uniform(
2,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let column1 = cast(
&columns[0].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<TimestampMillisecondType>();
let column2 = cast(
&columns[1].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<TimestampMillisecondType>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
ConcreteDataType::Timestamp(ts_type) => match ts_type {
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
TimestampType::Millisecond(_) => {
gt_time_types!(TimestampMillisecondType, columns)
}
TimestampType::Microsecond(_) => {
gt_time_types!(TimestampMicrosecondType, columns)
}
TimestampType::Nanosecond(_) => {
gt_time_types!(TimestampNanosecondType, columns)
}
},
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for GreatestFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GREATEST")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_time::timestamp::TimeUnit;
use common_time::{Date, Timestamp};
use datatypes::types::{
DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
TimestampSecondType,
};
use datatypes::value::Value;
use datatypes::vectors::{
DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;
use super::*;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(StringVector::from(vec![
"1970-01-01".to_string(),
"2012-12-23".to_string(),
])) as _,
Arc::new(StringVector::from(vec![
"2001-02-01".to_string(),
"1999-01-01".to_string(),
])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
);
}
#[test]
fn test_greatest_takes_date_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::date_datatype(),
ConcreteDataType::date_datatype()
])
.unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Date(Date::from_str_utc("1970-01-01").unwrap())
);
assert_eq!(
result.get(1),
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
);
}
#[test]
fn test_greatest_takes_datetime_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
);
}
macro_rules! test_timestamp {
($type: expr,$unit: ident) => {
paste! {
#[test]
fn [<test_greatest_takes_ $unit:lower _vector>]() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[$type, $type]).unwrap(),
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
);
let columns = vec![
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
];
let result = function.eval(&FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
);
}
}
}
}
test_timestamp!(
ConcreteDataType::timestamp_nanosecond_datatype(),
Nanosecond
);
test_timestamp!(
ConcreteDataType::timestamp_microsecond_datatype(),
Microsecond
);
test_timestamp!(
ConcreteDataType::timestamp_millisecond_datatype(),
Millisecond
);
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
}
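The implementation above reduces element-wise `greatest` to two arrow kernels: a `gt` comparison that produces a boolean mask, then `zip` to pick from either input. A tiny standalone illustration of that pattern, assuming the `arrow` crate is used directly (the file above goes through the `datatypes`/`datafusion` re-exports instead):

```rust
use arrow::array::Int64Array;
use arrow::compute::kernels::cmp::gt;
use arrow::compute::kernels::zip::zip;
use arrow::error::ArrowError;

fn main() -> Result<(), ArrowError> {
    let a = Int64Array::from(vec![-1, 2]);
    let b = Int64Array::from(vec![0, 1]);
    // Element-wise "greatest": where a > b take a, otherwise take b.
    let mask = gt(&a, &b)?;
    let greatest = zip(&mask, &a, &b)?;
    println!("{greatest:?}"); // expect [0, 2]
    Ok(())
}
```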

View File

@@ -115,13 +115,6 @@ impl Function for UddSketchCalcFunction {
}
};
// Check if the sketch is empty, if so, return null
// This is important to avoid panics when calling estimate_quantile on an empty sketch
// In practice, this will happen if input is all null
if sketch.bucket_iter().count() == 0 {
builder.push_null();
continue;
}
// Compute the estimated quantile from the sketch
let result = sketch.estimate_quantile(perc);
builder.push(Some(result));

View File

@@ -187,7 +187,6 @@ mod tests {
},
flownode_ids: BTreeMap::from([(0, 1), (1, 2), (2, 3)]),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
query_context: None,
flow_name: "my_flow".to_string(),
raw_sql: "sql".to_string(),
expire_after: Some(300),

View File

@@ -449,7 +449,6 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
sink_table_name,
flownode_ids,
catalog_name,
query_context: Some(value.query_context.clone()),
flow_name,
raw_sql: sql,
expire_after,

View File

@@ -790,14 +790,6 @@ pub enum Error {
#[snafu(source)]
source: common_procedure::error::Error,
},
#[snafu(display("Failed to parse timezone"))]
InvalidTimeZone {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: common_time::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -869,8 +861,7 @@ impl ErrorExt for Error {
| InvalidSetDatabaseOption { .. }
| InvalidUnsetDatabaseOption { .. }
| InvalidTopicNamePrefix { .. }
| InvalidTimeZone { .. } => StatusCode::InvalidArguments,
InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
| InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
FlowNotFound { .. } => StatusCode::FlowNotFound,
FlowRouteNotFound { .. } => StatusCode::Unexpected,

View File

@@ -452,7 +452,6 @@ mod tests {
};
FlowInfoValue {
catalog_name: catalog_name.to_string(),
query_context: None,
flow_name: flow_name.to_string(),
source_table_ids,
sink_table_name,
@@ -626,7 +625,6 @@ mod tests {
};
let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
query_context: None,
flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,
@@ -866,7 +864,6 @@ mod tests {
};
let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
query_context: None,
flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,

View File

@@ -121,13 +121,6 @@ pub struct FlowInfoValue {
pub(crate) flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
/// The catalog name.
pub(crate) catalog_name: String,
/// The query context used when create flow.
/// Although flow doesn't belong to any schema, this query_context is needed to remember
/// the query context when `create_flow` is executed
/// for recovering flow using the same sql&query_context after db restart.
/// if none, should use default query context
#[serde(default)]
pub(crate) query_context: Option<crate::rpc::ddl::QueryContext>,
/// The flow name.
pub(crate) flow_name: String,
/// The raw sql.
@@ -162,10 +155,6 @@ impl FlowInfoValue {
&self.catalog_name
}
pub fn query_context(&self) -> &Option<crate::rpc::ddl::QueryContext> {
&self.query_context
}
pub fn flow_name(&self) -> &String {
&self.flow_name
}
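The doc comment on this field (on the other side of the hunk) explains that the stored context exists so a flow can be recovered with the same SQL and query context after a restart, falling back to a default context when absent. A hypothetical sketch of that fallback, using a simplified stand-in for the real `QueryContext`:

```rust
/// Simplified stand-in for the real QueryContext; only enough fields to show the fallback.
#[derive(Clone, Default)]
struct QueryContext {
    current_catalog: String,
    current_schema: String,
    timezone: String,
}

/// Prefer the context captured at `CREATE FLOW` time; otherwise build a default
/// context that at least carries the flow's catalog, which keeps metadata written
/// before this field existed loadable.
fn recover_query_ctx(stored: Option<QueryContext>, catalog_name: &str) -> QueryContext {
    stored.unwrap_or_else(|| QueryContext {
        current_catalog: catalog_name.to_string(),
        ..Default::default()
    })
}
```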

View File

@@ -35,7 +35,7 @@ pub mod memory;
pub mod rds;
pub mod test;
pub mod txn;
pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
#[async_trait]

View File

@@ -1,85 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Removes sensitive information like passwords from connection strings.
///
/// This function sanitizes connection strings by removing credentials:
/// - For URL format (mysql://user:password@host:port/db): Removes everything before '@'
/// - For parameter format (host=localhost password=secret): Removes the password parameter
/// - For URL format without credentials (mysql://host:port/db): Removes the protocol prefix
///
/// # Arguments
///
/// * `conn_str` - The connection string to sanitize
///
/// # Returns
///
/// A sanitized version of the connection string with sensitive information removed
pub fn sanitize_connection_string(conn_str: &str) -> String {
// Case 1: URL format with credentials (mysql://user:password@host:port/db)
// Extract everything after the '@' symbol
if let Some(at_pos) = conn_str.find('@') {
return conn_str[at_pos + 1..].to_string();
}
// Case 2: Parameter format with password (host=localhost password=secret dbname=mydb)
// Filter out any parameter that starts with "password="
if conn_str.contains("password=") {
return conn_str
.split_whitespace()
.filter(|param| !param.starts_with("password="))
.collect::<Vec<_>>()
.join(" ");
}
// Case 3: URL format without credentials (mysql://host:port/db)
// Extract everything after the protocol prefix
if let Some(host_part) = conn_str.split("://").nth(1) {
return host_part.to_string();
}
// Case 4: Already sanitized or unknown format
// Return as is
conn_str.to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sanitize_connection_string() {
// Test URL format with username/password
let conn_str = "mysql://user:password123@localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test URL format without credentials
let conn_str = "mysql://localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test parameter format with password
let conn_str = "host=localhost port=5432 user=postgres password=secret dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
// Test parameter format without password
let conn_str = "host=localhost port=5432 user=postgres dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
}
}

View File

@@ -113,10 +113,8 @@ impl LeaderRegionManifestInfo {
pub fn prunable_entry_id(&self) -> u64 {
match self {
LeaderRegionManifestInfo::Mito {
flushed_entry_id,
topic_latest_entry_id,
..
} => (*flushed_entry_id).max(*topic_latest_entry_id),
flushed_entry_id, ..
} => *flushed_entry_id,
LeaderRegionManifestInfo::Metric {
data_flushed_entry_id,
data_topic_latest_entry_id,

View File

@@ -35,20 +35,17 @@ use api::v1::{
};
use base64::engine::general_purpose;
use base64::Engine as _;
use common_time::{DatabaseTimeToLive, Timezone};
use common_time::DatabaseTimeToLive;
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};
use session::context::{QueryContextBuilder, QueryContextRef};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use crate::error::{
self, InvalidSetDatabaseOptionSnafu, InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu,
Result,
};
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
use crate::key::FlowId;
/// DDL tasks
@@ -1205,7 +1202,7 @@ impl From<DropFlowTask> for PbDropFlowTask {
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryContext {
current_catalog: String,
current_schema: String,
@@ -1226,19 +1223,6 @@ impl From<QueryContextRef> for QueryContext {
}
}
impl TryFrom<QueryContext> for session::context::QueryContext {
type Error = error::Error;
fn try_from(value: QueryContext) -> std::result::Result<Self, Self::Error> {
Ok(QueryContextBuilder::default()
.current_catalog(value.current_catalog)
.current_schema(value.current_schema)
.timezone(Timezone::from_tz_string(&value.timezone).context(InvalidTimeZoneSnafu)?)
.extensions(value.extensions)
.channel((value.channel as u32).into())
.build())
}
}
impl From<QueryContext> for PbQueryContext {
fn from(
QueryContext {

View File

@@ -57,9 +57,9 @@ use tokio::sync::Notify;
use crate::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use crate::error::{
self, BuildMetricEngineSnafu, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu,
MissingCacheSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result,
ShutdownInstanceSnafu, ShutdownServerSnafu, StartServerSnafu,
self, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingCacheSnafu,
MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result, ShutdownInstanceSnafu,
ShutdownServerSnafu, StartServerSnafu,
};
use crate::event_listener::{
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
@@ -398,46 +398,44 @@ impl DatanodeBuilder {
schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<Vec<RegionEngineRef>> {
let mut metric_engine_config = metric_engine::config::EngineConfig::default();
let mut mito_engine_config = MitoConfig::default();
let mut file_engine_config = file_engine::config::EngineConfig::default();
let mut engines = vec![];
let mut metric_engine_config = opts.region_engine.iter().find_map(|c| match c {
RegionEngineConfig::Metric(config) => Some(config.clone()),
_ => None,
});
for engine in &opts.region_engine {
match engine {
RegionEngineConfig::Mito(config) => {
mito_engine_config = config.clone();
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
config.clone(),
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::new(
mito_engine.clone(),
metric_engine_config.take().unwrap_or_default(),
);
engines.push(Arc::new(mito_engine) as _);
engines.push(Arc::new(metric_engine) as _);
}
RegionEngineConfig::File(config) => {
file_engine_config = config.clone();
let engine = FileRegionEngine::new(
config.clone(),
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
engines.push(Arc::new(engine) as _);
}
RegionEngineConfig::Metric(metric_config) => {
metric_engine_config = metric_config.clone();
RegionEngineConfig::Metric(_) => {
// Already handled in `build_mito_engine`.
}
}
}
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
mito_engine_config,
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::try_new(mito_engine.clone(), metric_engine_config)
.context(BuildMetricEngineSnafu)?;
let file_engine = FileRegionEngine::new(
file_engine_config,
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
Ok(vec![
Arc::new(mito_engine) as _,
Arc::new(metric_engine) as _,
Arc::new(file_engine) as _,
])
Ok(engines)
}
/// Builds [MitoEngine] according to options.

View File

@@ -336,13 +336,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to build metric engine"))]
BuildMetricEngine {
source: metric_engine::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serialize options to TOML"))]
TomlFormat {
#[snafu(implicit)]
@@ -459,7 +452,6 @@ impl ErrorExt for Error {
FindLogicalRegions { source, .. } => source.status_code(),
BuildMitoEngine { source, .. } => source.status_code(),
BuildMetricEngine { source, .. } => source.status_code(),
ConcurrentQueryLimiterClosed { .. } | ConcurrentQueryLimiterTimeout { .. } => {
StatusCode::RegionBusy
}

View File

@@ -135,13 +135,14 @@ impl Configurable for FlownodeOptions {
}
/// Arc-ed FlowNodeManager, cheaper to clone
pub type FlowStreamingEngineRef = Arc<StreamingEngine>;
pub type FlowWorkerManagerRef = Arc<FlowStreamingEngine>;
/// FlowNodeManager manages the state of all tasks in the flow node, which should be run on the same thread
///
/// The choice of timestamp is just using current system timestamp for now
///
pub struct StreamingEngine {
/// TODO(discord9): rename to FlowStreamingEngine
pub struct FlowStreamingEngine {
/// The handler to the worker that will run the dataflow
/// which is `!Send` so a handle is used
pub worker_handles: Vec<WorkerHandle>,
@@ -170,7 +171,7 @@ pub struct StreamingEngine {
}
/// Building FlownodeManager
impl StreamingEngine {
impl FlowStreamingEngine {
/// set frontend invoker
pub async fn set_frontend_invoker(&self, frontend: FrontendInvoker) {
*self.frontend_invoker.write().await = Some(frontend);
@@ -189,7 +190,7 @@ impl StreamingEngine {
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
let tick_manager = FlowTickManager::new();
let worker_handles = Vec::new();
StreamingEngine {
FlowStreamingEngine {
worker_handles,
worker_selector: Mutex::new(0),
query_engine,
@@ -265,7 +266,7 @@ pub fn batches_to_rows_req(batches: Vec<Batch>) -> Result<Vec<DiffRequest>, Erro
}
/// This impl block contains methods to send writeback requests to frontend
impl StreamingEngine {
impl FlowStreamingEngine {
/// Return the number of requests it made
pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
let all_reqs = self.generate_writeback_request().await?;
@@ -536,7 +537,7 @@ impl StreamingEngine {
}
/// Flow Runtime related methods
impl StreamingEngine {
impl FlowStreamingEngine {
/// Start state report handler, which will receive a sender from HeartbeatTask to send state size report back
///
/// if heartbeat task is shutdown, this future will exit too
@@ -661,7 +662,7 @@ impl StreamingEngine {
}
// flow is now shutdown, drop frontend_invoker early so a ref cycle(in standalone mode) can be prevent:
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_streaming_engine.frontend_invoker
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_worker_manager.frontend_invoker
self.frontend_invoker.write().await.take();
}
@@ -730,7 +731,7 @@ impl StreamingEngine {
}
/// Create&Remove flow
impl StreamingEngine {
impl FlowStreamingEngine {
/// remove a flow by it's id
pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
for handle in self.worker_handles.iter() {
@@ -748,6 +749,7 @@ impl StreamingEngine {
/// steps to create task:
/// 1. parse query into typed plan(and optional parse expire_after expr)
/// 2. render source/sink with output table id and used input table id
#[allow(clippy::too_many_arguments)]
pub async fn create_flow_inner(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
let CreateFlowArgs {
flow_id,

View File

@@ -35,14 +35,13 @@ use snafu::{ensure, IntoError, OptionExt, ResultExt};
use store_api::storage::{RegionId, TableId};
use tokio::sync::{Mutex, RwLock};
use crate::adapter::{CreateFlowArgs, StreamingEngine};
use crate::adapter::{CreateFlowArgs, FlowStreamingEngine};
use crate::batching_mode::engine::BatchingEngine;
use crate::batching_mode::{FRONTEND_SCAN_TIMEOUT, MIN_REFRESH_DURATION};
use crate::engine::FlowEngine;
use crate::error::{
CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, IllegalCheckTaskStateSnafu,
InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, NoAvailableFrontendSnafu,
SyncCheckTaskSnafu, UnexpectedSnafu,
InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, SyncCheckTaskSnafu,
UnexpectedSnafu,
};
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
@@ -56,7 +55,7 @@ pub type FlowDualEngineRef = Arc<FlowDualEngine>;
/// including create/drop/flush flow
/// and redirect insert requests to the appropriate engine
pub struct FlowDualEngine {
streaming_engine: Arc<StreamingEngine>,
streaming_engine: Arc<FlowStreamingEngine>,
batching_engine: Arc<BatchingEngine>,
/// helper struct for faster query flow by table id or vice versa
src_table2flow: RwLock<SrcTableToFlow>,
@@ -67,7 +66,7 @@ pub struct FlowDualEngine {
impl FlowDualEngine {
pub fn new(
streaming_engine: Arc<StreamingEngine>,
streaming_engine: Arc<FlowStreamingEngine>,
batching_engine: Arc<BatchingEngine>,
flow_metadata_manager: Arc<FlowMetadataManager>,
catalog_manager: Arc<dyn CatalogManager>,
@@ -82,12 +81,7 @@ impl FlowDualEngine {
}
}
/// Determine if the engine is in distributed mode
pub fn is_distributed(&self) -> bool {
self.streaming_engine.node_id.is_some()
}
pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
pub fn streaming_engine(&self) -> Arc<FlowStreamingEngine> {
self.streaming_engine.clone()
}
@@ -95,39 +89,6 @@ impl FlowDualEngine {
self.batching_engine.clone()
}
/// In distributed mode, scan periodically(1s) until available frontend is found, or timeout,
/// in standalone mode, return immediately
/// notice here if any frontend appear in cluster info this function will return immediately
async fn wait_for_available_frontend(&self, timeout: std::time::Duration) -> Result<(), Error> {
if !self.is_distributed() {
return Ok(());
}
let frontend_client = self.batching_engine().frontend_client.clone();
let sleep_duration = std::time::Duration::from_millis(1_000);
let now = std::time::Instant::now();
loop {
let frontend_list = frontend_client.scan_for_frontend().await?;
if !frontend_list.is_empty() {
let fe_list = frontend_list
.iter()
.map(|(_, info)| &info.peer.addr)
.collect::<Vec<_>>();
info!("Available frontend found: {:?}", fe_list);
return Ok(());
}
let elapsed = now.elapsed();
tokio::time::sleep(sleep_duration).await;
info!("Waiting for available frontend, elapsed={:?}", elapsed);
if elapsed >= timeout {
return NoAvailableFrontendSnafu {
timeout,
context: "No available frontend found in cluster info",
}
.fail();
}
}
}
/// Try to sync with check task, this is only used in drop flow&flush flow, so a flow id is required
///
/// the need to sync is to make sure flush flow actually get called
@@ -264,24 +225,11 @@ impl FlowDualEngine {
comment: Some(info.comment().clone()),
sql: info.raw_sql().clone(),
flow_options: info.options().clone(),
query_ctx: info
.query_context()
.clone()
.map(|ctx| {
ctx.try_into()
.map_err(BoxedError::new)
.context(ExternalSnafu)
})
.transpose()?
// or use default QueryContext with catalog_name from info
// to keep compatibility with old version
.or_else(|| {
Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().to_string())
.build(),
)
}),
query_ctx: Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().clone())
.build(),
),
};
if let Err(err) = self
.create_flow(args)
@@ -352,12 +300,11 @@ impl FlowDualEngine {
}
);
check_task.take().unwrap().stop().await?;
check_task.take().expect("Already checked").stop().await?;
info!("Stopped flow consistent check task");
Ok(())
}
/// TODO(discord9): also add a `exists` api using flow metadata manager's `exists` method
async fn flow_exist_in_metadata(&self, flow_id: FlowId) -> Result<bool, Error> {
self.flow_metadata_manager
.flow_info_manager()
@@ -377,50 +324,31 @@ struct ConsistentCheckTask {
impl ConsistentCheckTask {
async fn start_check_task(engine: &Arc<FlowDualEngine>) -> Result<Self, Error> {
let engine = engine.clone();
// first do recover flows
engine.check_flow_consistent(true, false).await?;
let inner = engine.clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let (trigger_tx, mut trigger_rx) =
tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
let handle = common_runtime::spawn_global(async move {
// first check if available frontend is found
if let Err(err) = engine
.wait_for_available_frontend(FRONTEND_SCAN_TIMEOUT)
.await
{
warn!("No frontend is available yet:\n {err:?}");
}
// then do recover flows, if failed, always retry
let mut recover_retry = 0;
while let Err(err) = engine.check_flow_consistent(true, false).await {
recover_retry += 1;
error!(
"Failed to recover flows:\n {err:?}, retry {} in {}s",
recover_retry,
MIN_REFRESH_DURATION.as_secs()
);
tokio::time::sleep(MIN_REFRESH_DURATION).await;
}
// then do check flows, with configurable allow_create and allow_drop
let (mut allow_create, mut allow_drop) = (false, false);
let mut args = (false, false);
let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
loop {
if let Err(err) = engine.check_flow_consistent(allow_create, allow_drop).await {
if let Err(err) = inner.check_flow_consistent(args.0, args.1).await {
error!(err; "Failed to check flow consistent");
}
if let Some(done) = ret_signal.take() {
let _ = done.send(());
}
tokio::select! {
_ = rx.recv() => break,
incoming = trigger_rx.recv() => if let Some(incoming) = incoming {
(allow_create, allow_drop) = (incoming.0, incoming.1);
args = (incoming.0, incoming.1);
ret_signal = Some(incoming.2);
},
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
(allow_create, allow_drop) = (false, false);
},
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => args=(false,false),
}
}
});
@@ -591,12 +519,7 @@ impl FlowEngine for FlowDualEngine {
match flow_type {
Some(FlowType::Batching) => self.batching_engine.flush_flow(flow_id).await,
Some(FlowType::Streaming) => self.streaming_engine.flush_flow(flow_id).await,
None => {
warn!(
"Currently flow={flow_id} doesn't exist in flownode, ignore flush_flow request"
);
Ok(0)
}
None => Ok(0),
}
}
@@ -769,7 +692,7 @@ fn to_meta_err(
}
#[async_trait::async_trait]
impl common_meta::node_manager::Flownode for StreamingEngine {
impl common_meta::node_manager::Flownode for FlowStreamingEngine {
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
let query_ctx = request
.header
@@ -855,7 +778,7 @@ impl common_meta::node_manager::Flownode for StreamingEngine {
}
}
impl FlowEngine for StreamingEngine {
impl FlowEngine for FlowStreamingEngine {
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
self.create_flow_inner(args).await
}
@@ -907,7 +830,7 @@ impl FetchFromRow {
}
}
impl StreamingEngine {
impl FlowStreamingEngine {
async fn handle_inserts_inner(
&self,
request: InsertRequests,

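The `ConsistentCheckTask` changes above revolve around one loop shape: run a consistency check, then wait on whichever comes first of a shutdown signal, an explicit trigger carrying `(allow_create, allow_drop)` plus a completion channel, or a periodic timer that resets both flags. A minimal sketch of that pattern with assumed types (the real task additionally waits for an available frontend and retries flow recovery before entering the loop):

```rust
use std::time::Duration;
use tokio::sync::{mpsc, oneshot};

/// Placeholder for the real check between flownode state and flow metadata.
async fn check_flow_consistent(_allow_create: bool, _allow_drop: bool) {}

async fn run_check_loop(
    mut shutdown_rx: mpsc::Receiver<()>,
    mut trigger_rx: mpsc::Receiver<(bool, bool, oneshot::Sender<()>)>,
) {
    let (mut allow_create, mut allow_drop) = (false, false);
    let mut done: Option<oneshot::Sender<()>> = None;
    loop {
        check_flow_consistent(allow_create, allow_drop).await;
        if let Some(tx) = done.take() {
            // Tell a caller that explicitly triggered this round that it finished.
            let _ = tx.send(());
        }
        tokio::select! {
            _ = shutdown_rx.recv() => break,
            incoming = trigger_rx.recv() => if let Some((create, drop_allowed, tx)) = incoming {
                (allow_create, allow_drop) = (create, drop_allowed);
                done = Some(tx);
            },
            _ = tokio::time::sleep(Duration::from_secs(10)) => {
                // Periodic background rounds never create or drop flows on their own.
                (allow_create, allow_drop) = (false, false);
            }
        }
    }
}
```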
View File

@@ -31,7 +31,7 @@ use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use crate::adapter::table_source::ManagedTableSource;
use crate::adapter::{FlowId, FlowStreamingEngineRef, StreamingEngine};
use crate::adapter::{FlowId, FlowStreamingEngine, FlowWorkerManagerRef};
use crate::error::{FlowNotFoundSnafu, JoinTaskSnafu, UnexpectedSnafu};
use crate::expr::error::ExternalSnafu;
use crate::expr::utils::find_plan_time_window_expr_lower_bound;
@@ -39,10 +39,10 @@ use crate::repr::RelationDesc;
use crate::server::get_all_flow_ids;
use crate::{Error, FrontendInvoker};
impl StreamingEngine {
impl FlowStreamingEngine {
/// Create and start refill flow tasks in background
pub async fn create_and_start_refill_flow_tasks(
self: &FlowStreamingEngineRef,
self: &FlowWorkerManagerRef,
flow_metadata_manager: &FlowMetadataManagerRef,
catalog_manager: &CatalogManagerRef,
) -> Result<(), Error> {
@@ -130,7 +130,7 @@ impl StreamingEngine {
/// Starting to refill flows, if any error occurs, will rebuild the flow and retry
pub(crate) async fn starting_refill_flows(
self: &FlowStreamingEngineRef,
self: &FlowWorkerManagerRef,
tasks: Vec<RefillTask>,
) -> Result<(), Error> {
// TODO(discord9): add a back pressure mechanism
@@ -266,7 +266,7 @@ impl TaskState<()> {
fn start_running(
&mut self,
task_data: &TaskData,
manager: FlowStreamingEngineRef,
manager: FlowWorkerManagerRef,
mut output_stream: SendableRecordBatchStream,
) -> Result<(), Error> {
let data = (*task_data).clone();
@@ -383,7 +383,7 @@ impl RefillTask {
/// Start running the task in background, non-blocking
pub async fn start_running(
&mut self,
manager: FlowStreamingEngineRef,
manager: FlowWorkerManagerRef,
invoker: &FrontendInvoker,
) -> Result<(), Error> {
let TaskState::Prepared { sql } = &mut self.state else {

View File

@@ -16,9 +16,9 @@ use std::collections::BTreeMap;
use common_meta::key::flow::flow_state::FlowStat;
use crate::StreamingEngine;
use crate::FlowStreamingEngine;
impl StreamingEngine {
impl FlowStreamingEngine {
pub async fn gen_state_report(&self) -> FlowStat {
let mut full_report = BTreeMap::new();
let mut last_exec_time_map = BTreeMap::new();

View File

@@ -33,8 +33,8 @@ use crate::adapter::table_source::TableDesc;
use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
use crate::repr::{ColumnType, RelationDesc, RelationType};
use crate::StreamingEngine;
impl StreamingEngine {
use crate::FlowStreamingEngine;
impl FlowStreamingEngine {
/// Get a worker handle for creating flow, using round robin to select a worker
pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
let use_idx = {

View File

@@ -31,19 +31,4 @@ pub const DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
/// The minimum duration between two queries execution by batching mode task
pub const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
/// Grpc connection timeout
const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
/// Grpc max retry number
const GRPC_MAX_RETRIES: u32 = 3;
/// Flow wait for available frontend timeout,
/// if failed to find available frontend after FRONTEND_SCAN_TIMEOUT elapsed, return error
/// which should prevent flownode from starting
pub const FRONTEND_SCAN_TIMEOUT: Duration = Duration::from_secs(30);
/// Frontend activity timeout
/// if frontend is down(not sending heartbeat) for more than FRONTEND_ACTIVITY_TIMEOUT, it will be removed from the list that flownode use to connect
pub const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);

View File

@@ -49,8 +49,7 @@ use crate::{CreateFlowArgs, Error, FlowId, TableName};
pub struct BatchingEngine {
tasks: RwLock<BTreeMap<FlowId, BatchingTask>>,
shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
/// frontend client for insert request
pub(crate) frontend_client: Arc<FrontendClient>,
frontend_client: Arc<FrontendClient>,
flow_metadata_manager: FlowMetadataManagerRef,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,
@@ -268,8 +267,7 @@ impl BatchingEngine {
// also check table option to see if ttl!=instant
let table_name = get_table_name(self.table_meta.table_info_manager(), &src_id).await?;
let table_info = get_table_info(self.table_meta.table_info_manager(), &src_id).await?;
ensure!(
table_info.table_info.meta.options.ttl != Some(TimeToLive::Instant),
if table_info.table_info.meta.options.ttl == Some(TimeToLive::Instant) {
UnsupportedSnafu {
reason: format!(
"Source table `{}`(id={}) has instant TTL, Instant TTL is not supported under batching mode. Consider using a TTL longer than flush interval",
@@ -277,8 +275,8 @@ impl BatchingEngine {
src_id
),
}
);
.fail()?;
}
source_table_names.push(table_name);
}

View File

@@ -15,7 +15,6 @@
//! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user
use std::sync::{Arc, Weak};
use std::time::SystemTime;
use api::v1::greptime_request::Request;
use api::v1::CreateTableExpr;
@@ -26,18 +25,13 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
use common_meta::peer::Peer;
use common_meta::rpc::store::RangeRequest;
use common_query::Output;
use common_telemetry::warn;
use itertools::Itertools;
use meta_client::client::MetaClient;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use crate::batching_mode::{
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
GRPC_MAX_RETRIES,
};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
use crate::batching_mode::DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT;
use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
use crate::Error;
/// Just like [`GrpcQueryHandler`] but use BoxedError
@@ -85,6 +79,7 @@ pub enum FrontendClient {
Standalone {
/// for the sake of simplicity still use grpc even in standalone mode
/// notice the client here should all be lazy, so that can wait after frontend is booted then make conn
/// TODO(discord9): not use grpc under standalone mode
database_client: HandlerMutable,
},
}
@@ -105,9 +100,7 @@ impl FrontendClient {
Self::Distributed {
meta_client,
chnl_mgr: {
let cfg = ChannelConfig::new()
.connect_timeout(GRPC_CONN_TIMEOUT)
.timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
let cfg = ChannelConfig::new().timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
ChannelManager::with_config(cfg)
},
}
@@ -130,24 +123,10 @@ impl DatabaseWithPeer {
fn new(database: Database, peer: Peer) -> Self {
Self { database, peer }
}
/// Try sending a "SELECT 1" to the database
async fn try_select_one(&self) -> Result<(), Error> {
// notice here use `sql` for `SELECT 1` return 1 row
let _ = self
.database
.sql("SELECT 1")
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle `SELECT 1` request at {:?}", self.peer),
})?;
Ok(())
}
}
impl FrontendClient {
/// scan for available frontend from metadata
pub(crate) async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
let Self::Distributed { meta_client, .. } = self else {
return Ok(vec![]);
};
@@ -177,8 +156,8 @@ impl FrontendClient {
Ok(res)
}
/// Get the database with maximum `last_activity_ts`& is able to process query
async fn get_latest_active_frontend(
/// Get the database with max `last_activity_ts`
async fn get_last_active_frontend(
&self,
catalog: &str,
schema: &str,
@@ -194,50 +173,22 @@ impl FrontendClient {
.fail();
};
let mut interval = tokio::time::interval(GRPC_CONN_TIMEOUT);
interval.tick().await;
for retry in 0..GRPC_MAX_RETRIES {
let frontends = self.scan_for_frontend().await?;
let now_in_ms = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_millis() as i64;
let frontends = self.scan_for_frontend().await?;
let mut peer = None;
// found node with maximum last_activity_ts
for (_, node_info) in frontends
.iter()
.sorted_by_key(|(_, node_info)| node_info.last_activity_ts)
.rev()
// filter out frontend that have been down for more than 1 min
.filter(|(_, node_info)| {
node_info.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64
> now_in_ms
})
{
let addr = &node_info.peer.addr;
let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![addr.clone()]);
let database = Database::new(catalog, schema, client);
let db = DatabaseWithPeer::new(database, node_info.peer.clone());
match db.try_select_one().await {
Ok(_) => return Ok(db),
Err(e) => {
warn!(
"Failed to connect to frontend {} on retry={}: \n{e:?}",
addr, retry
);
}
}
if let Some((_, val)) = frontends.iter().max_by_key(|(_, val)| val.last_activity_ts) {
peer = Some(val.peer.clone());
}
let Some(peer) = peer else {
UnexpectedSnafu {
reason: format!("No frontend available: {:?}", frontends),
}
// no available frontend
// sleep and retry
interval.tick().await;
}
NoAvailableFrontendSnafu {
timeout: GRPC_CONN_TIMEOUT,
context: "No available frontend found that is able to process query",
}
.fail()
.fail()?
};
let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![peer.addr.clone()]);
let database = Database::new(catalog, schema, client);
Ok(DatabaseWithPeer::new(database, peer))
}
pub async fn create(
@@ -267,17 +218,17 @@ impl FrontendClient {
) -> Result<u32, Error> {
match self {
FrontendClient::Distributed { .. } => {
let db = self.get_latest_active_frontend(catalog, schema).await?;
let db = self.get_last_active_frontend(catalog, schema).await?;
*peer_desc = Some(PeerDesc::Dist {
peer: db.peer.clone(),
});
db.database
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
.handle(req.clone())
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle request at {:?}: {:?}", db.peer, req),
context: format!("Failed to handle request: {:?}", req),
})
}
FrontendClient::Standalone { database_client } => {

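The selection logic being reshaped here ranks frontends by `last_activity_ts`; the stricter variant also drops nodes that have been silent longer than an activity timeout and probes each candidate with a cheap `SELECT 1` before using it. A self-contained sketch of the ranking step (types and names are simplified stand-ins, not the real `NodeInfo`/`Peer`):

```rust
use std::time::Duration;

/// How long a frontend may go without a heartbeat before it is skipped.
const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);

#[derive(Clone, Debug)]
struct FrontendInfo {
    addr: String,
    /// Milliseconds since the Unix epoch of the node's last reported activity.
    last_activity_ts: i64,
}

/// Return candidates ordered most-recently-active first, dropping nodes whose
/// heartbeat is older than the activity timeout; the caller probes each one
/// (e.g. with `SELECT 1`) and keeps the first that answers.
fn rank_frontends(mut frontends: Vec<FrontendInfo>, now_ms: i64) -> Vec<FrontendInfo> {
    frontends.retain(|node| {
        node.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64 > now_ms
    });
    frontends.sort_by_key(|node| std::cmp::Reverse(node.last_activity_ts));
    frontends
}
```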
View File

@@ -36,7 +36,7 @@ use operator::expr_helper::column_schemas_to_defs;
use query::query_engine::DefaultSerializer;
use query::QueryEngineRef;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio::sync::oneshot;
use tokio::sync::oneshot::error::TryRecvError;
@@ -53,7 +53,6 @@ use crate::batching_mode::utils::{
use crate::batching_mode::{
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, MIN_REFRESH_DURATION, SLOW_QUERY_THRESHOLD,
};
use crate::df_optimizer::apply_df_optimizer;
use crate::error::{
ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu,
SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
@@ -223,15 +222,15 @@ impl BatchingTask {
.map(|c| c.name)
.collect::<BTreeSet<_>>();
for column in new_query.schema().columns() {
ensure!(
table_columns.contains(column.name()),
InvalidQuerySnafu {
if !table_columns.contains(column.name()) {
return InvalidQuerySnafu {
reason: format!(
"Column {} not found in sink table with columns {:?}",
column, table_columns
),
}
);
.fail();
}
}
// update_at & time index placeholder (if it exists) should have default values
LogicalPlan::Dml(DmlStatement::new(
@@ -542,10 +541,7 @@ impl BatchingTask {
.clone()
.rewrite(&mut add_auto_column)
.with_context(|_| DatafusionSnafu {
context: format!(
"Failed to rewrite plan:\n {}\n",
self.config.plan
),
context: format!("Failed to rewrite plan {:?}", self.config.plan),
})?
.data;
let schema_len = plan.schema().fields().len();
@@ -577,19 +573,16 @@ impl BatchingTask {
let mut add_filter = AddFilterRewriter::new(expr);
let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_schema.clone());
// make an unoptimized plan for clearer unparsing
let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.config.query, false)
.await?;
let rewrite = plan
.clone()
plan.clone()
.rewrite(&mut add_filter)
.and_then(|p| p.data.rewrite(&mut add_auto_column))
.with_context(|_| DatafusionSnafu {
context: format!("Failed to rewrite plan:\n {}\n", plan),
context: format!("Failed to rewrite plan {plan:?}"),
})?
.data;
// only apply optimization after the complex rewrite is done
apply_df_optimizer(rewrite).await?
.data
};
Ok(Some((new_plan, schema_len)))

View File

@@ -704,28 +704,6 @@ mod test {
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with where
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number in (2, 3, 4) GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with between and
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number BETWEEN 2 AND 4 GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// no time index
(
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",

View File
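The expected lower/upper bounds in these cases (1740394080 and 1740394140 for the dirty timestamp 1740394109) come from aligning the timestamp down to its 1-minute window. A minimal sketch of that alignment arithmetic, not the engine's actual implementation:

```rust
/// Align a timestamp (in seconds) down to the start of its window and return
/// the inclusive [start, end] pair used for the injected ts filter,
/// e.g. 1740394109 with a 60 s window -> (1740394080, 1740394140).
fn align_time_window(ts_secs: i64, window_secs: i64) -> (i64, i64) {
    let start = ts_secs - ts_secs.rem_euclid(window_secs);
    (start, start + window_secs)
}
```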

@@ -50,8 +50,8 @@ pub async fn get_table_info_df_schema(
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(TableNotFoundSnafu {
name: &full_table_name,
.with_context(|| TableNotFoundSnafu {
name: full_table_name.clone(),
})?;
let table_info = table.table_info().clone();
@@ -342,8 +342,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
}
} else {
return Err(DataFusionError::Plan(format!(
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}",
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas()
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?} at node {:?}",
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas(), node
)));
}
@@ -358,6 +358,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
}
}
// TODO(discord9): a method to find out the precise time window
/// Find the `Filter` node corresponding to the innermost (deepest) `WHERE` and add a new filter expr to it
#[derive(Debug)]
pub struct AddFilterRewriter {
@@ -406,9 +408,7 @@ mod test {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use pretty_assertions::assert_eq;
use query::query_engine::DefaultSerializer;
use session::context::QueryContext;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use super::*;
use crate::test_utils::create_test_query_engine;
@@ -703,18 +703,4 @@ mod test {
);
}
}
#[tokio::test]
async fn test_null_cast() {
let query_engine = create_test_query_engine();
let ctx = QueryContext::arc();
let sql = "SELECT NULL::DOUBLE FROM numbers_with_ts";
let plan = sql_to_df_plan(ctx, query_engine.clone(), sql, false)
.await
.unwrap();
let _sub_plan = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.unwrap();
}
}

View File
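The error message in the hunk above encodes a simple rule: the sink table may have at most two more columns than the query output (an `update_at` column and an optional time-index placeholder). A minimal sketch of that check, with hypothetical names:

```rust
/// Hypothetical sketch of the column-count rule: the sink table may carry
/// 0, 1, or 2 extra columns beyond what the query produces.
fn extra_sink_columns(query_col_cnt: usize, table_col_cnt: usize) -> Result<usize, String> {
    match table_col_cnt.checked_sub(query_col_cnt) {
        Some(extra @ 0..=2) => Ok(extra),
        _ => Err(format!(
            "Expect table to have 0, 1 or 2 more columns than the query, \
             found {query_col_cnt} query columns and {table_col_cnt} table columns"
        )),
    }
}
```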

@@ -25,6 +25,7 @@ use datafusion::config::ConfigOptions;
use datafusion::error::DataFusionError;
use datafusion::functions_aggregate::count::count_udaf;
use datafusion::functions_aggregate::sum::sum_udaf;
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
use datafusion::optimizer::optimize_projections::OptimizeProjections;
@@ -41,7 +42,6 @@ use datafusion_expr::{
BinaryExpr, ColumnarValue, Expr, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl,
Signature, TypeSignature, Volatility,
};
use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule;
use query::parser::QueryLanguageParser;
use query::query_engine::DefaultSerializer;
use query::QueryEngine;
@@ -61,9 +61,9 @@ pub async fn apply_df_optimizer(
) -> Result<datafusion_expr::LogicalPlan, Error> {
let cfg = ConfigOptions::new();
let analyzer = Analyzer::with_rules(vec![
Arc::new(CountWildcardToTimeIndexRule),
Arc::new(AvgExpandRule),
Arc::new(TumbleExpandRule),
Arc::new(CountWildcardRule::new()),
Arc::new(AvgExpandRule::new()),
Arc::new(TumbleExpandRule::new()),
Arc::new(CheckGroupByRule::new()),
Arc::new(TypeCoercion::new()),
]);
@@ -128,7 +128,13 @@ pub async fn sql_to_flow_plan(
}
#[derive(Debug)]
struct AvgExpandRule;
struct AvgExpandRule {}
impl AvgExpandRule {
pub fn new() -> Self {
Self {}
}
}
impl AnalyzerRule for AvgExpandRule {
fn analyze(
@@ -325,7 +331,13 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> {
/// Expand tumble in aggr expr to tumble_start and tumble_end with column names like `window_start`
#[derive(Debug)]
struct TumbleExpandRule;
struct TumbleExpandRule {}
impl TumbleExpandRule {
pub fn new() -> Self {
Self {}
}
}
impl AnalyzerRule for TumbleExpandRule {
fn analyze(

View File

@@ -61,16 +61,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"No available frontend found after timeout: {timeout:?}, context: {context}"
))]
NoAvailableFrontend {
timeout: std::time::Duration,
context: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("External error"))]
External {
source: BoxedError,
@@ -306,8 +296,7 @@ impl ErrorExt for Error {
Self::Eval { .. }
| Self::JoinTask { .. }
| Self::Datafusion { .. }
| Self::InsertIntoFlow { .. }
| Self::NoAvailableFrontend { .. } => StatusCode::Internal,
| Self::InsertIntoFlow { .. } => StatusCode::Internal,
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
Self::TableNotFound { .. }
| Self::TableNotFoundMeta { .. }

View File

@@ -43,7 +43,7 @@ mod utils;
#[cfg(test)]
mod test_utils;
pub use adapter::{FlowConfig, FlowStreamingEngineRef, FlownodeOptions, StreamingEngine};
pub use adapter::{FlowConfig, FlowStreamingEngine, FlowWorkerManagerRef, FlownodeOptions};
pub use batching_mode::frontend_client::{FrontendClient, GrpcQueryHandlerWithBoxedError};
pub(crate) use engine::{CreateFlowArgs, FlowId, TableName};
pub use error::{Error, Result};

View File

@@ -52,7 +52,7 @@ use tonic::transport::server::TcpIncoming;
use tonic::{Request, Response, Status};
use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef};
use crate::adapter::{create_worker, FlowStreamingEngineRef};
use crate::adapter::{create_worker, FlowWorkerManagerRef};
use crate::batching_mode::engine::BatchingEngine;
use crate::engine::FlowEngine;
use crate::error::{
@@ -63,12 +63,13 @@ use crate::heartbeat::HeartbeatTask;
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
use crate::transform::register_function_to_query_engine;
use crate::utils::{SizeReportSender, StateReportHandler};
use crate::{CreateFlowArgs, Error, FlownodeOptions, FrontendClient, StreamingEngine};
use crate::{CreateFlowArgs, Error, FlowStreamingEngine, FlownodeOptions, FrontendClient};
pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
/// Wraps the flow node manager to avoid the orphan rule with Arc<...>
#[derive(Clone)]
pub struct FlowService {
/// TODO(discord9): replace with dual engine
pub dual_engine: FlowDualEngineRef,
}
@@ -172,8 +173,6 @@ impl FlownodeServer {
}
/// Start the background task for streaming computation.
///
/// Should be called only after the heartbeat is established, so it can get cluster info
async fn start_workers(&self) -> Result<(), Error> {
let manager_ref = self.inner.flow_service.dual_engine.clone();
let handle = manager_ref
@@ -439,7 +438,6 @@ impl FlownodeBuilder {
let cnt = to_be_recovered.len();
// TODO(discord9): recover in parallel
info!("Recovering {} flows: {:?}", cnt, to_be_recovered);
for flow_id in to_be_recovered {
let info = self
.flow_metadata_manager
@@ -455,7 +453,6 @@ impl FlownodeBuilder {
info.sink_table_name().schema_name.clone(),
info.sink_table_name().table_name.clone(),
];
let args = CreateFlowArgs {
flow_id: flow_id as _,
sink_table_name,
@@ -469,24 +466,11 @@ impl FlownodeBuilder {
comment: Some(info.comment().clone()),
sql: info.raw_sql().clone(),
flow_options: info.options().clone(),
query_ctx: info
.query_context()
.clone()
.map(|ctx| {
ctx.try_into()
.map_err(BoxedError::new)
.context(ExternalSnafu)
})
.transpose()?
// or use a default QueryContext with the catalog_name from info
// to keep compatibility with older versions
.or_else(|| {
Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().to_string())
.build(),
)
}),
query_ctx: Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().clone())
.build(),
),
};
manager
.create_flow(args)
@@ -505,7 +489,7 @@ impl FlownodeBuilder {
async fn build_manager(
&mut self,
query_engine: Arc<dyn QueryEngine>,
) -> Result<StreamingEngine, Error> {
) -> Result<FlowStreamingEngine, Error> {
let table_meta = self.table_meta.clone();
register_function_to_query_engine(&query_engine);
@@ -514,7 +498,7 @@ impl FlownodeBuilder {
let node_id = self.opts.node_id.map(|id| id as u32);
let mut man = StreamingEngine::new(node_id, query_engine, table_meta);
let mut man = FlowStreamingEngine::new(node_id, query_engine, table_meta);
for worker_id in 0..num_workers {
let (tx, rx) = oneshot::channel();
@@ -621,7 +605,7 @@ impl FrontendInvoker {
}
pub async fn build_from(
flow_streaming_engine: FlowStreamingEngineRef,
flow_worker_manager: FlowWorkerManagerRef,
catalog_manager: CatalogManagerRef,
kv_backend: KvBackendRef,
layered_cache_registry: LayeredCacheRegistryRef,
@@ -656,7 +640,7 @@ impl FrontendInvoker {
node_manager.clone(),
));
let query_engine = flow_streaming_engine.query_engine.clone();
let query_engine = flow_worker_manager.query_engine.clone();
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),

View File

@@ -46,11 +46,7 @@ pub struct ChineseTokenizer;
impl Tokenizer for ChineseTokenizer {
fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str> {
if text.is_ascii() {
EnglishTokenizer {}.tokenize(text)
} else {
JIEBA.cut(text, false)
}
JIEBA.cut(text, false)
}
}

View File

@@ -66,12 +66,10 @@ use crate::election::postgres::PgElection;
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
use crate::election::CANDIDATE_LEASE_SECS;
use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectTarget, SelectorRef};
use crate::node_excluder::NodeExcluderRef;
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::load_based::LoadBasedSelector;
use crate::selector::round_robin::RoundRobinSelector;
use crate::selector::weight_compute::RegionNumsBasedWeightCompute;
use crate::selector::SelectorType;
use crate::service::admin;
use crate::{error, Result};
@@ -296,25 +294,14 @@ pub async fn metasrv_builder(
let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
let node_excluder = plugins
.get::<NodeExcluderRef>()
.unwrap_or_else(|| Arc::new(Vec::new()) as NodeExcluderRef);
let selector = if let Some(selector) = plugins.get::<SelectorRef>() {
info!("Using selector from plugins");
selector
} else {
let selector = match opts.selector {
SelectorType::LoadBased => Arc::new(LoadBasedSelector::new(
RegionNumsBasedWeightCompute,
node_excluder,
)) as SelectorRef,
SelectorType::LeaseBased => {
Arc::new(LeaseBasedSelector::new(node_excluder)) as SelectorRef
}
SelectorType::RoundRobin => Arc::new(RoundRobinSelector::new(
SelectTarget::Datanode,
node_excluder,
)) as SelectorRef,
SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
SelectorType::RoundRobin => Arc::new(RoundRobinSelector::default()) as SelectorRef,
};
info!(
"Using selector from options, selector type: {}",

View File

@@ -31,7 +31,6 @@ pub mod metasrv;
pub mod metrics;
#[cfg(feature = "mock")]
pub mod mocks;
pub mod node_excluder;
pub mod procedure;
pub mod pubsub;
pub mod region;

View File

@@ -14,7 +14,7 @@
pub mod builder;
use std::fmt::{self, Display};
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
@@ -96,7 +96,7 @@ pub enum BackendImpl {
MysqlStore,
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct MetasrvOptions {
/// The address the server listens on.
@@ -111,11 +111,6 @@ pub struct MetasrvOptions {
pub use_memory_store: bool,
/// Whether to enable region failover.
pub enable_region_failover: bool,
/// Whether to allow region failover on local WAL.
///
/// If it's true, the region failover will be allowed even if the local WAL is used.
/// Note that this option is not recommended to be set to true, because it may lead to data loss during failover.
pub allow_region_failover_on_local_wal: bool,
/// The HTTP server options.
pub http: HttpOptions,
/// The logging options.
@@ -166,47 +161,6 @@ pub struct MetasrvOptions {
pub node_max_idle_time: Duration,
}
impl fmt::Debug for MetasrvOptions {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut debug_struct = f.debug_struct("MetasrvOptions");
debug_struct
.field("bind_addr", &self.bind_addr)
.field("server_addr", &self.server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
.field("enable_region_failover", &self.enable_region_failover)
.field(
"allow_region_failover_on_local_wal",
&self.allow_region_failover_on_local_wal,
)
.field("http", &self.http)
.field("logging", &self.logging)
.field("procedure", &self.procedure)
.field("failure_detector", &self.failure_detector)
.field("datanode", &self.datanode)
.field("enable_telemetry", &self.enable_telemetry)
.field("data_home", &self.data_home)
.field("wal", &self.wal)
.field("export_metrics", &self.export_metrics)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("flush_stats_factor", &self.flush_stats_factor)
.field("tracing", &self.tracing)
.field("backend", &self.backend);
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
debug_struct.field("meta_table_name", &self.meta_table_name);
#[cfg(feature = "pg_kvbackend")]
debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);
debug_struct
.field("node_max_idle_time", &self.node_max_idle_time)
.finish()
}
}
const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
impl Default for MetasrvOptions {
@@ -219,7 +173,6 @@ impl Default for MetasrvOptions {
selector: SelectorType::default(),
use_memory_store: false,
enable_region_failover: false,
allow_region_failover_on_local_wal: false,
http: HttpOptions::default(),
logging: LoggingOptions {
dir: format!("{METASRV_HOME}/logs"),
@@ -290,13 +243,6 @@ impl MetasrvOptions {
common_telemetry::debug!("detect local IP is not supported on Android");
}
}
fn sanitize_store_addrs(&self) -> Vec<String> {
self.store_addrs
.iter()
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
.collect()
}
}
pub struct MetasrvInfo {

View File

@@ -40,8 +40,7 @@ use common_meta::state_store::KvStateStore;
use common_meta::wal_options_allocator::{build_kafka_client, build_wal_options_allocator};
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use common_telemetry::warn;
use snafu::{ensure, ResultExt};
use snafu::ResultExt;
use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
@@ -191,7 +190,7 @@ impl MetasrvBuilder {
let meta_peer_client = meta_peer_client
.unwrap_or_else(|| build_default_meta_peer_client(&election, &in_memory));
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector::default()));
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector));
let pushers = Pushers::default();
let mailbox = build_mailbox(&kv_backend, &pushers);
let procedure_manager = build_procedure_manager(&options, &kv_backend);
@@ -235,17 +234,13 @@ impl MetasrvBuilder {
))
});
let flow_selector = Arc::new(RoundRobinSelector::new(
SelectTarget::Flownode,
Arc::new(Vec::new()),
)) as SelectorRef;
let flow_metadata_allocator = {
// for now flownode just use round-robin selector
let flow_selector = RoundRobinSelector::new(SelectTarget::Flownode);
let flow_selector_ctx = selector_ctx.clone();
let peer_allocator = Arc::new(FlowPeerAllocator::new(
flow_selector_ctx,
flow_selector.clone(),
Arc::new(flow_selector),
));
let seq = Arc::new(
SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
@@ -277,25 +272,18 @@ impl MetasrvBuilder {
},
));
let peer_lookup_service = Arc::new(MetaPeerLookupService::new(meta_peer_client.clone()));
if !is_remote_wal && options.enable_region_failover {
ensure!(
options.allow_region_failover_on_local_wal,
error::UnexpectedSnafu {
violated: "Region failover is not supported in the local WAL implementation!
If you want to enable region failover for local WAL, please set `allow_region_failover_on_local_wal` to true.",
}
);
if options.allow_region_failover_on_local_wal {
warn!("Region failover is force enabled in the local WAL implementation! This may lead to data loss during failover!");
return error::UnexpectedSnafu {
violated: "Region failover is not supported in the local WAL implementation!",
}
.fail();
}
let (tx, rx) = RegionSupervisor::channel();
let (region_failure_detector_controller, region_supervisor_ticker): (
RegionFailureDetectorControllerRef,
Option<std::sync::Arc<RegionSupervisorTicker>>,
) = if options.enable_region_failover {
) = if options.enable_region_failover && is_remote_wal {
(
Arc::new(RegionFailureDetectorControl::new(tx.clone())) as _,
Some(Arc::new(RegionSupervisorTicker::new(
@@ -321,7 +309,7 @@ impl MetasrvBuilder {
));
region_migration_manager.try_start()?;
let region_failover_handler = if options.enable_region_failover {
let region_failover_handler = if options.enable_region_failover && is_remote_wal {
let region_supervisor = RegionSupervisor::new(
rx,
options.failure_detector,
@@ -432,7 +420,7 @@ impl MetasrvBuilder {
meta_peer_client: meta_peer_client.clone(),
selector,
// TODO(jeremy): We do not allow configuring the flow selector.
flow_selector,
flow_selector: Arc::new(RoundRobinSelector::new(SelectTarget::Flownode)),
handler_group: RwLock::new(None),
handler_group_builder: Mutex::new(Some(handler_group_builder)),
election,

View File

@@ -71,13 +71,4 @@ lazy_static! {
/// The remote WAL prune execute counter.
pub static ref METRIC_META_REMOTE_WAL_PRUNE_EXECUTE: IntCounterVec =
register_int_counter_vec!("greptime_meta_remote_wal_prune_execute", "meta remote wal prune execute", &["topic_name"]).unwrap();
/// The migration stage elapsed histogram.
pub static ref METRIC_META_REGION_MIGRATION_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
"greptime_meta_region_migration_stage_elapsed",
"meta region migration stage elapsed",
&["stage"],
// 0.01 ~ 1000
exponential_buckets(0.01, 10.0, 7).unwrap(),
)
.unwrap();
}

View File

@@ -1,32 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_meta::DatanodeId;
pub type NodeExcluderRef = Arc<dyn NodeExcluder>;
/// [NodeExcluder] is used to help decide whether some nodes should be excluded (out of consideration)
/// in certain situations. For example, in some node selectors.
pub trait NodeExcluder: Send + Sync {
/// Returns the excluded datanode ids.
fn excluded_datanode_ids(&self) -> &Vec<DatanodeId>;
}
impl NodeExcluder for Vec<DatanodeId> {
fn excluded_datanode_ids(&self) -> &Vec<DatanodeId> {
self
}
}

View File
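The `NodeExcluder` trait above lets a deployment statically exclude datanodes from selection; the selectors later in this diff merge those ids with the per-request `exclude_peer_ids` before filtering peers. A minimal sketch of that merge, assuming a hypothetical id alias:

```rust
use std::collections::HashSet;

// Hypothetical alias; the real DatanodeId comes from common_meta.
type DatanodeId = u64;

/// Combine statically excluded datanodes (from a NodeExcluder) with the
/// per-request exclusions before filtering candidate peers.
fn combined_exclusions(
    statically_excluded: &[DatanodeId],
    request_excluded: &HashSet<DatanodeId>,
) -> HashSet<DatanodeId> {
    let mut all: HashSet<DatanodeId> = statically_excluded.iter().copied().collect();
    all.extend(request_excluded.iter().copied());
    all
}
```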

@@ -25,7 +25,7 @@ pub(crate) mod update_metadata;
pub(crate) mod upgrade_candidate_region;
use std::any::Any;
use std::fmt::{Debug, Display};
use std::fmt::Debug;
use std::time::Duration;
use common_error::ext::BoxedError;
@@ -43,7 +43,7 @@ use common_procedure::error::{
Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status, StringKey};
use common_telemetry::{error, info};
use common_telemetry::info;
use manager::RegionMigrationProcedureGuard;
pub use manager::{
RegionMigrationManagerRef, RegionMigrationProcedureTask, RegionMigrationProcedureTracker,
@@ -55,10 +55,7 @@ use tokio::time::Instant;
use self::migration_start::RegionMigrationStart;
use crate::error::{self, Result};
use crate::metrics::{
METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE,
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED,
};
use crate::metrics::{METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE};
use crate::service::mailbox::MailboxRef;
/// The default timeout for region migration.
@@ -106,82 +103,6 @@ impl PersistentContext {
}
}
/// Metrics of region migration.
#[derive(Debug, Clone, Default)]
pub struct Metrics {
/// Elapsed time of downgrading region and upgrading region.
operations_elapsed: Duration,
/// Elapsed time of downgrading leader region.
downgrade_leader_region_elapsed: Duration,
/// Elapsed time of open candidate region.
open_candidate_region_elapsed: Duration,
/// Elapsed time of upgrade candidate region.
upgrade_candidate_region_elapsed: Duration,
}
impl Display for Metrics {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"operations_elapsed: {:?}, downgrade_leader_region_elapsed: {:?}, open_candidate_region_elapsed: {:?}, upgrade_candidate_region_elapsed: {:?}",
self.operations_elapsed,
self.downgrade_leader_region_elapsed,
self.open_candidate_region_elapsed,
self.upgrade_candidate_region_elapsed
)
}
}
impl Metrics {
/// Updates the elapsed time of downgrading region and upgrading region.
pub fn update_operations_elapsed(&mut self, elapsed: Duration) {
self.operations_elapsed += elapsed;
}
/// Updates the elapsed time of downgrading leader region.
pub fn update_downgrade_leader_region_elapsed(&mut self, elapsed: Duration) {
self.downgrade_leader_region_elapsed += elapsed;
}
/// Updates the elapsed time of open candidate region.
pub fn update_open_candidate_region_elapsed(&mut self, elapsed: Duration) {
self.open_candidate_region_elapsed += elapsed;
}
/// Updates the elapsed time of upgrade candidate region.
pub fn update_upgrade_candidate_region_elapsed(&mut self, elapsed: Duration) {
self.upgrade_candidate_region_elapsed += elapsed;
}
}
impl Drop for Metrics {
fn drop(&mut self) {
if !self.operations_elapsed.is_zero() {
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
.with_label_values(&["operations"])
.observe(self.operations_elapsed.as_secs_f64());
}
if !self.downgrade_leader_region_elapsed.is_zero() {
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
.with_label_values(&["downgrade_leader_region"])
.observe(self.downgrade_leader_region_elapsed.as_secs_f64());
}
if !self.open_candidate_region_elapsed.is_zero() {
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
.with_label_values(&["open_candidate_region"])
.observe(self.open_candidate_region_elapsed.as_secs_f64());
}
if !self.upgrade_candidate_region_elapsed.is_zero() {
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
.with_label_values(&["upgrade_candidate_region"])
.observe(self.upgrade_candidate_region_elapsed.as_secs_f64());
}
}
}
/// It's shared in each step and available during execution (including retrying).
///
/// It will be dropped if the procedure runner crashes.
@@ -211,8 +132,8 @@ pub struct VolatileContext {
leader_region_last_entry_id: Option<u64>,
/// The last_entry_id of leader metadata region (Only used for metric engine).
leader_region_metadata_last_entry_id: Option<u64>,
/// Metrics of region migration.
metrics: Metrics,
/// Elapsed time of downgrading region and upgrading region.
operations_elapsed: Duration,
}
impl VolatileContext {
@@ -310,35 +231,12 @@ impl Context {
pub fn next_operation_timeout(&self) -> Option<Duration> {
self.persistent_ctx
.timeout
.checked_sub(self.volatile_ctx.metrics.operations_elapsed)
.checked_sub(self.volatile_ctx.operations_elapsed)
}
/// Updates operations elapsed.
pub fn update_operations_elapsed(&mut self, instant: Instant) {
self.volatile_ctx
.metrics
.update_operations_elapsed(instant.elapsed());
}
/// Updates the elapsed time of downgrading leader region.
pub fn update_downgrade_leader_region_elapsed(&mut self, instant: Instant) {
self.volatile_ctx
.metrics
.update_downgrade_leader_region_elapsed(instant.elapsed());
}
/// Updates the elapsed time of open candidate region.
pub fn update_open_candidate_region_elapsed(&mut self, instant: Instant) {
self.volatile_ctx
.metrics
.update_open_candidate_region_elapsed(instant.elapsed());
}
/// Updates the elapsed time of upgrade candidate region.
pub fn update_upgrade_candidate_region_elapsed(&mut self, instant: Instant) {
self.volatile_ctx
.metrics
.update_upgrade_candidate_region_elapsed(instant.elapsed());
self.volatile_ctx.operations_elapsed += instant.elapsed();
}
/// Returns address of meta server.
@@ -652,14 +550,6 @@ impl Procedure for RegionMigrationProcedure {
.inc();
ProcedureError::retry_later(e)
} else {
error!(
e;
"Region migration procedure failed, region_id: {}, from_peer: {}, to_peer: {}, {}",
self.context.region_id(),
self.context.persistent_ctx.from_peer,
self.context.persistent_ctx.to_peer,
self.context.volatile_ctx.metrics,
);
METRIC_META_REGION_MIGRATION_ERROR
.with_label_values(&[name, "external"])
.inc();

View File

@@ -46,13 +46,7 @@ impl State for CloseDowngradedRegion {
let region_id = ctx.region_id();
warn!(err; "Failed to close downgraded leader region: {region_id} on datanode {:?}", downgrade_leader_datanode);
}
info!(
"Region migration is finished: region_id: {}, from_peer: {}, to_peer: {}, {}",
ctx.region_id(),
ctx.persistent_ctx.from_peer,
ctx.persistent_ctx.to_peer,
ctx.volatile_ctx.metrics,
);
Ok((Box::new(RegionMigrationEnd), Status::done()))
}

View File

@@ -54,7 +54,6 @@ impl Default for DowngradeLeaderRegion {
#[typetag::serde]
impl State for DowngradeLeaderRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let now = Instant::now();
// Ensures the `leader_region_lease_deadline` exists after recovering.
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
@@ -78,7 +77,6 @@ impl State for DowngradeLeaderRegion {
}
}
}
ctx.update_downgrade_leader_region_elapsed(now);
Ok((
Box::new(UpgradeCandidateRegion::default()),
@@ -350,8 +348,7 @@ mod tests {
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
prepare_table_metadata(&ctx, HashMap::default()).await;
ctx.volatile_ctx.metrics.operations_elapsed =
ctx.persistent_ctx.timeout + Duration::from_secs(1);
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
let err = state.downgrade_region(&mut ctx).await.unwrap_err();
@@ -594,8 +591,7 @@ mod tests {
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
ctx.volatile_ctx.metrics.operations_elapsed =
ctx.persistent_ctx.timeout + Duration::from_secs(1);
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx

View File

@@ -15,7 +15,6 @@
use std::any::Any;
use common_procedure::Status;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
use crate::error::{self, Result};
@@ -38,15 +37,7 @@ impl RegionMigrationAbort {
#[async_trait::async_trait]
#[typetag::serde]
impl State for RegionMigrationAbort {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
warn!(
"Region migration is aborted: {}, region_id: {}, from_peer: {}, to_peer: {}, {}",
self.reason,
ctx.region_id(),
ctx.persistent_ctx.from_peer,
ctx.persistent_ctx.to_peer,
ctx.volatile_ctx.metrics,
);
async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
error::MigrationAbortSnafu {
reason: &self.reason,
}

View File

@@ -13,7 +13,7 @@
// limitations under the License.
use std::any::Any;
use std::time::Duration;
use std::time::{Duration, Instant};
use api::v1::meta::MailboxMessage;
use common_meta::distributed_time_constants::REGION_LEASE_SECS;
@@ -24,7 +24,6 @@ use common_procedure::Status;
use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use tokio::time::Instant;
use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
@@ -43,9 +42,7 @@ pub struct OpenCandidateRegion;
impl State for OpenCandidateRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let instruction = self.build_open_region_instruction(ctx).await?;
let now = Instant::now();
self.open_candidate_region(ctx, instruction).await?;
ctx.update_open_candidate_region_elapsed(now);
Ok((
Box::new(UpdateMetadata::Downgrade),

View File

@@ -54,12 +54,9 @@ impl Default for UpgradeCandidateRegion {
#[typetag::serde]
impl State for UpgradeCandidateRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let now = Instant::now();
if self.upgrade_region_with_retry(ctx).await {
ctx.update_upgrade_candidate_region_elapsed(now);
Ok((Box::new(UpdateMetadata::Upgrade), Status::executing(false)))
} else {
ctx.update_upgrade_candidate_region_elapsed(now);
Ok((Box::new(UpdateMetadata::Rollback), Status::executing(false)))
}
}
@@ -291,8 +288,7 @@ mod tests {
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
ctx.volatile_ctx.metrics.operations_elapsed =
ctx.persistent_ctx.timeout + Duration::from_secs(1);
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
let err = state.upgrade_region(&ctx).await.unwrap_err();
@@ -562,8 +558,7 @@ mod tests {
let mut ctx = env.context_factory().new_context(persistent_context);
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
ctx.volatile_ctx.metrics.operations_elapsed =
ctx.persistent_ctx.timeout + Duration::from_secs(1);
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
let (tx, rx) = tokio::sync::mpsc::channel(1);
mailbox_ctx

View File

@@ -335,21 +335,22 @@ impl WalPruneProcedure {
})?;
partition_client
.delete_records(
// note: no "+1" is needed here because the offset arg is exclusive; this is defensive programming in case somewhere else has an off-by-one error, see https://kafka.apache.org/36/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#endOffsets(java.util.Collection) which we use to get the end offset from the high watermark
self.data.prunable_entry_id as i64,
(self.data.prunable_entry_id + 1) as i64,
DELETE_RECORDS_TIMEOUT.as_millis() as i32,
)
.await
.context(DeleteRecordsSnafu {
topic: &self.data.topic,
partition: DEFAULT_PARTITION,
offset: self.data.prunable_entry_id,
offset: (self.data.prunable_entry_id + 1),
})
.map_err(BoxedError::new)
.with_context(|_| error::RetryLaterWithSourceSnafu {
reason: format!(
"Failed to delete records for topic: {}, partition: {}, offset: {}",
self.data.topic, DEFAULT_PARTITION, self.data.prunable_entry_id
self.data.topic,
DEFAULT_PARTITION,
self.data.prunable_entry_id + 1
),
})?;
info!(
@@ -604,19 +605,19 @@ mod tests {
// Step 3: Test `on_prune`.
let status = procedure.on_prune().await.unwrap();
assert_matches!(status, Status::Done { output: None });
// Check if the entry ids after (including) `prunable_entry_id` still exist.
// Check if the entry ids after `prunable_entry_id` still exist.
check_entry_id_existence(
procedure.context.client.clone(),
&topic_name,
procedure.data.prunable_entry_id as i64 + 1,
true,
)
.await;
// Check if the entry ids before `prunable_entry_id` are deleted.
check_entry_id_existence(
procedure.context.client.clone(),
&topic_name,
procedure.data.prunable_entry_id as i64,
true,
)
.await;
// Check if the entry ids before `prunable_entry_id` are deleted.
check_entry_id_existence(
procedure.context.client.clone(),
&topic_name,
procedure.data.prunable_entry_id as i64 - 1,
false,
)
.await;

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::collections::HashSet;
use std::fmt::Debug;
use std::sync::{Arc, Mutex};
use std::time::Duration;
@@ -25,7 +25,7 @@ use common_meta::leadership_notifier::LeadershipChangeListener;
use common_meta::peer::PeerLookupServiceRef;
use common_meta::DatanodeId;
use common_runtime::JoinHandle;
use common_telemetry::{debug, error, info, warn};
use common_telemetry::{error, info, warn};
use common_time::util::current_time_millis;
use error::Error::{LeaderPeerChanged, MigrationRunning, TableRouteNotFound};
use snafu::{OptionExt, ResultExt};
@@ -208,8 +208,6 @@ pub const DEFAULT_TICK_INTERVAL: Duration = Duration::from_secs(1);
pub struct RegionSupervisor {
/// Used to detect the failure of regions.
failure_detector: RegionFailureDetector,
/// Tracks the number of failovers for each region.
failover_counts: HashMap<DetectingRegion, u32>,
/// Receives [Event]s.
receiver: Receiver<Event>,
/// The context of [`SelectorRef`]
@@ -295,7 +293,6 @@ impl RegionSupervisor {
) -> Self {
Self {
failure_detector: RegionFailureDetector::new(options),
failover_counts: HashMap::new(),
receiver: event_receiver,
selector_context,
selector,
@@ -339,14 +336,13 @@ impl RegionSupervisor {
}
}
async fn deregister_failure_detectors(&mut self, detecting_regions: Vec<DetectingRegion>) {
async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
for region in detecting_regions {
self.failure_detector.remove(&region);
self.failover_counts.remove(&region);
self.failure_detector.remove(&region)
}
}
async fn handle_region_failures(&mut self, mut regions: Vec<(DatanodeId, RegionId)>) {
async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
if regions.is_empty() {
return;
}
@@ -369,7 +365,8 @@ impl RegionSupervisor {
.collect::<Vec<_>>();
for (datanode_id, region_id) in migrating_regions {
debug!(
self.failure_detector.remove(&(datanode_id, region_id));
warn!(
"Removed region failover for region: {region_id}, datanode: {datanode_id} because it's migrating"
);
}
@@ -389,12 +386,7 @@ impl RegionSupervisor {
.context(error::MaintenanceModeManagerSnafu)
}
async fn do_failover(&mut self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
let count = *self
.failover_counts
.entry((datanode_id, region_id))
.and_modify(|count| *count += 1)
.or_insert(1);
async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
let from_peer = self
.peer_lookup
.datanode(datanode_id)
@@ -423,14 +415,11 @@ impl RegionSupervisor {
);
return Ok(());
}
info!(
"Failover for region: {region_id}, from_peer: {from_peer}, to_peer: {to_peer}, tries: {count}"
);
let task = RegionMigrationProcedureTask {
region_id,
from_peer,
to_peer,
timeout: DEFAULT_REGION_MIGRATION_TIMEOUT * count,
timeout: DEFAULT_REGION_MIGRATION_TIMEOUT,
};
if let Err(err) = self.region_migration_manager.submit_procedure(task).await {
@@ -444,8 +433,7 @@ impl RegionSupervisor {
Ok(())
}
TableRouteNotFound { .. } => {
self.deregister_failure_detectors(vec![(datanode_id, region_id)])
.await;
self.failure_detector.remove(&(datanode_id, region_id));
info!(
"Table route is not found, the table is dropped, removed failover detector for region: {}, datanode: {}",
region_id, datanode_id
@@ -453,8 +441,7 @@ impl RegionSupervisor {
Ok(())
}
LeaderPeerChanged { .. } => {
self.deregister_failure_detectors(vec![(datanode_id, region_id)])
.await;
self.failure_detector.remove(&(datanode_id, region_id));
info!(
"Region's leader peer changed, removed failover detector for region: {}, datanode: {}",
region_id, datanode_id

View File
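One side of the supervisor hunk tracks how many times a region has failed over and scales the migration timeout by that count. A minimal sketch of the counting pattern, with hypothetical type aliases and an assumed base timeout:

```rust
use std::collections::HashMap;
use std::time::Duration;

// Hypothetical id aliases matching the supervisor's DetectingRegion key.
type DatanodeId = u64;
type RegionId = u64;

// Assumed base value; the real constant lives in the meta-srv crate.
const DEFAULT_REGION_MIGRATION_TIMEOUT: Duration = Duration::from_secs(120);

/// Bump the per-region failover counter and derive a migration timeout that
/// grows linearly with the number of attempts.
fn next_failover_timeout(
    counts: &mut HashMap<(DatanodeId, RegionId), u32>,
    region: (DatanodeId, RegionId),
) -> Duration {
    let count = *counts.entry(region).and_modify(|c| *c += 1).or_insert(1);
    DEFAULT_REGION_MIGRATION_TIMEOUT * count
}
```

As the hunk above shows, the counter entry is removed together with the failure detector in `deregister_failure_detectors`, so the count does not grow without bound.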

@@ -18,7 +18,7 @@ pub mod load_based;
pub mod round_robin;
#[cfg(test)]
pub(crate) mod test_utils;
pub mod weight_compute;
mod weight_compute;
pub mod weighted_choose;
use std::collections::HashSet;

View File

@@ -12,37 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::Arc;
use common_meta::peer::Peer;
use crate::error::Result;
use crate::lease;
use crate::metasrv::SelectorContext;
use crate::node_excluder::NodeExcluderRef;
use crate::selector::common::{choose_items, filter_out_excluded_peers};
use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
use crate::selector::{Selector, SelectorOptions};
/// Select all alive datanodes using a random weighted choose.
pub struct LeaseBasedSelector {
node_excluder: NodeExcluderRef,
}
impl LeaseBasedSelector {
pub fn new(node_excluder: NodeExcluderRef) -> Self {
Self { node_excluder }
}
}
impl Default for LeaseBasedSelector {
fn default() -> Self {
Self {
node_excluder: Arc::new(Vec::new()),
}
}
}
pub struct LeaseBasedSelector;
#[async_trait::async_trait]
impl Selector for LeaseBasedSelector {
@@ -67,14 +47,7 @@ impl Selector for LeaseBasedSelector {
.collect();
// 3. choose peers by weight_array.
let mut exclude_peer_ids = self
.node_excluder
.excluded_datanode_ids()
.iter()
.cloned()
.collect::<HashSet<_>>();
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
filter_out_excluded_peers(&mut weight_array, &exclude_peer_ids);
filter_out_excluded_peers(&mut weight_array, &opts.exclude_peer_ids);
let mut weighted_choose = RandomWeightedChoose::new(weight_array);
let selected = choose_items(&opts, &mut weighted_choose)?;

View File

@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::collections::HashMap;
use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue};
use common_meta::key::TableMetadataManager;
@@ -27,7 +26,6 @@ use crate::error::{self, Result};
use crate::key::{DatanodeLeaseKey, LeaseValue};
use crate::lease;
use crate::metasrv::SelectorContext;
use crate::node_excluder::NodeExcluderRef;
use crate::selector::common::{choose_items, filter_out_excluded_peers};
use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
use crate::selector::weighted_choose::RandomWeightedChoose;
@@ -35,15 +33,11 @@ use crate::selector::{Selector, SelectorOptions};
pub struct LoadBasedSelector<C> {
weight_compute: C,
node_excluder: NodeExcluderRef,
}
impl<C> LoadBasedSelector<C> {
pub fn new(weight_compute: C, node_excluder: NodeExcluderRef) -> Self {
Self {
weight_compute,
node_excluder,
}
pub fn new(weight_compute: C) -> Self {
Self { weight_compute }
}
}
@@ -51,7 +45,6 @@ impl Default for LoadBasedSelector<RegionNumsBasedWeightCompute> {
fn default() -> Self {
Self {
weight_compute: RegionNumsBasedWeightCompute,
node_excluder: Arc::new(Vec::new()),
}
}
}
@@ -95,14 +88,7 @@ where
let mut weight_array = self.weight_compute.compute(&stat_kvs);
// 5. choose peers by weight_array.
let mut exclude_peer_ids = self
.node_excluder
.excluded_datanode_ids()
.iter()
.cloned()
.collect::<HashSet<_>>();
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
filter_out_excluded_peers(&mut weight_array, &exclude_peer_ids);
filter_out_excluded_peers(&mut weight_array, &opts.exclude_peer_ids);
let mut weighted_choose = RandomWeightedChoose::new(weight_array);
let selected = choose_items(&opts, &mut weighted_choose)?;

View File

@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use common_meta::peer::Peer;
use snafu::ensure;
@@ -22,7 +20,6 @@ use snafu::ensure;
use crate::error::{NoEnoughAvailableNodeSnafu, Result};
use crate::lease;
use crate::metasrv::{SelectTarget, SelectorContext};
use crate::node_excluder::NodeExcluderRef;
use crate::selector::{Selector, SelectorOptions};
/// Round-robin selector that returns the next peer in the list in sequence.
@@ -35,7 +32,6 @@ use crate::selector::{Selector, SelectorOptions};
pub struct RoundRobinSelector {
select_target: SelectTarget,
counter: AtomicUsize,
node_excluder: NodeExcluderRef,
}
impl Default for RoundRobinSelector {
@@ -43,38 +39,32 @@ impl Default for RoundRobinSelector {
Self {
select_target: SelectTarget::Datanode,
counter: AtomicUsize::new(0),
node_excluder: Arc::new(Vec::new()),
}
}
}
impl RoundRobinSelector {
pub fn new(select_target: SelectTarget, node_excluder: NodeExcluderRef) -> Self {
pub fn new(select_target: SelectTarget) -> Self {
Self {
select_target,
node_excluder,
..Default::default()
}
}
async fn get_peers(&self, opts: &SelectorOptions, ctx: &SelectorContext) -> Result<Vec<Peer>> {
async fn get_peers(
&self,
min_required_items: usize,
ctx: &SelectorContext,
) -> Result<Vec<Peer>> {
let mut peers = match self.select_target {
SelectTarget::Datanode => {
// 1. get alive datanodes.
let lease_kvs =
lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
let mut exclude_peer_ids = self
.node_excluder
.excluded_datanode_ids()
.iter()
.cloned()
.collect::<HashSet<_>>();
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
// 2. map into peers
lease_kvs
.into_iter()
.filter(|(k, _)| !exclude_peer_ids.contains(&k.node_id))
.map(|(k, v)| Peer::new(k.node_id, v.node_addr))
.collect::<Vec<_>>()
}
@@ -94,8 +84,8 @@ impl RoundRobinSelector {
ensure!(
!peers.is_empty(),
NoEnoughAvailableNodeSnafu {
required: opts.min_required_items,
available: peers.len(),
required: min_required_items,
available: 0usize,
select_target: self.select_target
}
);
@@ -113,7 +103,7 @@ impl Selector for RoundRobinSelector {
type Output = Vec<Peer>;
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>> {
let peers = self.get_peers(&opts, ctx).await?;
let peers = self.get_peers(opts.min_required_items, ctx).await?;
// choose peers
let mut selected = Vec::with_capacity(opts.min_required_items);
for _ in 0..opts.min_required_items {
@@ -186,42 +176,4 @@ mod test {
assert_eq!(peers.len(), 2);
assert_eq!(peers, vec![peer2.clone(), peer3.clone()]);
}
#[tokio::test]
async fn test_round_robin_selector_with_exclude_peer_ids() {
let selector = RoundRobinSelector::new(SelectTarget::Datanode, Arc::new(vec![5]));
let ctx = create_selector_context();
// add three nodes
let peer1 = Peer {
id: 2,
addr: "node1".to_string(),
};
let peer2 = Peer {
id: 5,
addr: "node2".to_string(),
};
let peer3 = Peer {
id: 8,
addr: "node3".to_string(),
};
put_datanodes(
&ctx.meta_peer_client,
vec![peer1.clone(), peer2.clone(), peer3.clone()],
)
.await;
let peers = selector
.select(
&ctx,
SelectorOptions {
min_required_items: 1,
allow_duplication: true,
exclude_peer_ids: HashSet::from([2]),
},
)
.await
.unwrap();
assert_eq!(peers.len(), 1);
assert_eq!(peers, vec![peer3.clone()]);
}
}

View File
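The round-robin selector above keeps an `AtomicUsize` counter and walks the filtered peer list in sequence. A minimal sketch of that selection loop, with a hypothetical `Peer` type:

```rust
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical minimal peer type.
#[derive(Clone)]
struct Peer {
    id: u64,
    addr: String,
}

struct RoundRobin {
    counter: AtomicUsize,
}

impl RoundRobin {
    /// Pick `n` peers in round-robin order, skipping excluded node ids.
    /// Returns an empty list when every candidate is excluded.
    fn select(&self, peers: &[Peer], excluded: &HashSet<u64>, n: usize) -> Vec<Peer> {
        let candidates: Vec<&Peer> =
            peers.iter().filter(|p| !excluded.contains(&p.id)).collect();
        if candidates.is_empty() {
            return Vec::new();
        }
        (0..n)
            .map(|_| {
                let idx = self.counter.fetch_add(1, Ordering::Relaxed) % candidates.len();
                candidates[idx].clone()
            })
            .collect()
    }
}
```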

@@ -18,13 +18,11 @@ common-error.workspace = true
common-macro.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
futures-util.workspace = true
humantime-serde.workspace = true
itertools.workspace = true
lazy_static = "1.4"
mito2.workspace = true

View File

@@ -12,49 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
/// The default flush interval of the metadata region.
pub(crate) const DEFAULT_FLUSH_METADATA_REGION_INTERVAL: Duration = Duration::from_secs(30);
/// Configuration for the metric engine.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct EngineConfig {
/// Experimental feature to use sparse primary key encoding.
pub experimental_sparse_primary_key_encoding: bool,
/// The flush interval of the metadata region.
#[serde(
with = "humantime_serde",
default = "EngineConfig::default_flush_metadata_region_interval"
)]
pub flush_metadata_region_interval: Duration,
}
impl Default for EngineConfig {
fn default() -> Self {
Self {
flush_metadata_region_interval: DEFAULT_FLUSH_METADATA_REGION_INTERVAL,
experimental_sparse_primary_key_encoding: false,
}
}
}
impl EngineConfig {
fn default_flush_metadata_region_interval() -> Duration {
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
}
/// Sanitizes the configuration.
pub fn sanitize(&mut self) {
if self.flush_metadata_region_interval.is_zero() {
warn!(
"Flush metadata region interval is zero, override with default value: {:?}. Disable metadata region flush is forbidden.",
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
);
self.flush_metadata_region_interval = DEFAULT_FLUSH_METADATA_REGION_INTERVAL;
}
}
}

View File

@@ -34,11 +34,9 @@ use api::region::RegionResponse;
use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_runtime::RepeatedTask;
use mito2::engine::MitoEngine;
pub(crate) use options::IndexOptions;
use snafu::ResultExt;
pub(crate) use state::MetricEngineState;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::region_engine::{
@@ -49,11 +47,11 @@ use store_api::region_engine::{
use store_api::region_request::{BatchRegionDdlRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use self::state::MetricEngineState;
use crate::config::EngineConfig;
use crate::data_region::DataRegion;
use crate::error::{self, Error, Result, StartRepeatedTaskSnafu, UnsupportedRegionRequestSnafu};
use crate::error::{self, Result, UnsupportedRegionRequestSnafu};
use crate::metadata_region::MetadataRegion;
use crate::repeated_task::FlushMetadataRegionTask;
use crate::row_modifier::RowModifier;
use crate::utils::{self, get_region_statistic};
@@ -361,32 +359,19 @@ impl RegionEngine for MetricEngine {
}
impl MetricEngine {
pub fn try_new(mito: MitoEngine, mut config: EngineConfig) -> Result<Self> {
pub fn new(mito: MitoEngine, config: EngineConfig) -> Self {
let metadata_region = MetadataRegion::new(mito.clone());
let data_region = DataRegion::new(mito.clone());
let state = Arc::new(RwLock::default());
config.sanitize();
let flush_interval = config.flush_metadata_region_interval;
let inner = Arc::new(MetricEngineInner {
mito: mito.clone(),
metadata_region,
data_region,
state: state.clone(),
config,
row_modifier: RowModifier::new(),
flush_task: RepeatedTask::new(
flush_interval,
Box::new(FlushMetadataRegionTask {
state: state.clone(),
mito: mito.clone(),
}),
),
});
inner
.flush_task
.start(common_runtime::global_runtime())
.context(StartRepeatedTaskSnafu { name: "flush_task" })?;
Ok(Self { inner })
Self {
inner: Arc::new(MetricEngineInner {
mito,
metadata_region,
data_region,
state: RwLock::default(),
config,
row_modifier: RowModifier::new(),
}),
}
}
pub fn mito(&self) -> MitoEngine {
@@ -441,21 +426,15 @@ impl MetricEngine {
) -> Result<common_recordbatch::SendableRecordBatchStream, BoxedError> {
self.inner.scan_to_stream(region_id, request).await
}
/// Returns the configuration of the engine.
pub fn config(&self) -> &EngineConfig {
&self.inner.config
}
}
struct MetricEngineInner {
mito: MitoEngine,
metadata_region: MetadataRegion,
data_region: DataRegion,
state: Arc<RwLock<MetricEngineState>>,
state: RwLock<MetricEngineState>,
config: EngineConfig,
row_modifier: RowModifier,
flush_task: RepeatedTask<Error>,
}
#[cfg(test)]

View File

@@ -737,7 +737,7 @@ mod test {
// set up
let env = TestEnv::new().await;
let engine = MetricEngine::try_new(env.mito(), EngineConfig::default()).unwrap();
let engine = MetricEngine::new(env.mito(), EngineConfig::default());
let engine_inner = engine.inner;
// check create data region request

View File

@@ -282,14 +282,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to start repeated task: {}", name))]
StartRepeatedTask {
name: String,
source: common_runtime::error::Error,
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -343,8 +335,6 @@ impl ErrorExt for Error {
CollectRecordBatchStream { source, .. } => source.status_code(),
StartRepeatedTask { source, .. } => source.status_code(),
MetricManifestInfo { .. } => StatusCode::Internal,
}
}

View File

@@ -59,7 +59,6 @@ pub mod engine;
pub mod error;
mod metadata_region;
mod metrics;
mod repeated_task;
pub mod row_modifier;
#[cfg(test)]
mod test_util;

View File

@@ -1,167 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, RwLock};
use std::time::Instant;
use common_runtime::TaskFunction;
use common_telemetry::{debug, error};
use mito2::engine::MitoEngine;
use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{RegionFlushRequest, RegionRequest};
use crate::engine::MetricEngineState;
use crate::error::{Error, Result};
use crate::utils;
/// Task to flush metadata regions.
///
/// This task is used to send flush requests to the metadata regions
/// periodically.
pub(crate) struct FlushMetadataRegionTask {
pub(crate) state: Arc<RwLock<MetricEngineState>>,
pub(crate) mito: MitoEngine,
}
#[async_trait::async_trait]
impl TaskFunction<Error> for FlushMetadataRegionTask {
fn name(&self) -> &str {
"FlushMetadataRegionTask"
}
async fn call(&mut self) -> Result<()> {
let region_ids = {
let state = self.state.read().unwrap();
state
.physical_region_states()
.keys()
.cloned()
.collect::<Vec<_>>()
};
let num_region = region_ids.len();
let now = Instant::now();
for region_id in region_ids {
let Some(role) = self.mito.role(region_id) else {
continue;
};
if role == RegionRole::Follower {
continue;
}
let metadata_region_id = utils::to_metadata_region_id(region_id);
if let Err(e) = self
.mito
.handle_request(
metadata_region_id,
RegionRequest::Flush(RegionFlushRequest {
row_group_size: None,
}),
)
.await
{
error!(e; "Failed to flush metadata region {}", metadata_region_id);
}
}
debug!(
"Flushed {} metadata regions, elapsed: {:?}",
num_region,
now.elapsed()
);
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::time::Duration;
use store_api::region_engine::{RegionEngine, RegionManifestInfo};
use crate::config::{EngineConfig, DEFAULT_FLUSH_METADATA_REGION_INTERVAL};
use crate::test_util::TestEnv;
#[tokio::test]
async fn test_flush_metadata_region_task() {
let env = TestEnv::with_prefix_and_config(
"test_flush_metadata_region_task",
EngineConfig {
flush_metadata_region_interval: Duration::from_millis(100),
..Default::default()
},
)
.await;
env.init_metric_region().await;
let engine = env.metric();
// Wait for the flush task to run
tokio::time::sleep(Duration::from_millis(200)).await;
let physical_region_id = env.default_physical_region_id();
let stat = engine.region_statistic(physical_region_id).unwrap();
assert_matches!(
stat.manifest,
RegionManifestInfo::Metric {
metadata_manifest_version: 1,
metadata_flushed_entry_id: 1,
..
}
)
}
#[tokio::test]
async fn test_flush_metadata_region_task_with_long_interval() {
let env = TestEnv::with_prefix_and_config(
"test_flush_metadata_region_task_with_long_interval",
EngineConfig {
flush_metadata_region_interval: Duration::from_secs(60),
..Default::default()
},
)
.await;
env.init_metric_region().await;
let engine = env.metric();
// Wait for the flush task to run; it should not flush the metadata region
tokio::time::sleep(Duration::from_millis(200)).await;
let physical_region_id = env.default_physical_region_id();
let stat = engine.region_statistic(physical_region_id).unwrap();
assert_matches!(
stat.manifest,
RegionManifestInfo::Metric {
metadata_manifest_version: 0,
metadata_flushed_entry_id: 0,
..
}
)
}
#[tokio::test]
async fn test_flush_metadata_region_sanitize() {
let env = TestEnv::with_prefix_and_config(
"test_flush_metadata_region_sanitize",
EngineConfig {
flush_metadata_region_interval: Duration::from_secs(0),
..Default::default()
},
)
.await;
let metric = env.metric();
let config = metric.config();
assert_eq!(
config.flush_metadata_region_interval,
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
);
}
}

View File
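The `FlushMetadataRegionTask` above periodically walks the known physical regions and flushes each one's metadata region. A minimal sketch of that periodic loop, with stand-in closures for listing regions and issuing the flush (the real task also skips follower regions and logs failures):

```rust
use std::time::Duration;

/// Tick on a fixed interval and issue a flush for every known physical region.
/// `flush_one` stands in for sending RegionRequest::Flush to the metadata region.
async fn flush_metadata_regions_periodically(
    interval: Duration,
    list_regions: impl Fn() -> Vec<u64>,
    flush_one: impl Fn(u64),
) {
    let mut ticker = tokio::time::interval(interval);
    loop {
        ticker.tick().await;
        for region_id in list_regions() {
            flush_one(region_id);
        }
    }
}
```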

@@ -54,14 +54,9 @@ impl TestEnv {
/// Returns a new env with specific `prefix` for test.
pub async fn with_prefix(prefix: &str) -> Self {
Self::with_prefix_and_config(prefix, EngineConfig::default()).await
}
/// Returns a new env with specific `prefix` and `config` for test.
pub async fn with_prefix_and_config(prefix: &str, config: EngineConfig) -> Self {
let mut mito_env = MitoTestEnv::with_prefix(prefix);
let mito = mito_env.create_engine(MitoConfig::default()).await;
let metric = MetricEngine::try_new(mito.clone(), config).unwrap();
let metric = MetricEngine::new(mito.clone(), EngineConfig::default());
Self {
mito_env,
mito,
@@ -89,7 +84,7 @@ impl TestEnv {
.mito_env
.create_follower_engine(MitoConfig::default())
.await;
let metric = MetricEngine::try_new(mito.clone(), EngineConfig::default()).unwrap();
let metric = MetricEngine::new(mito.clone(), EngineConfig::default());
let region_id = self.default_physical_region_id();
debug!("opening default physical region: {region_id}");

View File

@@ -302,10 +302,7 @@ impl PartitionTreeMemtable {
fn update_stats(&self, metrics: &WriteMetrics) {
// Only let the tracker tracks value bytes.
self.alloc_tracker.on_allocation(metrics.value_bytes);
self.max_timestamp
.fetch_max(metrics.max_ts, Ordering::SeqCst);
self.min_timestamp
.fetch_min(metrics.min_ts, Ordering::SeqCst);
metrics.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
}
}

View File

@@ -14,6 +14,8 @@
//! Internal metrics of the memtable.
use std::sync::atomic::{AtomicI64, Ordering};
/// Metrics of writing memtables.
pub(crate) struct WriteMetrics {
/// Size allocated by keys.
@@ -26,6 +28,51 @@ pub(crate) struct WriteMetrics {
pub(crate) max_ts: i64,
}
impl WriteMetrics {
/// Update the min/max timestamp range according to current write metric.
pub(crate) fn update_timestamp_range(&self, prev_max_ts: &AtomicI64, prev_min_ts: &AtomicI64) {
loop {
let current_min = prev_min_ts.load(Ordering::Relaxed);
if self.min_ts >= current_min {
break;
}
let Err(updated) = prev_min_ts.compare_exchange(
current_min,
self.min_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
if updated == self.min_ts {
break;
}
}
loop {
let current_max = prev_max_ts.load(Ordering::Relaxed);
if self.max_ts <= current_max {
break;
}
let Err(updated) = prev_max_ts.compare_exchange(
current_max,
self.max_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
if updated == self.max_ts {
break;
}
}
}
}
impl Default for WriteMetrics {
fn default() -> Self {
Self {
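The compare_exchange loops in update_timestamp_range above implement a lock-free minimum/maximum update. A self-contained sketch of the minimum case using only std atomics (the helper name is illustrative):

use std::sync::atomic::{AtomicI64, Ordering};

// Retry until the stored value is no larger than the candidate; a failed
// compare_exchange means another writer raced us, so reload and re-check.
fn store_min(slot: &AtomicI64, candidate: i64) {
    loop {
        let current = slot.load(Ordering::Relaxed);
        if candidate >= current {
            return;
        }
        if slot
            .compare_exchange(current, candidate, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
    }
}

fn main() {
    let min_ts = AtomicI64::new(i64::MAX);
    store_min(&min_ts, 42);
    store_min(&min_ts, 100); // no effect, 42 is already smaller
    assert_eq!(min_ts.load(Ordering::Relaxed), 42);
}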

View File

@@ -147,8 +147,7 @@ impl TimeSeriesMemtable {
fn update_stats(&self, stats: WriteMetrics) {
self.alloc_tracker
.on_allocation(stats.key_bytes + stats.value_bytes);
self.max_timestamp.fetch_max(stats.max_ts, Ordering::SeqCst);
self.min_timestamp.fetch_min(stats.min_ts, Ordering::SeqCst);
stats.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
}
fn write_key_value(&self, kv: KeyValue, stats: &mut WriteMetrics) -> Result<()> {

View File

@@ -322,10 +322,13 @@ impl ScanRegion {
let memtables: Vec<_> = memtables
.into_iter()
.filter(|mem| {
// check if memtable is empty by reading stats.
let Some((start, end)) = mem.stats().time_range() else {
if mem.is_empty() {
return false;
};
}
let stats = mem.stats();
// Safety: the memtable is not empty.
let (start, end) = stats.time_range().unwrap();
// The time range of the memtable is inclusive.
let memtable_range = TimestampRange::new_inclusive(Some(start), Some(end));
memtable_range.intersects(&time_range)
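The filter above keeps a memtable only when its inclusive [start, end] range overlaps the scan's time range. A small sketch of the same pruning with plain i64 timestamps standing in for the engine's Timestamp and TimestampRange types (an assumption made for brevity):

// Two inclusive ranges overlap iff each starts no later than the other ends.
fn intersects(a: (i64, i64), b: (i64, i64)) -> bool {
    a.0 <= b.1 && b.0 <= a.1
}

fn prune_memtables(ranges: Vec<(i64, i64)>, scan: (i64, i64)) -> Vec<(i64, i64)> {
    ranges.into_iter().filter(|r| intersects(*r, scan)).collect()
}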

View File

@@ -134,7 +134,6 @@ impl WriteFormat {
/// Helper for reading the SST format.
pub struct ReadFormat {
/// The metadata stored in the SST.
metadata: RegionMetadataRef,
/// SST file schema.
arrow_schema: SchemaRef,
@@ -306,23 +305,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, true),
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, true);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, true)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_values(row_groups, column, index, true);
StatValues::from_stats_opt(stats)
Self::column_values(row_groups, column, index, true)
}
}
}
@@ -332,23 +325,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, false),
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, false);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, false)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_values(row_groups, column, index, false);
StatValues::from_stats_opt(stats)
Self::column_values(row_groups, column, index, false)
}
}
}
@@ -358,23 +345,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => StatValues::NoStats,
SemanticType::Tag => None,
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_null_counts(row_groups, *index);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_null_counts(row_groups, *index)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_null_counts(row_groups, index);
StatValues::from_stats_opt(stats)
Self::column_null_counts(row_groups, index)
}
}
}
@@ -409,7 +390,8 @@ impl ReadFormat {
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> StatValues {
) -> Option<ArrayRef> {
let primary_key_encoding = self.metadata.primary_key_encoding;
let is_first_tag = self
.metadata
.primary_key
@@ -418,28 +400,9 @@ impl ReadFormat {
.unwrap_or(false);
if !is_first_tag {
// Only the min-max of the first tag is available in the primary key.
return StatValues::NoStats;
return None;
}
StatValues::from_stats_opt(self.first_tag_values(row_groups, column, is_min))
}
/// Returns min/max values of the first tag.
/// Returns None if the tag does not have statistics.
fn first_tag_values(
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> Option<ArrayRef> {
debug_assert!(self
.metadata
.primary_key
.first()
.map(|id| *id == column.column_id)
.unwrap_or(false));
let primary_key_encoding = self.metadata.primary_key_encoding;
let converter = build_primary_key_codec_with_fields(
primary_key_encoding,
[(
@@ -489,7 +452,6 @@ impl ReadFormat {
}
/// Returns min/max values of specific non-tag columns.
/// Returns None if the column does not have statistics.
fn column_values(
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
@@ -582,29 +544,6 @@ impl ReadFormat {
}
}
/// Values of column statistics of the SST.
///
/// It also distinguishes the case that a column is not found and
/// the column exists but has no statistics.
pub enum StatValues {
/// Values of each row group.
Values(ArrayRef),
/// No such column.
NoColumn,
/// Column exists but has no statistics.
NoStats,
}
impl StatValues {
/// Creates a new `StatValues` instance from optional statistics.
pub fn from_stats_opt(stats: Option<ArrayRef>) -> Self {
match stats {
Some(stats) => StatValues::Values(stats),
None => StatValues::NoStats,
}
}
}
#[cfg(test)]
impl ReadFormat {
/// Creates a helper with existing `metadata` and all columns.

View File

@@ -25,7 +25,7 @@ use parquet::file::metadata::RowGroupMetaData;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
use crate::sst::parquet::format::{ReadFormat, StatValues};
use crate::sst::parquet::format::ReadFormat;
/// Statistics for pruning row groups.
pub(crate) struct RowGroupPruningStats<'a, T> {
@@ -100,18 +100,16 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
fn min_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.min_values(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
}
}
fn max_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.max_values(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
}
}
@@ -120,12 +118,10 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
}
fn null_counts(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.null_counts(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_null_count(&column.name),
StatValues::NoStats => None,
}
let Some(column_id) = self.column_id_to_prune(&column.name) else {
return self.compat_null_count(&column.name);
};
self.read_format.null_counts(self.row_groups, column_id)
}
fn row_counts(&self, _column: &Column) -> Option<ArrayRef> {
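With StatValues removed, min_values and max_values above fall back to the column's default value whenever ReadFormat returns None. A rough sketch of that fallback, with plain i64 values standing in for Arrow arrays (the helper and its types are illustrative, not the engine's API):

// Return per-row-group statistics if the SST has them, otherwise repeat the
// column's default value once per row group so pruning can still proceed.
fn min_values_or_default(
    stats: Option<Vec<i64>>,
    default_value: i64,
    num_row_groups: usize,
) -> Vec<i64> {
    stats.unwrap_or_else(|| vec![default_value; num_row_groups])
}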

View File

@@ -141,6 +141,11 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// But the flush is skipped if memtables are empty. Thus should update the `topic_latest_entry_id`
// when handling flush request instead of in `schedule_flush` or `flush_finished`.
self.update_topic_latest_entry_id(&region);
info!(
"Region {} flush request, high watermark: {}",
region_id,
region.topic_latest_entry_id.load(Ordering::Relaxed)
);
let reason = if region.is_downgrading() {
FlushReason::Downgrading
@@ -263,17 +268,15 @@ impl<S: LogStore> RegionWorkerLoop<S> {
.store()
.high_watermark(&region.provider)
.unwrap_or(0);
let topic_last_entry_id = region.topic_latest_entry_id.load(Ordering::Relaxed);
if high_watermark != 0 && high_watermark > topic_last_entry_id {
if high_watermark != 0 {
region
.topic_latest_entry_id
.store(high_watermark, Ordering::Relaxed);
info!(
"Region {} high watermark updated to {}",
region.region_id, high_watermark
);
}
info!(
"Region {} high watermark updated to {}",
region.region_id, high_watermark
);
}
}
}
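The change above drops the `high_watermark > topic_last_entry_id` guard: any non-zero high watermark reported by the log store now overwrites the cached entry id. A tiny sketch of the resulting behaviour (the cached id is assumed to be a u64 here):

use std::sync::atomic::{AtomicU64, Ordering};

fn update_topic_latest_entry_id(cached: &AtomicU64, high_watermark: u64) {
    // Zero means the store reported no watermark; any other value wins.
    if high_watermark != 0 {
        cached.store(high_watermark, Ordering::Relaxed);
    }
}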

View File

@@ -37,7 +37,7 @@ use common_meta::rpc::ddl::{
};
use common_meta::rpc::router::{Partition, Partition as MetaPartition};
use common_query::Output;
use common_telemetry::{debug, info, tracing, warn};
use common_telemetry::{debug, info, tracing};
use common_time::Timezone;
use datafusion_common::tree_node::TreeNodeVisitor;
use datafusion_expr::LogicalPlan;
@@ -369,7 +369,7 @@ impl StatementExecutor {
query_context: QueryContextRef,
) -> Result<SubmitDdlTaskResponse> {
let flow_type = self
.determine_flow_type(&expr, query_context.clone())
.determine_flow_type(&expr.sql, query_context.clone())
.await?;
info!("determined flow={} type: {:#?}", expr.flow_name, flow_type);
@@ -398,49 +398,9 @@ impl StatementExecutor {
/// Determine the flow type based on the SQL query
///
/// If it contains aggregation or distinct, then it is a batch flow, otherwise it is a streaming flow
async fn determine_flow_type(
&self,
expr: &CreateFlowExpr,
query_ctx: QueryContextRef,
) -> Result<FlowType> {
// first check if source table's ttl is instant, if it is, force streaming mode
for src_table_name in &expr.source_table_names {
let table = self
.catalog_manager()
.table(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name,
Some(&query_ctx),
)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: format_full_table_name(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name,
),
})?;
// instant source table can only be handled by streaming mode
if table.table_info().meta.options.ttl == Some(common_time::TimeToLive::Instant) {
warn!(
"Source table `{}` for flow `{}`'s ttl=instant, fallback to streaming mode",
format_full_table_name(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name
),
expr.flow_name
);
return Ok(FlowType::Streaming);
}
}
async fn determine_flow_type(&self, sql: &str, query_ctx: QueryContextRef) -> Result<FlowType> {
let engine = &self.query_engine;
let stmt = QueryLanguageParser::parse_sql(&expr.sql, &query_ctx)
let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let plan = engine
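The doc comment above says the flow type is decided by whether the query contains aggregation or distinct. A minimal sketch of such a check over a DataFusion LogicalPlan, walking the plan's inputs recursively; this is an assumed helper, not the code path determine_flow_type actually uses:

use datafusion_expr::LogicalPlan;

// Batch flow if any node in the plan tree is an aggregation or a distinct.
fn needs_batching(plan: &LogicalPlan) -> bool {
    if matches!(plan, LogicalPlan::Aggregate(_) | LogicalPlan::Distinct(_)) {
        return true;
    }
    plan.inputs().into_iter().any(needs_batching)
}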

View File

@@ -29,7 +29,7 @@ pub use etl::{
DispatchedTo, Pipeline, PipelineExecOutput, PipelineMap,
};
pub use manager::{
pipeline_operator, table, util, IdentityTimeIndex, PipelineContext, PipelineDefinition,
PipelineInfo, PipelineRef, PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo,
pipeline_operator, table, util, IdentityTimeIndex, PipelineDefinition, PipelineInfo,
PipelineRef, PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo,
GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME,
};

View File

@@ -26,7 +26,7 @@ use util::to_pipeline_version;
use crate::error::{CastTypeSnafu, InvalidCustomTimeIndexSnafu, PipelineMissingSnafu, Result};
use crate::etl::value::time::{MS_RESOLUTION, NS_RESOLUTION, S_RESOLUTION, US_RESOLUTION};
use crate::table::PipelineTable;
use crate::{GreptimePipelineParams, Pipeline, Value};
use crate::{Pipeline, Value};
pub mod pipeline_operator;
pub mod table;
@@ -104,22 +104,6 @@ impl PipelineDefinition {
}
}
pub struct PipelineContext<'a> {
pub pipeline_definition: &'a PipelineDefinition,
pub pipeline_param: &'a GreptimePipelineParams,
}
impl<'a> PipelineContext<'a> {
pub fn new(
pipeline_definition: &'a PipelineDefinition,
pipeline_param: &'a GreptimePipelineParams,
) -> Self {
Self {
pipeline_definition,
pipeline_param,
}
}
}
pub enum PipelineWay {
OtlpLogDirect(Box<SelectInfo>),
Pipeline(PipelineDefinition),

View File

@@ -44,13 +44,13 @@ pub use quantile_aggr::quantile_udaf;
pub use resets::Resets;
pub use round::Round;
/// Extracts an array from a `ColumnarValue`.
///
/// If the `ColumnarValue` is a scalar, it converts it to an array of size 1.
pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
match columnar_value {
ColumnarValue::Array(array) => Ok(array.clone()),
ColumnarValue::Scalar(scalar) => Ok(scalar.to_array_of_size(1)?),
if let ColumnarValue::Array(array) = columnar_value {
Ok(array.clone())
} else {
Err(DataFusionError::Execution(
"expect array as input, found scalar value".to_string(),
))
}
}

View File

@@ -231,7 +231,6 @@ mod test {
AvgOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(49.9999995),
Some(45.8618844),
@@ -254,7 +253,6 @@ mod test {
MinOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(12.345678),
Some(12.345678),
@@ -277,7 +275,6 @@ mod test {
MaxOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(87.654321),
Some(87.654321),
@@ -300,7 +297,6 @@ mod test {
SumOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(99.999999),
Some(229.309422),
@@ -323,7 +319,6 @@ mod test {
CountOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(2.0),
Some(5.0),
@@ -346,7 +341,6 @@ mod test {
LastOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(87.654321),
Some(70.710678),
@@ -369,7 +363,6 @@ mod test {
AbsentOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
None,
None,
@@ -392,7 +385,6 @@ mod test {
PresentOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(1.0),
Some(1.0),
@@ -415,7 +407,6 @@ mod test {
StdvarOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(1417.8479276253622),
Some(808.999919713209),
@@ -451,7 +442,6 @@ mod test {
StdvarOverTime::scalar_udf(),
RangeArray::from_ranges(ts_array, ranges).unwrap(),
RangeArray::from_ranges(values_array, ranges).unwrap(),
vec![],
vec![Some(0.0), Some(10.559999999999999)],
);
}
@@ -463,7 +453,6 @@ mod test {
StddevOverTime::scalar_udf(),
ts_array,
value_array,
vec![],
vec![
Some(37.6543215),
Some(28.442923895289123),
@@ -499,7 +488,6 @@ mod test {
StddevOverTime::scalar_udf(),
RangeArray::from_ranges(ts_array, ranges).unwrap(),
RangeArray::from_ranges(values_array, ranges).unwrap(),
vec![],
vec![Some(0.0), Some(3.249615361854384)],
);
}

View File

@@ -90,7 +90,6 @@ mod test {
Changes::scalar_udf(),
ts_array_1,
value_array_1,
vec![],
vec![Some(0.0), Some(3.0), Some(5.0), Some(8.0), None],
);
@@ -102,7 +101,6 @@ mod test {
Changes::scalar_udf(),
ts_array_2,
value_array_2,
vec![],
vec![Some(0.0), Some(3.0), Some(5.0), Some(9.0), None],
);
@@ -113,7 +111,6 @@ mod test {
Changes::scalar_udf(),
ts_array_3,
value_array_3,
vec![],
vec![Some(0.0), Some(0.0), Some(1.0), Some(1.0), None],
);
}

View File

@@ -74,7 +74,6 @@ mod test {
Deriv::scalar_udf(),
ts_array,
value_array,
vec![],
vec![Some(10.606060606060607), None],
);
}
@@ -100,7 +99,6 @@ mod test {
Deriv::scalar_udf(),
ts_range_array,
value_range_array,
vec![],
vec![Some(0.0)],
);
}

View File

@@ -34,11 +34,11 @@ use std::sync::Arc;
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::{DataFusionError, Result as DfResult};
use datafusion::common::DataFusionError;
use datafusion::logical_expr::{ScalarUDF, Volatility};
use datafusion::physical_plan::ColumnarValue;
use datafusion_expr::create_udf;
use datatypes::arrow::array::{Array, Int64Array};
use datatypes::arrow::array::Array;
use datatypes::arrow::datatypes::DataType;
use crate::extension_plan::Millisecond;
@@ -53,7 +53,7 @@ pub type Increase = ExtrapolatedRate<true, false>;
/// from <https://github.com/prometheus/prometheus/blob/v0.40.1/promql/functions.go#L66>
#[derive(Debug)]
pub struct ExtrapolatedRate<const IS_COUNTER: bool, const IS_RATE: bool> {
/// Range length in milliseconds.
/// Range duration in milliseconds.
range_length: i64,
}
@@ -63,7 +63,7 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
Self { range_length }
}
fn scalar_udf_with_name(name: &str) -> ScalarUDF {
fn scalar_udf_with_name(name: &str, range_length: i64) -> ScalarUDF {
let input_types = vec![
// timestamp range vector
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
@@ -71,8 +71,6 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
RangeArray::convert_data_type(DataType::Float64),
// timestamp vector
DataType::Timestamp(TimeUnit::Millisecond, None),
// range length
DataType::Int64,
];
create_udf(
@@ -80,34 +78,12 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
input_types,
DataType::Float64,
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
Arc::new(move |input: &_| Self::new(range_length).calc(input)) as _,
)
}
fn create_function(inputs: &[ColumnarValue]) -> DfResult<Self> {
if inputs.len() != 4 {
return Err(DataFusionError::Plan(
"ExtrapolatedRate function should have 4 inputs".to_string(),
));
}
let range_length_array = extract_array(&inputs[3])?;
let range_length = range_length_array
.as_any()
.downcast_ref::<Int64Array>()
.unwrap()
.value(0) as i64;
Ok(Self::new(range_length))
}
/// Input parameters:
/// * 0: timestamp range vector
/// * 1: value range vector
/// * 2: timestamp vector
/// * 3: range length. Range duration in millisecond. Not used here
fn calc(&self, input: &[ColumnarValue]) -> DfResult<ColumnarValue> {
assert_eq!(input.len(), 4);
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), 3);
// construct matrix from input
let ts_array = extract_array(&input[0])?;
@@ -232,8 +208,8 @@ impl ExtrapolatedRate<false, false> {
"prom_delta"
}
pub fn scalar_udf() -> ScalarUDF {
Self::scalar_udf_with_name(Self::name())
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
Self::scalar_udf_with_name(Self::name(), range_length)
}
}
@@ -243,8 +219,8 @@ impl ExtrapolatedRate<true, true> {
"prom_rate"
}
pub fn scalar_udf() -> ScalarUDF {
Self::scalar_udf_with_name(Self::name())
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
Self::scalar_udf_with_name(Self::name(), range_length)
}
}
@@ -254,8 +230,8 @@ impl ExtrapolatedRate<true, false> {
"prom_increase"
}
pub fn scalar_udf() -> ScalarUDF {
Self::scalar_udf_with_name(Self::name())
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
Self::scalar_udf_with_name(Self::name(), range_length)
}
}
@@ -295,7 +271,6 @@ mod test {
ColumnarValue::Array(Arc::new(ts_range.into_dict())),
ColumnarValue::Array(Arc::new(value_range.into_dict())),
ColumnarValue::Array(timestamps),
ColumnarValue::Array(Arc::new(Int64Array::from(vec![5]))),
];
let output = extract_array(
&ExtrapolatedRate::<IS_COUNTER, IS_RATE>::new(5)
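The pattern above (repeated for holt_winters, predict_linear, quantile_over_time and round below) moves scalar parameters out of the UDF's argument list and captures them in the closure passed to create_udf. A self-contained sketch of that pattern with a made-up "scale" UDF; the function and its parameter are illustrative, not part of the codebase:

use std::sync::Arc;

use datafusion::arrow::array::{Array, Float64Array};
use datafusion::arrow::datatypes::DataType;
use datafusion::common::DataFusionError;
use datafusion::logical_expr::{ScalarUDF, Volatility};
use datafusion::physical_plan::ColumnarValue;
use datafusion_expr::create_udf;

// The factor is fixed at plan time and baked into the UDF, so execution only
// sees the single value column.
fn scale_udf(factor: f64) -> ScalarUDF {
    create_udf(
        "scale",
        vec![DataType::Float64],
        DataType::Float64,
        Volatility::Volatile,
        Arc::new(move |input: &[ColumnarValue]| {
            let ColumnarValue::Array(array) = &input[0] else {
                return Err(DataFusionError::Execution(
                    "expect array as input, found scalar value".to_string(),
                ));
            };
            let values = array
                .as_any()
                .downcast_ref::<Float64Array>()
                .ok_or_else(|| DataFusionError::Execution("expect f64 array".to_string()))?;
            // Multiply every non-null value by the captured factor.
            let scaled: Float64Array = values.iter().map(|v| v.map(|v| v * factor)).collect();
            Ok(ColumnarValue::Array(Arc::new(scaled)))
        }) as _,
    )
}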

View File

@@ -22,7 +22,6 @@ use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
use datafusion::logical_expr::{ScalarUDF, Volatility};
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::create_udf;
use datatypes::arrow::array::Array;
use datatypes::arrow::datatypes::DataType;
@@ -63,10 +62,6 @@ impl HoltWinters {
vec![
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
RangeArray::convert_data_type(DataType::Float64),
// sf
DataType::Float64,
// tf
DataType::Float64,
]
}
@@ -74,39 +69,20 @@ impl HoltWinters {
DataType::Float64
}
pub fn scalar_udf() -> ScalarUDF {
pub fn scalar_udf(level: f64, trend: f64) -> ScalarUDF {
create_udf(
Self::name(),
Self::input_type(),
Self::return_type(),
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
Arc::new(move |input: &_| Self::new(level, trend).calc(input)) as _,
)
}
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
if inputs.len() != 4 {
return Err(DataFusionError::Plan(
"HoltWinters function should have 4 inputs".to_string(),
));
}
let ColumnarValue::Scalar(ScalarValue::Float64(Some(sf))) = inputs[2] else {
return Err(DataFusionError::Plan(
"HoltWinters function's third input should be a scalar float64".to_string(),
));
};
let ColumnarValue::Scalar(ScalarValue::Float64(Some(tf))) = inputs[3] else {
return Err(DataFusionError::Plan(
"HoltWinters function's fourth input should be a scalar float64".to_string(),
));
};
Ok(Self::new(sf, tf))
}
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
// construct matrix from input.
// The third one is level param, the fourth - trend param which are included in fields.
assert_eq!(input.len(), 4);
assert_eq!(input.len(), 2);
let ts_array = extract_array(&input[0])?;
let value_array = extract_array(&input[1])?;
@@ -288,13 +264,9 @@ mod tests {
let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
simple_range_udf_runner(
HoltWinters::scalar_udf(),
HoltWinters::scalar_udf(0.5, 0.1),
ts_range_array,
value_range_array,
vec![
ScalarValue::Float64(Some(0.5)),
ScalarValue::Float64(Some(0.1)),
],
vec![Some(5.0)],
);
}
@@ -315,13 +287,9 @@ mod tests {
let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
simple_range_udf_runner(
HoltWinters::scalar_udf(),
HoltWinters::scalar_udf(0.5, 0.1),
ts_range_array,
value_range_array,
vec![
ScalarValue::Float64(Some(0.5)),
ScalarValue::Float64(Some(0.1)),
],
vec![Some(38.18119566835938)],
);
}
@@ -347,13 +315,9 @@ mod tests {
let (ts_range_array, value_range_array) =
create_ts_and_value_range_arrays(query, ranges.clone());
simple_range_udf_runner(
HoltWinters::scalar_udf(),
HoltWinters::scalar_udf(0.01, 0.1),
ts_range_array,
value_range_array,
vec![
ScalarValue::Float64(Some(0.01)),
ScalarValue::Float64(Some(0.1)),
],
vec![Some(expected)],
);
}

View File

@@ -190,7 +190,6 @@ mod test {
IDelta::<false>::scalar_udf(),
ts_range_array,
value_range_array,
vec![],
vec![Some(1.0), Some(-5.0), None, Some(6.0), None, None],
);
@@ -201,7 +200,6 @@ mod test {
IDelta::<true>::scalar_udf(),
ts_range_array,
value_range_array,
vec![],
// the second point represent counter reset
vec![Some(0.5), Some(0.0), None, Some(3.0), None, None],
);

View File

@@ -22,7 +22,6 @@ use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
use datafusion::logical_expr::{ScalarUDF, Volatility};
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::create_udf;
use datatypes::arrow::array::Array;
use datatypes::arrow::datatypes::DataType;
@@ -45,41 +44,25 @@ impl PredictLinear {
"prom_predict_linear"
}
pub fn scalar_udf() -> ScalarUDF {
pub fn scalar_udf(t: i64) -> ScalarUDF {
let input_types = vec![
// time index column
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
// value column
RangeArray::convert_data_type(DataType::Float64),
// t
DataType::Int64,
];
create_udf(
Self::name(),
input_types,
DataType::Float64,
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.predict_linear(input)) as _,
Arc::new(move |input: &_| Self::new(t).predict_linear(input)) as _,
)
}
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
if inputs.len() != 3 {
return Err(DataFusionError::Plan(
"PredictLinear function should have 3 inputs".to_string(),
));
}
let ColumnarValue::Scalar(ScalarValue::Int64(Some(t))) = inputs[2] else {
return Err(DataFusionError::Plan(
"PredictLinear function's third input should be a scalar int64".to_string(),
));
};
Ok(Self::new(t))
}
fn predict_linear(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
// construct matrix from input.
assert_eq!(input.len(), 3);
assert_eq!(input.len(), 2);
let ts_array = extract_array(&input[0])?;
let value_array = extract_array(&input[1])?;
@@ -207,10 +190,9 @@ mod test {
let ts_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
let value_array = RangeArray::from_ranges(values_array, ranges).unwrap();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(0),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(0))],
vec![None, None],
);
}
@@ -219,10 +201,9 @@ mod test {
fn calculate_predict_linear_test1() {
let (ts_array, value_array) = build_test_range_arrays();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(0),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(0))],
// value at t = 0
vec![Some(38.63636363636364)],
);
@@ -232,10 +213,9 @@ mod test {
fn calculate_predict_linear_test2() {
let (ts_array, value_array) = build_test_range_arrays();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(3000),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(3000))],
// value at t = 3000
vec![Some(31856.818181818187)],
);
@@ -245,10 +225,9 @@ mod test {
fn calculate_predict_linear_test3() {
let (ts_array, value_array) = build_test_range_arrays();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(4200),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(4200))],
// value at t = 4200
vec![Some(44584.09090909091)],
);
@@ -258,10 +237,9 @@ mod test {
fn calculate_predict_linear_test4() {
let (ts_array, value_array) = build_test_range_arrays();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(6600),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(6600))],
// value at t = 6600
vec![Some(70038.63636363638)],
);
@@ -271,10 +249,9 @@ mod test {
fn calculate_predict_linear_test5() {
let (ts_array, value_array) = build_test_range_arrays();
simple_range_udf_runner(
PredictLinear::scalar_udf(),
PredictLinear::scalar_udf(7800),
ts_array,
value_array,
vec![ScalarValue::Int64(Some(7800))],
// value at t = 7800
vec![Some(82765.9090909091)],
);

View File

@@ -19,7 +19,6 @@ use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
use datafusion::logical_expr::{ScalarUDF, Volatility};
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::create_udf;
use datatypes::arrow::array::Array;
use datatypes::arrow::datatypes::DataType;
@@ -41,38 +40,22 @@ impl QuantileOverTime {
"prom_quantile_over_time"
}
pub fn scalar_udf() -> ScalarUDF {
pub fn scalar_udf(quantile: f64) -> ScalarUDF {
let input_types = vec![
// time index column
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
// value column
RangeArray::convert_data_type(DataType::Float64),
// quantile
DataType::Float64,
];
create_udf(
Self::name(),
input_types,
DataType::Float64,
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.quantile_over_time(input)) as _,
Arc::new(move |input: &_| Self::new(quantile).quantile_over_time(input)) as _,
)
}
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
if inputs.len() != 3 {
return Err(DataFusionError::Plan(
"QuantileOverTime function should have 3 inputs".to_string(),
));
}
let ColumnarValue::Scalar(ScalarValue::Float64(Some(quantile))) = inputs[2] else {
return Err(DataFusionError::Plan(
"QuantileOverTime function's third input should be a scalar float64".to_string(),
));
};
Ok(Self::new(quantile))
}
fn quantile_over_time(
&self,
input: &[ColumnarValue],

View File

@@ -16,12 +16,10 @@ use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, AsArray};
use datafusion::common::cast::{as_list_array, as_primitive_array, as_struct_array};
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::error::Result as DfResult;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF, Volatility};
use datafusion::physical_plan::expressions::Literal;
use datafusion::prelude::create_udaf;
use datafusion_common::ScalarValue;
use datafusion_expr::function::AccumulatorArgs;
use datatypes::arrow::array::{ListArray, StructArray};
use datatypes::arrow::datatypes::{DataType, Field, Float64Type};
@@ -40,16 +38,16 @@ pub struct QuantileAccumulator {
/// Create a quantile `AggregateUDF` for PromQL quantile operator,
/// which calculates φ-quantile (0 ≤ φ ≤ 1) over dimensions
pub fn quantile_udaf() -> Arc<AggregateUDF> {
pub fn quantile_udaf(q: f64) -> Arc<AggregateUDF> {
Arc::new(create_udaf(
QUANTILE_NAME,
// Input type: (φ, values)
vec![DataType::Float64, DataType::Float64],
// Input type: (values)
vec![DataType::Float64],
// Output type: the φ-quantile
Arc::new(DataType::Float64),
Volatility::Volatile,
// Create the accumulator
Arc::new(QuantileAccumulator::from_args),
Arc::new(move |_| Ok(Box::new(QuantileAccumulator::new(q)))),
// Intermediate state types
Arc::new(vec![DataType::Struct(
vec![Field::new(
@@ -67,40 +65,17 @@ pub fn quantile_udaf() -> Arc<AggregateUDF> {
}
impl QuantileAccumulator {
fn new(q: f64) -> Self {
pub fn new(q: f64) -> Self {
Self {
q,
..Default::default()
}
}
pub fn from_args(args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
if args.exprs.len() != 2 {
return Err(DataFusionError::Plan(
"Quantile function should have 2 inputs".to_string(),
));
}
let q = match &args.exprs[0]
.as_any()
.downcast_ref::<Literal>()
.map(|lit| lit.value())
{
Some(ScalarValue::Float64(Some(q))) => *q,
_ => {
return Err(DataFusionError::Internal(
"Invalid quantile value".to_string(),
))
}
};
Ok(Box::new(Self::new(q)))
}
}
impl DfAccumulator for QuantileAccumulator {
fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
let f64_array = values[1].as_primitive::<Float64Type>();
let f64_array = values[0].as_primitive::<Float64Type>();
self.values.extend(f64_array);
@@ -187,10 +162,9 @@ mod tests {
#[test]
fn test_quantile_accumulator_single_value() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input = create_f64_array(vec![Some(10.0)]);
accumulator.update_batch(&[q, input]).unwrap();
accumulator.update_batch(&[input]).unwrap();
let result = accumulator.evaluate().unwrap();
assert_eq!(result, ScalarValue::Float64(Some(10.0)));
@@ -199,10 +173,9 @@ mod tests {
#[test]
fn test_quantile_accumulator_multiple_values() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0), Some(4.0), Some(5.0)]);
accumulator.update_batch(&[q, input]).unwrap();
accumulator.update_batch(&[input]).unwrap();
let result = accumulator.evaluate().unwrap();
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
@@ -211,10 +184,9 @@ mod tests {
#[test]
fn test_quantile_accumulator_with_nulls() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input = create_f64_array(vec![Some(1.0), None, Some(3.0), Some(4.0), Some(5.0)]);
accumulator.update_batch(&[q, input]).unwrap();
accumulator.update_batch(&[input]).unwrap();
let result = accumulator.evaluate().unwrap();
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
@@ -223,12 +195,11 @@ mod tests {
#[test]
fn test_quantile_accumulator_multiple_batches() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input1 = create_f64_array(vec![Some(1.0), Some(2.0)]);
let input2 = create_f64_array(vec![Some(3.0), Some(4.0), Some(5.0)]);
accumulator.update_batch(&[q.clone(), input1]).unwrap();
accumulator.update_batch(&[q, input2]).unwrap();
accumulator.update_batch(&[input1]).unwrap();
accumulator.update_batch(&[input2]).unwrap();
let result = accumulator.evaluate().unwrap();
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
@@ -237,33 +208,29 @@ mod tests {
#[test]
fn test_quantile_accumulator_different_quantiles() {
let mut min_accumulator = QuantileAccumulator::new(0.0);
let q = create_f64_array(vec![Some(0.0)]);
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0), Some(4.0), Some(5.0)]);
min_accumulator.update_batch(&[q, input.clone()]).unwrap();
min_accumulator.update_batch(&[input.clone()]).unwrap();
assert_eq!(
min_accumulator.evaluate().unwrap(),
ScalarValue::Float64(Some(1.0))
);
let mut q1_accumulator = QuantileAccumulator::new(0.25);
let q = create_f64_array(vec![Some(0.25)]);
q1_accumulator.update_batch(&[q, input.clone()]).unwrap();
q1_accumulator.update_batch(&[input.clone()]).unwrap();
assert_eq!(
q1_accumulator.evaluate().unwrap(),
ScalarValue::Float64(Some(2.0))
);
let mut q3_accumulator = QuantileAccumulator::new(0.75);
let q = create_f64_array(vec![Some(0.75)]);
q3_accumulator.update_batch(&[q, input.clone()]).unwrap();
q3_accumulator.update_batch(&[input.clone()]).unwrap();
assert_eq!(
q3_accumulator.evaluate().unwrap(),
ScalarValue::Float64(Some(4.0))
);
let mut max_accumulator = QuantileAccumulator::new(1.0);
let q = create_f64_array(vec![Some(1.0)]);
max_accumulator.update_batch(&[q, input]).unwrap();
max_accumulator.update_batch(&[input]).unwrap();
assert_eq!(
max_accumulator.evaluate().unwrap(),
ScalarValue::Float64(Some(5.0))
@@ -273,11 +240,10 @@ mod tests {
#[test]
fn test_quantile_accumulator_size() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0)]);
let initial_size = accumulator.size();
accumulator.update_batch(&[q, input]).unwrap();
accumulator.update_batch(&[input]).unwrap();
let after_update_size = accumulator.size();
assert!(after_update_size >= initial_size);
@@ -286,16 +252,14 @@ mod tests {
#[test]
fn test_quantile_accumulator_state_and_merge() -> DfResult<()> {
let mut acc1 = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input1 = create_f64_array(vec![Some(1.0), Some(2.0)]);
acc1.update_batch(&[q, input1])?;
acc1.update_batch(&[input1])?;
let state1 = acc1.state()?;
let mut acc2 = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input2 = create_f64_array(vec![Some(3.0), Some(4.0), Some(5.0)]);
acc2.update_batch(&[q, input2])?;
acc2.update_batch(&[input2])?;
let mut struct_builders = vec![];
for scalar in &state1 {
@@ -316,16 +280,16 @@ mod tests {
#[test]
fn test_quantile_accumulator_with_extreme_values() {
let mut accumulator = QuantileAccumulator::new(0.5);
let q = create_f64_array(vec![Some(0.5)]);
let input = create_f64_array(vec![Some(f64::MAX), Some(f64::MIN), Some(0.0)]);
accumulator.update_batch(&[q, input]).unwrap();
accumulator.update_batch(&[input]).unwrap();
let _result = accumulator.evaluate().unwrap();
}
#[test]
fn test_quantile_udaf_creation() {
let udaf = quantile_udaf();
let q = 0.5;
let udaf = quantile_udaf(q);
assert_eq!(udaf.name(), QUANTILE_NAME);
assert_eq!(udaf.return_type(&[]).unwrap(), DataType::Float64);
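For reference, the φ-quantile the accumulator computes behaves like a linear-interpolation quantile over the sorted values; the standalone sketch below reproduces the expectations in the tests above (3.0 at φ=0.5, 2.0 at φ=0.25, 4.0 at φ=0.75 for the input 1..=5). It is an illustration, not the accumulator's implementation:

fn quantile(q: f64, mut values: Vec<f64>) -> Option<f64> {
    if values.is_empty() || !(0.0..=1.0).contains(&q) {
        return None;
    }
    values.sort_by(|a, b| a.partial_cmp(b).unwrap());
    // Rank within the sorted values, interpolating between neighbours.
    let rank = q * (values.len() - 1) as f64;
    let (lo, hi) = (rank.floor() as usize, rank.ceil() as usize);
    Some(values[lo] + (values[hi] - values[lo]) * (rank - lo as f64))
}

// quantile(0.5, vec![1.0, 2.0, 3.0, 4.0, 5.0]) == Some(3.0)
// quantile(0.25, vec![1.0, 2.0, 3.0, 4.0, 5.0]) == Some(2.0)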

View File

@@ -90,7 +90,6 @@ mod test {
Resets::scalar_udf(),
ts_array_1,
value_array_1,
vec![],
vec![Some(0.0), Some(1.0), Some(2.0), Some(3.0), None],
);
@@ -102,7 +101,6 @@ mod test {
Resets::scalar_udf(),
ts_array_2,
value_array_2,
vec![],
vec![Some(0.0), Some(0.0), Some(1.0), Some(1.0), None],
);
@@ -113,7 +111,6 @@ mod test {
Resets::scalar_udf(),
ts_array_3,
value_array_3,
vec![],
vec![Some(0.0), Some(0.0), Some(0.0), Some(0.0), None],
);
}

View File

@@ -15,7 +15,6 @@
use std::sync::Arc;
use datafusion::error::DataFusionError;
use datafusion_common::ScalarValue;
use datafusion_expr::{create_udf, ColumnarValue, ScalarUDF, Volatility};
use datatypes::arrow::array::AsArray;
use datatypes::arrow::datatypes::{DataType, Float64Type};
@@ -37,39 +36,25 @@ impl Round {
}
fn input_type() -> Vec<DataType> {
vec![DataType::Float64, DataType::Float64]
vec![DataType::Float64]
}
pub fn return_type() -> DataType {
DataType::Float64
}
pub fn scalar_udf() -> ScalarUDF {
pub fn scalar_udf(nearest: f64) -> ScalarUDF {
create_udf(
Self::name(),
Self::input_type(),
Self::return_type(),
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
Arc::new(move |input: &_| Self::new(nearest).calc(input)) as _,
)
}
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
if inputs.len() != 2 {
return Err(DataFusionError::Plan(
"Round function should have 2 inputs".to_string(),
));
}
let ColumnarValue::Scalar(ScalarValue::Float64(Some(nearest))) = inputs[1] else {
return Err(DataFusionError::Plan(
"Round function's second input should be a scalar float64".to_string(),
));
};
Ok(Self::new(nearest))
}
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), 2);
assert_eq!(input.len(), 1);
let value_array = extract_array(&input[0])?;
@@ -95,11 +80,8 @@ mod tests {
use super::*;
fn test_round_f64(value: Vec<f64>, nearest: f64, expected: Vec<f64>) {
let round_udf = Round::scalar_udf();
let input = vec![
ColumnarValue::Array(Arc::new(Float64Array::from(value))),
ColumnarValue::Scalar(ScalarValue::Float64(Some(nearest))),
];
let round_udf = Round::scalar_udf(nearest);
let input = vec![ColumnarValue::Array(Arc::new(Float64Array::from(value)))];
let args = ScalarFunctionArgs {
args: input,
number_rows: 1,

View File

@@ -17,7 +17,6 @@ use std::sync::Arc;
use datafusion::arrow::array::Float64Array;
use datafusion::logical_expr::ScalarUDF;
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::ScalarFunctionArgs;
use datatypes::arrow::datatypes::DataType;
@@ -29,17 +28,13 @@ pub fn simple_range_udf_runner(
range_fn: ScalarUDF,
input_ts: RangeArray,
input_value: RangeArray,
other_args: Vec<ScalarValue>,
expected: Vec<Option<f64>>,
) {
let num_rows = input_ts.len();
let input = [
let input = vec![
ColumnarValue::Array(Arc::new(input_ts.into_dict())),
ColumnarValue::Array(Arc::new(input_value.into_dict())),
]
.into_iter()
.chain(other_args.into_iter().map(ColumnarValue::Scalar))
.collect::<Vec<_>>();
];
let args = ScalarFunctionArgs {
args: input,
number_rows: num_rows,

View File

@@ -28,7 +28,7 @@ pub mod error;
pub mod executor;
pub mod log_query;
pub mod metrics;
pub mod optimizer;
mod optimizer;
pub mod options;
pub mod parser;
mod part_sort;

View File

@@ -31,7 +31,7 @@ use datafusion::functions_aggregate::stddev::stddev_pop_udaf;
use datafusion::functions_aggregate::sum::sum_udaf;
use datafusion::functions_aggregate::variance::var_pop_udaf;
use datafusion::functions_window::row_number::RowNumber;
use datafusion::logical_expr::expr::{Alias, ScalarFunction, WindowFunction};
use datafusion::logical_expr::expr::{AggregateFunction, Alias, ScalarFunction, WindowFunction};
use datafusion::logical_expr::expr_rewriter::normalize_cols;
use datafusion::logical_expr::{
BinaryExpr, Cast, Extension, LogicalPlan, LogicalPlanBuilder, Operator,
@@ -1425,18 +1425,15 @@ impl PromPlanner {
let field_column_pos = 0;
let mut exprs = Vec::with_capacity(self.ctx.field_columns.len());
let scalar_func = match func.name {
"increase" => ScalarFunc::ExtrapolateUdf(
Arc::new(Increase::scalar_udf()),
"increase" => ScalarFunc::ExtrapolateUdf(Arc::new(Increase::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
),
"rate" => ScalarFunc::ExtrapolateUdf(
Arc::new(Rate::scalar_udf()),
))),
"rate" => ScalarFunc::ExtrapolateUdf(Arc::new(Rate::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
),
"delta" => ScalarFunc::ExtrapolateUdf(
Arc::new(Delta::scalar_udf()),
))),
"delta" => ScalarFunc::ExtrapolateUdf(Arc::new(Delta::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
),
))),
"idelta" => ScalarFunc::Udf(Arc::new(IDelta::<false>::scalar_udf())),
"irate" => ScalarFunc::Udf(Arc::new(IDelta::<true>::scalar_udf())),
"resets" => ScalarFunc::Udf(Arc::new(Resets::scalar_udf())),
@@ -1452,9 +1449,50 @@ impl PromPlanner {
"present_over_time" => ScalarFunc::Udf(Arc::new(PresentOverTime::scalar_udf())),
"stddev_over_time" => ScalarFunc::Udf(Arc::new(StddevOverTime::scalar_udf())),
"stdvar_over_time" => ScalarFunc::Udf(Arc::new(StdvarOverTime::scalar_udf())),
"quantile_over_time" => ScalarFunc::Udf(Arc::new(QuantileOverTime::scalar_udf())),
"predict_linear" => ScalarFunc::Udf(Arc::new(PredictLinear::scalar_udf())),
"holt_winters" => ScalarFunc::Udf(Arc::new(HoltWinters::scalar_udf())),
"quantile_over_time" => {
let quantile_expr = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(quantile)))) => quantile,
other => UnexpectedPlanExprSnafu {
desc: format!("expected f64 literal as quantile, but found {:?}", other),
}
.fail()?,
};
ScalarFunc::Udf(Arc::new(QuantileOverTime::scalar_udf(quantile_expr)))
}
"predict_linear" => {
let t_expr = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t as i64,
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t,
other => UnexpectedPlanExprSnafu {
desc: format!("expected i64 literal as t, but found {:?}", other),
}
.fail()?,
};
ScalarFunc::Udf(Arc::new(PredictLinear::scalar_udf(t_expr)))
}
"holt_winters" => {
let sf_exp = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(sf)))) => sf,
other => UnexpectedPlanExprSnafu {
desc: format!(
"expected f64 literal as smoothing factor, but found {:?}",
other
),
}
.fail()?,
};
let tf_exp = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(tf)))) => tf,
other => UnexpectedPlanExprSnafu {
desc: format!(
"expected f64 literal as trend factor, but found {:?}",
other
),
}
.fail()?,
};
ScalarFunc::Udf(Arc::new(HoltWinters::scalar_udf(sf_exp, tf_exp)))
}
"time" => {
exprs.push(build_special_time_expr(
self.ctx.time_index_column.as_ref().unwrap(),
@@ -1589,10 +1627,17 @@ impl PromPlanner {
ScalarFunc::GeneratedExpr
}
"round" => {
if other_input_exprs.is_empty() {
other_input_exprs.push_front(DfExpr::Literal(ScalarValue::Float64(Some(0.0))));
}
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf()))
let nearest = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t,
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t as f64,
None => 0.0,
other => UnexpectedPlanExprSnafu {
desc: format!("expected f64 literal as t, but found {:?}", other),
}
.fail()?,
};
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf(nearest)))
}
_ => {
@@ -1650,7 +1695,7 @@ impl PromPlanner {
let _ = other_input_exprs.remove(field_column_pos + 1);
let _ = other_input_exprs.remove(field_column_pos);
}
ScalarFunc::ExtrapolateUdf(func, range_length) => {
ScalarFunc::ExtrapolateUdf(func) => {
let ts_range_expr = DfExpr::Column(Column::from_name(
RangeManipulate::build_timestamp_range_name(
self.ctx.time_index_column.as_ref().unwrap(),
@@ -1660,13 +1705,11 @@ impl PromPlanner {
other_input_exprs.insert(field_column_pos + 1, col_expr);
other_input_exprs
.insert(field_column_pos + 2, self.create_time_index_column_expr()?);
other_input_exprs.push_back(lit(range_length));
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
func,
args: other_input_exprs.clone().into(),
});
exprs.push(fn_expr);
let _ = other_input_exprs.pop_back();
let _ = other_input_exprs.remove(field_column_pos + 2);
let _ = other_input_exprs.remove(field_column_pos + 1);
let _ = other_input_exprs.remove(field_column_pos);
@@ -1929,13 +1972,11 @@ impl PromPlanner {
param: &Option<Box<PromExpr>>,
input_plan: &LogicalPlan,
) -> Result<(Vec<DfExpr>, Vec<DfExpr>)> {
let mut non_col_args = Vec::new();
let aggr = match op.id() {
token::T_SUM => sum_udaf(),
token::T_QUANTILE => {
let q = Self::get_param_value_as_f64(op, param)?;
non_col_args.push(lit(q));
quantile_udaf()
quantile_udaf(q)
}
token::T_AVG => avg_udaf(),
token::T_COUNT_VALUES | token::T_COUNT => count_udaf(),
@@ -1957,12 +1998,16 @@ impl PromPlanner {
.field_columns
.iter()
.map(|col| {
non_col_args.push(DfExpr::Column(Column::from_name(col)));
let expr = aggr.call(non_col_args.clone());
non_col_args.pop();
expr
Ok(DfExpr::AggregateFunction(AggregateFunction {
func: aggr.clone(),
args: vec![DfExpr::Column(Column::from_name(col))],
distinct: false,
filter: None,
order_by: None,
null_treatment: None,
}))
})
.collect::<Vec<_>>();
.collect::<Result<Vec<_>>>()?;
// if the aggregator is `count_values`, it must be grouped by current fields.
let prev_field_exprs = if op.id() == token::T_COUNT_VALUES {
@@ -2896,8 +2941,7 @@ enum ScalarFunc {
Udf(Arc<ScalarUdfDef>),
// todo(ruihang): maybe merge with Udf later
/// UDF that require extra information like range length to be evaluated.
/// The second argument is range length.
ExtrapolateUdf(Arc<ScalarUdfDef>, i64),
ExtrapolateUdf(Arc<ScalarUdfDef>),
/// Func that doesn't require input, like `time()`.
GeneratedExpr,
}
@@ -3551,8 +3595,8 @@ mod test {
async fn increase_aggr() {
let query = "increase(some_metric[5m])";
let expected = String::from(
"Filter: prom_increase(timestamp_range,field_0,timestamp,Int64(300000)) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp, Int64(300000)) AS prom_increase(timestamp_range,field_0,timestamp,Int64(300000)), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
"Filter: prom_increase(timestamp_range,field_0,timestamp) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp) AS prom_increase(timestamp_range,field_0,timestamp), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp):Float64;N, tag_0:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
@@ -4351,8 +4395,8 @@ mod test {
let plan = PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_session_state())
.await
.unwrap();
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(Float64(0.3), sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(Millisecond, None), quantile(sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(Millisecond, None), quantile(sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\

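The planner changes above all follow one pattern: scalar arguments of PromQL calls (the quantile, predict_linear's t, holt_winters' smoothing and trend factors, round's nearest) are popped from the call's argument list as literals at plan time and handed to the UDF constructors. A hedged sketch of that extraction for an f64 argument; the helper is illustrative, not the planner's code:

use datafusion_common::ScalarValue;
use datafusion_expr::Expr as DfExpr;

// Pop the next argument and require it to be an f64 (or i64) literal.
fn expect_f64_literal(expr: Option<DfExpr>) -> Result<f64, String> {
    match expr {
        Some(DfExpr::Literal(ScalarValue::Float64(Some(v)))) => Ok(v),
        Some(DfExpr::Literal(ScalarValue::Int64(Some(v)))) => Ok(v as f64),
        other => Err(format!("expected f64 literal, but found {:?}", other)),
    }
}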
Some files were not shown because too many files have changed in this diff.