Compare commits

2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| WenyXu | 07b2ea096b | feat(standalone): support to dump/restore metadata | 2025-04-20 08:13:35 +00:00 |
| WenyXu | d55d9addf2 | feat: introduce MetadataSnaphostManager | 2025-04-20 06:32:56 +00:00 |
275 changed files with 15194 additions and 24970 deletions

.coderabbit.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true

@@ -7,8 +7,7 @@ meta:
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
auto_prune_interval = "30s"
trigger_flush_threshold = 100
auto_prune_topic_records = true
[datanode]
[datanode.client]
@@ -23,7 +22,6 @@ datanode:
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
overwrite_entry_start_id = true
frontend:
configData: |-
[runtime]

@@ -1,37 +0,0 @@
#!/bin/bash
DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci
# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_dev_builder_version
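For context, the removed helper took the dev-builder image tag as its only positional argument (the same value the workflow passed from its `version` output); a manual invocation would have looked roughly like this, with a placeholder tag value:

```bash
# Run the removed helper with a dev-builder image tag (placeholder value).
# Without an argument it prints an error and exits with status 1; it also
# expects git, gsed, and an authenticated gh CLI to be available.
./.github/scripts/update-dev-builder-version.sh 2024-01-01-abcdef12
```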

@@ -21,6 +21,32 @@ jobs:
run: sudo apt-get install -y jq
# Make the check.sh script executable
- name: Check grafana dashboards
- name: Make check.sh executable
run: chmod +x grafana/check.sh
# Run the check.sh script
- name: Run check.sh
run: ./grafana/check.sh
# Only run summary.sh for pull_request events (not for merge queues or final pushes)
- name: Check if this is a pull request
id: check-pr
run: |
make check-dashboards
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "is_pull_request=true" >> $GITHUB_OUTPUT
else
echo "is_pull_request=false" >> $GITHUB_OUTPUT
fi
# Make the summary.sh script executable
- name: Make summary.sh executable
if: steps.check-pr.outputs.is_pull_request == 'true'
run: chmod +x grafana/summary.sh
# Run the summary.sh script and add its output to the GitHub Job Summary
- name: Run summary.sh and add to Job Summary
if: steps.check-pr.outputs.is_pull_request == 'true'
run: |
SUMMARY=$(./grafana/summary.sh)
echo "### Summary of Grafana Panels" >> $GITHUB_STEP_SUMMARY
echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY

@@ -24,19 +24,11 @@ on:
description: Release dev-builder-android image
required: false
default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false
jobs:
release-dev-builder-images:
name: Release dev builder images
# The jobs are triggered by the following events:
# 1. Manually triggered workflow_dispatch event
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-latest
outputs:
version: ${{ steps.set-version.outputs.version }}
@@ -65,9 +57,9 @@ jobs:
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
@@ -93,7 +85,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -114,7 +106,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -135,7 +127,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -170,7 +162,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -184,7 +176,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -198,7 +190,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -209,24 +201,3 @@ jobs:
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}

Cargo.lock generated (751 changed lines)

File diff suppressed because it is too large.

@@ -68,7 +68,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.14.3"
version = "0.14.0"
edition = "2021"
license = "Apache-2.0"
@@ -112,15 +112,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4d4136692fe7fbbd509ebc8c902f6afcc0ce61e4" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b6d9cffd43c4e6358805a798f17e03e232994b82" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -161,10 +161,8 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
"ser",
] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
promql-parser = { version = "0.5.1", features = ["ser"] }
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"
@@ -193,7 +191,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"
@@ -271,9 +269,6 @@ metric-engine = { path = "src/metric-engine" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
otel-arrow-rust = { git = "https://github.com/open-telemetry/otel-arrow", rev = "5d551412d2a12e689cde4d84c14ef29e36784e51", features = [
"server",
] }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }

@@ -222,16 +222,6 @@ start-cluster: ## Start the greptimedb cluster with etcd by using docker compose
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Grafana
.PHONY: check-dashboards
check-dashboards: ## Check the Grafana dashboards.
@./grafana/scripts/check.sh
.PHONY: dashboards
dashboards: ## Generate the Grafana dashboards for standalone mode and intermediate dashboards.
@./grafana/scripts/gen-dashboards.sh
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \

@@ -6,7 +6,7 @@
</picture>
</p>
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2>
<h2 align="center">Unified & Cost-Effective Observability Database for Metrics, Logs, and Events</h2>
<div align="center">
<h3 align="center">

@@ -319,7 +319,6 @@
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |

@@ -50,10 +50,6 @@ use_memory_store = false
## - Using shared storage (e.g., s3).
enable_region_failover = false
## Whether to allow region failover on local WAL.
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
allow_region_failover_on_local_wal = false
## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"

@@ -1,89 +1,61 @@
# Grafana dashboards for GreptimeDB
Grafana dashboard for GreptimeDB
--------------------------------
## Overview
GreptimeDB's official Grafana dashboard.
This repository maintains the Grafana dashboards for GreptimeDB. It has two types of dashboards:
Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗
- `cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
- `standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
As the rapid development of GreptimeDB, the metrics may be changed, and please feel free to submit your feedback and/or contribution to this dashboard 🤗
**NOTE**:
- The Grafana version should be greater than 9.0.
- If you want to modify the dashboards, you only need to modify the `cluster/dashboard.json` and run the `make dashboards` command to generate the `standalone/dashboard.json` and other related files.
To maintain the dashboards easily, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
- `cluster/dashboard.yaml`: The intermediate dashboard for the GreptimeDB cluster.
- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
## Data Sources
There are two data sources for the dashboards to fetch the metrics:
- **Prometheus**: Expose the metrics of GreptimeDB.
- **Information Schema**: It is the MySQL port of the current monitored instance. The `overview` dashboard will use this datasource to show the information schema of the current instance.
## Instance Filters
To deploy the dashboards for multiple scenarios (K8s, bare metal, etc.), we prefer to use the `instance` label when filtering instances.
Additionally, we recommend including the `pod` label in the legend to make it easier to identify each instance, even though this field will be empty in bare metal scenarios.
For example, the following query is recommended:
```promql
sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
```
And the legend will be like: `[{{instance}}]-[{{ pod }}]`.
## Deployment
### Helm
If you use the Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy a GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
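Expressed as Helm values, the two options above map onto a snippet along these lines (a sketch; the YAML nesting is assumed from the dotted option names):

```bash
# Write a minimal values override enabling self-monitoring and Grafana
# (layout assumed from the `monitoring.enabled` / `grafana.enabled` options).
cat > monitoring-values.yaml <<'EOF'
monitoring:
  enabled: true
grafana:
  enabled: true
EOF
```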
### Self-host Prometheus and import dashboards manually
# How to use
1. **Configure Prometheus to scrape the cluster**
## `greptimedb.json`
The following is an example configuration(**Please modify it according to your actual situation**):
Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file.
```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
- job_name: metasrv
static_configs:
- targets: ['<metasrv-ip>:<port>']
## `greptimedb-cluster.json`
- job_name: datanode
static_configs:
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.
- job_name: frontend
static_configs:
- targets: ['<frontend-ip>:<port>']
```
We use Prometheus to scrape off metrics from nodes in GreptimeDB cluster, Grafana to visualize the diagram. Any compatible stack should work too.
2. **Configure the data sources in Grafana**
__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcomed.
You need to add two data sources in Grafana:
### Configuration
- Prometheus: It is the Prometheus instance that scrapes the GreptimeDB metrics.
- Information Schema: It is the MySQL port of the current monitored instance. The dashboard will use this datasource to show the information schema of the current instance.
Please ensure the following configuration before importing the dashboard into Grafana.
3. **Import the dashboards based on your deployment scenario**
__1. Prometheus scrape config__
- **Cluster**: Import the `cluster/dashboard.json` dashboard.
- **Standalone**: Import the `standalone/dashboard.json` dashboard.
Configure Prometheus to scrape the cluster.
```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
- job_name: metasrv
static_configs:
- targets: ['<metasrv-ip>:<port>']
- job_name: datanode
static_configs:
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
- job_name: frontend
static_configs:
- targets: ['<frontend-ip>:<port>']
```
__2. Grafana config__
Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in Grafana dashboard so that multiple environments are supported.
### Usage
Use `datasource` or `instance` on the upper-left corner to filter data from certain node.

grafana/check.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
BASEDIR=$(dirname "$0")
# Use jq to check for panels with empty or missing descriptions
invalid_panels=$(cat $BASEDIR/greptimedb-cluster.json | jq -r '
.panels[]
| select((.type == "stats" or .type == "timeseries") and (.description == "" or .description == null))
')
# Check if any invalid panels were found
if [[ -n "$invalid_panels" ]]; then
echo "Error: The following panels have empty or missing descriptions:"
echo "$invalid_panels"
exit 1
else
echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
exit 0
fi
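The same filter can be run by hand when debugging a dashboard change; for example, the variant below prints only the offending panel titles instead of the full panel objects (assuming the dashboard JSON sits next to the script, as in the snippet above):

```bash
# List "stats"/"timeseries" panels whose description is empty or missing,
# mirroring check.sh but emitting just the panel titles.
jq -r '.panels[]
  | select((.type == "stats" or .type == "timeseries")
           and (.description == "" or .description == null))
  | .title' grafana/greptimedb-cluster.json
```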

File diff suppressed because it is too large.

@@ -1,97 +0,0 @@
# Overview
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
# Ingestion
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
# Queries
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
# Frontend to Datanode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |

@@ -1,769 +0,0 @@
groups:
- title: Overview
panels:
- title: Uptime
type: stat
description: The start time of GreptimeDB.
unit: s
queries:
- expr: time() - process_start_time_seconds
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Version
type: stat
description: GreptimeDB version.
queries:
- expr: SELECT pkg_version FROM information_schema.build_info
datasource:
type: mysql
uid: ${information_schema}
- title: Total Ingestion Rate
type: stat
description: Total ingestion rate.
unit: rowsps
queries:
- expr: sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Total Storage Size
type: stat
description: Total number of data file size.
unit: decbytes
queries:
- expr: select SUM(disk_size) from information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Total Rows
type: stat
description: Total number of data rows in the cluster. Calculated by sum of rows from each region.
unit: sishort
queries:
- expr: select SUM(region_rows) from information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Deployment
type: stat
description: The deployment topology of GreptimeDB.
queries:
- expr: SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';
datasource:
type: mysql
uid: ${information_schema}
- title: Database Resources
type: stat
description: The number of the key resources in GreptimeDB.
queries:
- expr: SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(region_id) as regions FROM information_schema.region_peers
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(*) as flows FROM information_schema.flows
datasource:
type: mysql
uid: ${information_schema}
- title: Data Size
type: stat
description: The data size of wal/index/manifest in the GreptimeDB.
unit: decbytes
queries:
- expr: SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT SUM(index_size) as index FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Ingestion
panels:
- title: Total Ingestion Rate
type: timeseries
description: |
Total ingestion rate.
Here we listed 3 primary protocols:
- Prometheus remote write
- Greptime's gRPC API (when using our ingest SDK)
- Log ingestion http API
unit: rowsps
queries:
- expr: sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: ingestion
- title: Ingestion Rate by Type
type: timeseries
description: |
Total ingestion rate.
Here we listed 3 primary protocols:
- Prometheus remote write
- Greptime's gRPC API (when using our ingest SDK)
- Log ingestion http API
unit: rowsps
queries:
- expr: sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: http-logs
- expr: sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: prometheus-remote-write
- title: Queries
panels:
- title: Total Query Rate
type: timeseries
description: |-
Total rate of query API calls by protocol. This metric is collected from frontends.
Here we listed 3 main protocols:
- MySQL
- Postgres
- Prometheus API
Note that there are some other minor query APIs like /sql are not included
unit: reqps
queries:
- expr: sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: mysql
- expr: sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: pg
- expr: sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: promql
- title: Resources
panels:
- title: Datanode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{ pod }}]'
- title: Datanode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
- title: Metasrv Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
- title: Metasrv CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Flownode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Flownode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend Requests
panels:
- title: HTTP QPS per Instance
type: timeseries
description: HTTP QPS per Instance.
unit: reqps
queries:
- expr: sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health|/metrics"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]'
- title: HTTP P99 per Instance
type: timeseries
description: HTTP P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health|/metrics"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99'
- title: gRPC QPS per Instance
type: timeseries
description: gRPC QPS per Instance.
unit: reqps
queries:
- expr: sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]'
- title: gRPC P99 per Instance
type: timeseries
description: gRPC P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99'
- title: MySQL QPS per Instance
type: timeseries
description: MySQL QPS per Instance.
unit: reqps
queries:
- expr: sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: MySQL P99 per Instance
type: timeseries
description: MySQL P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-p99'
- title: PostgreSQL QPS per Instance
type: timeseries
description: PostgreSQL QPS per Instance.
unit: reqps
queries:
- expr: sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: PostgreSQL P99 per Instance
type: timeseries
description: PostgreSQL P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Frontend to Datanode
panels:
- title: Ingest Rows per Instance
type: timeseries
description: Ingestion rate by row as in each frontend
unit: rowsps
queries:
- expr: sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Region Call QPS per Instance
type: timeseries
description: Region Call QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: Region Call P99 per Instance
type: timeseries
description: Region Call P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: Mito Engine
panels:
- title: Request OPS per Instance
type: timeseries
description: Request QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Request P99 per Instance
type: timeseries
description: Request P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Write Buffer per Instance
type: timeseries
description: Write Buffer per Instance.
unit: decbytes
queries:
- expr: greptime_mito_write_buffer_bytes{instance=~"$datanode"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Write Rows per Instance
type: timeseries
description: Ingestion size by row counts.
unit: rowsps
queries:
- expr: sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Flush OPS per Instance
type: timeseries
description: Flush QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{reason}}]'
- title: Write Stall per Instance
type: timeseries
description: Write Stall per Instance.
queries:
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Read Stage OPS per Instance
type: timeseries
description: Read Stage OPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Read Stage P99 per Instance
type: timeseries
description: Read Stage P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]'
- title: Write Stage P99 per Instance
type: timeseries
description: Write Stage P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]'
- title: Compaction OPS per Instance
type: timeseries
description: Compaction OPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction P99 per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction'
- title: WAL write size
type: timeseries
description: Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate.
unit: bytes
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-req-size-p95'
- expr: histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-req-size-p99'
- expr: sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-throughput'
- title: Cached Bytes per Instance
type: timeseries
description: Cached Bytes per Instance.
unit: decbytes
queries:
- expr: greptime_mito_cache_bytes{instance=~"$datanode"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Inflight Compaction
type: timeseries
description: Ongoing compaction task count
unit: none
queries:
- expr: greptime_mito_inflight_compaction_count
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: WAL sync duration seconds
type: timeseries
description: Raft engine (local disk) log store sync latency, p99
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Log Store op duration seconds
type: timeseries
description: Write-ahead log operations latency at p99
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99'
- title: Inflight Flush
type: timeseries
description: Ongoing flush task count
unit: none
queries:
- expr: greptime_mito_inflight_flush_count
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: OpenDAL
panels:
- title: QPS per Instance
type: timeseries
description: QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Read QPS per Instance
type: timeseries
description: Read QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: Read P99 per Instance
type: timeseries
description: Read P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
- title: Write QPS per Instance
type: timeseries
description: Write QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
- title: Write P99 per Instance
type: timeseries
description: Write P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: List QPS per Instance
type: timeseries
description: List QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: List P99 per Instance
type: timeseries
description: List P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: Other Requests per Instance
type: timeseries
description: Other Requests per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read|write|list|stat"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Other Request P99 per Instance
type: timeseries
description: Other Request P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read|write|list"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Opendal traffic
type: timeseries
description: Total traffic as in bytes by instance and operation
unit: decbytes
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: OpenDAL errors per Instance
type: timeseries
description: OpenDAL error counts per Instance.
queries:
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
- title: Metasrv
panels:
- title: Region migration datanode
type: state-timeline
description: Counter of region migration by source and destination
unit: none
queries:
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: from-datanode-{{datanode_id}}
- expr: greptime_meta_region_migration_stat{datanode_type="desc"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: to-datanode-{{datanode_id}}
- title: Region migration error
type: timeseries
description: Counter of region migration error
unit: none
queries:
- expr: greptime_meta_region_migration_error
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Datanode load
type: timeseries
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
unit: none
queries:
- expr: greptime_datanode_load
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Flownode
panels:
- title: Flow Ingest / Output Rate
type: timeseries
description: Flow Ingest / Output Rate.
queries:
- expr: sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{pod}}]-[{{instance}}]-[{{direction}}]'
- title: Flow Ingest Latency
type: timeseries
description: Flow Ingest Latency.
queries:
- expr: histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p95'
- expr: histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Flow Operation Latency
type: timeseries
description: Flow Operation Latency.
queries:
- expr: histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]-p95'
- expr: histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]-p99'
- title: Flow Buffer Size per Instance
type: timeseries
description: Flow Buffer Size per Instance.
queries:
- expr: greptime_flow_input_buf_size
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Flow Processing Error per Instance
type: timeseries
description: Flow Processing Error per Instance.
queries:
- expr: sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{code}}]'

File diff suppressed because it is too large.

View File

@@ -1,97 +0,0 @@
# Overview
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
# Ingestion
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
# Queries
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that some other minor query APIs, such as /sql, are not included | `prometheus` | `reqps` | `mysql` |
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
# Frontend to Datanode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
# Metasrv
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
# Flownode
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |

View File

@@ -1,769 +0,0 @@
groups:
- title: Overview
panels:
- title: Uptime
type: stat
description: The start time of GreptimeDB.
unit: s
queries:
- expr: time() - process_start_time_seconds
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Version
type: stat
description: GreptimeDB version.
queries:
- expr: SELECT pkg_version FROM information_schema.build_info
datasource:
type: mysql
uid: ${information_schema}
- title: Total Ingestion Rate
type: stat
description: Total ingestion rate.
unit: rowsps
queries:
- expr: sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Total Storage Size
type: stat
description: Total number of data file size.
unit: decbytes
queries:
- expr: select SUM(disk_size) from information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Total Rows
type: stat
description: Total number of data rows in the cluster. Calculated by sum of rows from each region.
unit: sishort
queries:
- expr: select SUM(region_rows) from information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Deployment
type: stat
description: The deployment topology of GreptimeDB.
queries:
- expr: SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';
datasource:
type: mysql
uid: ${information_schema}
- title: Database Resources
type: stat
description: The number of the key resources in GreptimeDB.
queries:
- expr: SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(region_id) as regions FROM information_schema.region_peers
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT COUNT(*) as flows FROM information_schema.flows
datasource:
type: mysql
uid: ${information_schema}
- title: Data Size
type: stat
description: The data size of wal/index/manifest in the GreptimeDB.
unit: decbytes
queries:
- expr: SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT SUM(index_size) as index FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- expr: SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;
datasource:
type: mysql
uid: ${information_schema}
- title: Ingestion
panels:
- title: Total Ingestion Rate
type: timeseries
description: |
Total ingestion rate.
Here we listed 3 primary protocols:
- Prometheus remote write
- Greptime's gRPC API (when using our ingest SDK)
- Log ingestion http API
unit: rowsps
queries:
- expr: sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: ingestion
- title: Ingestion Rate by Type
type: timeseries
description: |
Total ingestion rate.
Here we listed 3 primary protocols:
- Prometheus remote write
- Greptime's gRPC API (when using our ingest SDK)
- Log ingestion http API
unit: rowsps
queries:
- expr: sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: http-logs
- expr: sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: prometheus-remote-write
- title: Queries
panels:
- title: Total Query Rate
type: timeseries
description: |-
Total rate of query API calls by protocol. This metric is collected from frontends.
Here we listed 3 main protocols:
- MySQL
- Postgres
- Prometheus API
Note that some other minor query APIs, such as /sql, are not included
unit: reqps
queries:
- expr: sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: mysql
- expr: sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: pg
- expr: sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: promql
- title: Resources
panels:
- title: Datanode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{ pod }}]'
- title: Datanode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
- title: Metasrv Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
- title: Metasrv CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Flownode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Flownode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
unit: none
queries:
- expr: sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- title: Frontend Requests
panels:
- title: HTTP QPS per Instance
type: timeseries
description: HTTP QPS per Instance.
unit: reqps
queries:
- expr: sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health|/metrics"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]'
- title: HTTP P99 per Instance
type: timeseries
description: HTTP P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health|/metrics"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99'
- title: gRPC QPS per Instance
type: timeseries
description: gRPC QPS per Instance.
unit: reqps
queries:
- expr: sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]'
- title: gRPC P99 per Instance
type: timeseries
description: gRPC P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99'
- title: MySQL QPS per Instance
type: timeseries
description: MySQL QPS per Instance.
unit: reqps
queries:
- expr: sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: MySQL P99 per Instance
type: timeseries
description: MySQL P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-p99'
- title: PostgreSQL QPS per Instance
type: timeseries
description: PostgreSQL QPS per Instance.
unit: reqps
queries:
- expr: sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: PostgreSQL P99 per Instance
type: timeseries
description: PostgreSQL P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Frontend to Datanode
panels:
- title: Ingest Rows per Instance
type: timeseries
description: Ingestion rate by row as in each frontend
unit: rowsps
queries:
- expr: sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Region Call QPS per Instance
type: timeseries
description: Region Call QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: Region Call P99 per Instance
type: timeseries
description: Region Call P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: Mito Engine
panels:
- title: Request OPS per Instance
type: timeseries
description: Request QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Request P99 per Instance
type: timeseries
description: Request P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Write Buffer per Instance
type: timeseries
description: Write Buffer per Instance.
unit: decbytes
queries:
- expr: greptime_mito_write_buffer_bytes{}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Write Rows per Instance
type: timeseries
description: Ingestion size by row counts.
unit: rowsps
queries:
- expr: sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Flush OPS per Instance
type: timeseries
description: Flush QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{reason}}]'
- title: Write Stall per Instance
type: timeseries
description: Write Stall per Instance.
queries:
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Read Stage OPS per Instance
type: timeseries
description: Read Stage OPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Read Stage P99 per Instance
type: timeseries
description: Read Stage P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]'
- title: Write Stage P99 per Instance
type: timeseries
description: Write Stage P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]'
- title: Compaction OPS per Instance
type: timeseries
description: Compaction OPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction P99 per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction'
- title: WAL write size
type: timeseries
description: Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate.
unit: bytes
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-req-size-p95'
- expr: histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-req-size-p99'
- expr: sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-throughput'
- title: Cached Bytes per Instance
type: timeseries
description: Cached Bytes per Instance.
unit: decbytes
queries:
- expr: greptime_mito_cache_bytes{}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: Inflight Compaction
type: timeseries
description: Ongoing compaction task count
unit: none
queries:
- expr: greptime_mito_inflight_compaction_count
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: WAL sync duration seconds
type: timeseries
description: Raft engine (local disk) log store sync latency, p99
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Log Store op duration seconds
type: timeseries
description: Write-ahead log operations latency at p99
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99'
- title: Inflight Flush
type: timeseries
description: Ongoing flush task count
unit: none
queries:
- expr: greptime_mito_inflight_flush_count
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: OpenDAL
panels:
- title: QPS per Instance
type: timeseries
description: QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Read QPS per Instance
type: timeseries
description: Read QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: Read P99 per Instance
type: timeseries
description: Read P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
- title: Write QPS per Instance
type: timeseries
description: Write QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
- title: Write P99 per Instance
type: timeseries
description: Write P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: List QPS per Instance
type: timeseries
description: List QPS per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: List P99 per Instance
type: timeseries
description: List P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
- title: Other Requests per Instance
type: timeseries
description: Other Requests per Instance.
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read|write|list|stat"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Other Request P99 per Instance
type: timeseries
description: Other Request P99 per Instance.
unit: s
queries:
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read|write|list"}[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: Opendal traffic
type: timeseries
description: Total traffic as in bytes by instance and operation
unit: decbytes
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: OpenDAL errors per Instance
type: timeseries
description: OpenDAL error counts per Instance.
queries:
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
- title: Metasrv
panels:
- title: Region migration datanode
type: state-timeline
description: Counter of region migration by source and destination
unit: none
queries:
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: from-datanode-{{datanode_id}}
- expr: greptime_meta_region_migration_stat{datanode_type="desc"}
datasource:
type: prometheus
uid: ${metrics}
legendFormat: to-datanode-{{datanode_id}}
- title: Region migration error
type: timeseries
description: Counter of region migration error
unit: none
queries:
- expr: greptime_meta_region_migration_error
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Datanode load
type: timeseries
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
unit: none
queries:
- expr: greptime_datanode_load
datasource:
type: prometheus
uid: ${metrics}
legendFormat: __auto
- title: Flownode
panels:
- title: Flow Ingest / Output Rate
type: timeseries
description: Flow Ingest / Output Rate.
queries:
- expr: sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{pod}}]-[{{instance}}]-[{{direction}}]'
- title: Flow Ingest Latency
type: timeseries
description: Flow Ingest Latency.
queries:
- expr: histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p95'
- expr: histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-p99'
- title: Flow Operation Latency
type: timeseries
description: Flow Operation Latency.
queries:
- expr: histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]-p95'
- expr: histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]-p99'
- title: Flow Buffer Size per Instance
type: timeseries
description: Flow Buffer Size per Instance.
queries:
- expr: greptime_flow_input_buf_size
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Flow Processing Error per Instance
type: timeseries
description: Flow Processing Error per Instance.
queries:
- expr: sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{code}}]'

File diff suppressed because it is too large.

4159
grafana/greptimedb.json Normal file

File diff suppressed because it is too large.

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env bash
DASHBOARD_DIR=${1:-grafana/dashboards}
check_dashboard_description() {
for dashboard in $(find $DASHBOARD_DIR -name "*.json"); do
echo "Checking $dashboard description"
# Use jq to check for panels with empty or missing descriptions
invalid_panels=$(cat $dashboard | jq -r '
.panels[]
| select((.type == "stat" or .type == "timeseries") and (.description == "" or .description == null))')
# Check if any invalid panels were found
if [[ -n "$invalid_panels" ]]; then
echo "Error: The following panels have empty or missing descriptions:"
echo "$invalid_panels"
exit 1
else
echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
fi
done
}
check_dashboards_generation() {
./grafana/scripts/gen-dashboards.sh
if [[ -n "$(git diff --name-only grafana/dashboards)" ]]; then
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
exit 1
fi
}
check_datasource() {
for dashboard in $(find $DASHBOARD_DIR -name "*.json"); do
echo "Checking $dashboard datasource"
jq -r '.panels[] | select(.type != "row") | .targets[] | [.datasource.type, .datasource.uid] | @tsv' $dashboard | while read -r type uid; do
# if the datasource is prometheus, check if the uid is ${metrics}
if [[ "$type" == "prometheus" && "$uid" != "\${metrics}" ]]; then
echo "Error: The datasource uid of $dashboard is not valid. It should be \${metrics}, got $uid"
exit 1
fi
# if the datasource is mysql, check if the uid is ${information_schema}
if [[ "$type" == "mysql" && "$uid" != "\${information_schema}" ]]; then
echo "Error: The datasource uid of $dashboard is not valid. It should be \${information_schema}, got $uid"
exit 1
fi
done
done
}
check_dashboards_generation
check_dashboard_description
check_datasource
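For reference, a minimal local run of these checks might look like the sketch below (paths are assumptions: the script is assumed to be saved as grafana/check.sh and run from the repository root; the dashboard directory argument falls back to grafana/dashboards, and the generation check additionally needs Docker for the dashboard tooling image):
./grafana/check.sh                        # check the default grafana/dashboards directory
./grafana/check.sh path/to/dashboards     # or point it at another directory of dashboard JSON files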

View File

@@ -1,25 +0,0 @@
#! /usr/bin/env bash
CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/standalone}
DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35
remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//; s/instance=~\\"$frontend\\",//; s/instance=~\\"$frontend\\"//; s/instance=~\\"$metasrv\\",//; s/instance=~\\"$metasrv\\"//; s/instance=~\\"$flownode\\",//; s/instance=~\\"$flownode\\"//;' $CLUSTER_DASHBOARD_DIR/dashboard.json > $STANDALONE_DASHBOARD_DIR/dashboard.json
}
generate_intermediate_dashboards_and_docs() {
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.md
}
remove_instance_filters
generate_intermediate_dashboards_and_docs
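As an illustrative sketch of what remove_instance_filters does (the sample expression mirrors one of the cluster dashboard queries; quotes are escaped as they appear in the dashboard JSON), the sed expressions strip the per-component instance matchers so the standalone queries cover all instances:
echo 'rate(greptime_mito_write_rows_total{instance=~\"$datanode\"}[$__rate_interval])' \
  | sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//'
# prints: rate(greptime_mito_write_rows_total{}[$__rate_interval])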

11
grafana/summary.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
BASEDIR=$(dirname "$0")
echo '| Title | Description | Expressions |
|---|---|---|'
cat $BASEDIR/greptimedb-cluster.json | jq -r '
.panels |
map(select(.type == "stat" or .type == "timeseries")) |
.[] | "| \(.title) | \(.description | gsub("\n"; "<br>")) | \(.targets | map(.expr // .rawSql | "`\(.|gsub("\n"; "<br>"))`") | join("<br>")) |"
'
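For local use, the generated table can simply be redirected into a Markdown file (this assumes greptimedb-cluster.json sits next to the script, as the BASEDIR lookup implies):
./grafana/summary.sh > panel-summary.md
Each row then renders as | <title> | <description> | <expressions> |, matching the column order of the jq template above.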

View File

@@ -514,7 +514,6 @@ fn query_request_type(request: &QueryRequest) -> &'static str {
Some(Query::Sql(_)) => "query.sql",
Some(Query::LogicalPlan(_)) => "query.logical_plan",
Some(Query::PromRangeQuery(_)) => "query.prom_range",
Some(Query::InsertIntoPlan(_)) => "query.insert_into_plan",
None => "query.empty",
}
}

View File

@@ -27,7 +27,7 @@ use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
pub mod dummy_catalog;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use table::TableRef;

View File

@@ -36,8 +36,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
@@ -192,36 +192,6 @@ impl Database {
from_grpc_response(response)
}
/// Retry if the connection fails. `max_retries` is the maximum number of retries, so the total wait
/// time is `max_retries * GRPC_CONN_TIMEOUT`.
pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut retries = 0;
let request = self.to_rpc_request(request);
loop {
let raw_response = client.handle(request.clone()).await;
match (raw_response, retries < max_retries) {
(Ok(resp), _) => return from_grpc_response(resp.into_inner()),
(Err(err), true) => {
// determine if the error is retryable
if is_grpc_retryable(&err) {
// retry
retries += 1;
warn!("Retrying {} times with error = {:?}", retries, err);
continue;
}
}
(Err(err), false) => {
error!(
"Failed to send request to grpc handle after {} retries, error = {:?}",
retries, err
);
return Err(err.into());
}
}
}
}
#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
@@ -398,11 +368,6 @@ impl Database {
}
}
/// by grpc standard, only `Unavailable` is retryable, see: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc
pub fn is_grpc_retryable(err: &tonic::Status) -> bool {
matches!(err.code(), tonic::Code::Unavailable)
}
#[derive(Default, Debug, Clone)]
struct FlightContext {
auth_header: Option<AuthHeader>,
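
Not part of the diff: the hunk in this file touches a hand-rolled retry loop gated by `is_grpc_retryable`. The helper below is a minimal sketch of the same retry-until-non-retryable shape, written synchronously and generically for brevity; the function name and closure-based signature are illustrative and do not exist in the codebase.

fn retry_until_non_retryable<T, E>(
    max_retries: u32,
    is_retryable: impl Fn(&E) -> bool,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut retries = 0;
    loop {
        match op() {
            Ok(value) => return Ok(value),
            // Keep retrying while the error is retryable and the budget is not spent.
            Err(err) if is_retryable(&err) && retries < max_retries => retries += 1,
            // Non-retryable error, or the retry budget is exhausted: give up.
            Err(err) => return Err(err),
        }
    }
}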

View File

@@ -78,6 +78,13 @@ pub enum Error {
source: datanode::error::Error,
},
#[snafu(display("Failed to build object storage manager"))]
BuildObjectStorageManager {
#[snafu(implicit)]
location: Location,
source: datanode::error::Error,
},
#[snafu(display("Failed to shutdown datanode"))]
ShutdownDatanode {
#[snafu(implicit)]
@@ -328,6 +335,8 @@ impl ErrorExt for Error {
source.status_code()
}
Error::BuildObjectStorageManager { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }

View File

@@ -345,7 +345,7 @@ impl StartCommand {
let client = Arc::new(NodeClients::new(channel_config));
let invoker = FrontendInvoker::build_from(
flownode.flow_engine().streaming_engine(),
flownode.flow_worker_manager().clone(),
catalog_manager.clone(),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
@@ -355,9 +355,7 @@ impl StartCommand {
.await
.context(StartFlownodeSnafu)?;
flownode
.flow_engine()
.streaming_engine()
// TODO(discord9): refactor and avoid circular reference
.flow_worker_manager()
.set_frontend_invoker(invoker)
.await;

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::time::Duration;
use async_trait::async_trait;
@@ -132,8 +131,8 @@ impl SubCommand {
}
}
#[derive(Default, Parser)]
pub struct StartCommand {
#[derive(Debug, Default, Parser)]
struct StartCommand {
/// The address to bind the gRPC server.
#[clap(long, alias = "bind-addr")]
rpc_bind_addr: Option<String>,
@@ -172,29 +171,8 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}
impl fmt::Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
.field("rpc_server_addr", &self.rpc_server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("config_file", &self.config_file)
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
.field("enable_region_failover", &self.enable_region_failover)
.field("http_addr", &self.http_addr)
.field("http_timeout", &self.http_timeout)
.field("env_prefix", &self.env_prefix)
.field("data_home", &self.data_home)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("backend", &self.backend)
.finish()
}
}
impl StartCommand {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
let mut opts = MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
@@ -206,15 +184,6 @@ impl StartCommand {
Ok(opts)
}
fn sanitize_store_addrs(&self) -> Option<Vec<String>> {
self.store_addrs.as_ref().map(|addrs| {
addrs
.iter()
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
.collect()
})
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
@@ -292,7 +261,7 @@ impl StartCommand {
Ok(())
}
pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(

View File

@@ -44,6 +44,7 @@ use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::region_registry::LeaderRegionRegistry;
use common_meta::sequence::SequenceBuilder;
use common_meta::snapshot::MetadataSnapshotManager;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info;
@@ -56,8 +57,8 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{
FlowConfig, FlownodeBuilder, FlownodeInstance, FlownodeOptions, FrontendClient,
FrontendInvoker, GrpcQueryHandlerWithBoxedError, StreamingEngine,
FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeInstance, FlownodeOptions,
FrontendClient, FrontendInvoker,
};
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::instance::builder::FrontendBuilder;
@@ -497,6 +498,10 @@ impl StartCommand {
.build(),
);
let object_store_manager = DatanodeBuilder::build_object_store_manager(&dn_opts.storage)
.await
.context(error::BuildObjectStorageManagerSnafu)?;
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone(), Mode::Standalone)
.with_kv_backend(kv_backend.clone())
.with_cache_registry(layered_cache_registry.clone())
@@ -524,17 +529,17 @@ impl StartCommand {
..Default::default()
};
// for standalone not use grpc, but get a handler to frontend grpc client without
// TODO(discord9): for standalone not use grpc, but just somehow get a handler to frontend grpc client without
// actually make a connection
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler();
let fe_server_addr = fe_opts.grpc.bind_addr.clone();
let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
flow_metadata_manager.clone(),
Arc::new(frontend_client.clone()),
Arc::new(frontend_client),
);
let flownode = flow_builder
.build()
@@ -544,15 +549,15 @@ impl StartCommand {
// set the ref to query for the local flow state
{
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
let flow_worker_manager = flownode.flow_worker_manager();
information_extension
.set_flow_streaming_engine(flow_streaming_engine)
.set_flow_worker_manager(flow_worker_manager.clone())
.await;
}
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
flow_server: flownode.flow_engine(),
flow_server: flownode.flow_worker_manager(),
});
let table_id_sequence = Arc::new(
@@ -591,6 +596,11 @@ impl StartCommand {
)
.await?;
let metadata_snapshot_manager = MetadataSnapshotManager::new(
kv_backend.clone(),
object_store_manager.default_object_store().clone(),
);
let fe_instance = FrontendBuilder::new(
fe_opts.clone(),
kv_backend.clone(),
@@ -601,24 +611,16 @@ impl StartCommand {
StatementStatistics::new(opts.logging.slow_query.clone()),
)
.with_plugin(plugins.clone())
.with_metadata_snapshot_manager(metadata_snapshot_manager)
.try_build()
.await
.context(error::StartFrontendSnafu)?;
let fe_instance = Arc::new(fe_instance);
// set the frontend client for flownode
let grpc_handler = fe_instance.clone() as Arc<dyn GrpcQueryHandlerWithBoxedError>;
let weak_grpc_handler = Arc::downgrade(&grpc_handler);
frontend_instance_handler
.lock()
.unwrap()
.replace(weak_grpc_handler);
// set the frontend invoker for flownode
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
let flow_worker_manager = flownode.flow_worker_manager();
// flow server need to be able to use frontend to write insert requests back
let invoker = FrontendInvoker::build_from(
flow_streaming_engine.clone(),
flow_worker_manager.clone(),
catalog_manager.clone(),
kv_backend.clone(),
layered_cache_registry.clone(),
@@ -627,7 +629,7 @@ impl StartCommand {
)
.await
.context(error::StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
flow_worker_manager.set_frontend_invoker(invoker).await;
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;
@@ -703,7 +705,7 @@ pub struct StandaloneInformationExtension {
region_server: RegionServer,
procedure_manager: ProcedureManagerRef,
start_time_ms: u64,
flow_streaming_engine: RwLock<Option<Arc<StreamingEngine>>>,
flow_worker_manager: RwLock<Option<Arc<FlowWorkerManager>>>,
}
impl StandaloneInformationExtension {
@@ -712,14 +714,14 @@ impl StandaloneInformationExtension {
region_server,
procedure_manager,
start_time_ms: common_time::util::current_time_millis() as u64,
flow_streaming_engine: RwLock::new(None),
flow_worker_manager: RwLock::new(None),
}
}
/// Set the flow streaming engine for the standalone instance.
pub async fn set_flow_streaming_engine(&self, flow_streaming_engine: Arc<StreamingEngine>) {
let mut guard = self.flow_streaming_engine.write().await;
*guard = Some(flow_streaming_engine);
/// Set the flow worker manager for the standalone instance.
pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowWorkerManager>) {
let mut guard = self.flow_worker_manager.write().await;
*guard = Some(flow_worker_manager);
}
}
@@ -798,7 +800,7 @@ impl InformationExtension for StandaloneInformationExtension {
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
Ok(Some(
self.flow_streaming_engine
self.flow_worker_manager
.read()
.await
.as_ref()

View File

@@ -74,7 +74,6 @@ fn test_load_datanode_example_config() {
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
logging: LoggingOptions {
@@ -217,7 +216,6 @@ fn test_load_standalone_example_config() {
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
experimental_sparse_primary_key_encoding: false,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
storage: StorageConfig {

View File

@@ -31,8 +31,7 @@ impl Plugins {
}
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let last = self.write().insert(value);
assert!(last.is_none(), "each type of plugins must be one and only");
let _ = self.write().insert(value);
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -138,12 +137,4 @@ mod tests {
assert_eq!(plugins.len(), 2);
assert!(!plugins.is_empty());
}
#[test]
#[should_panic(expected = "each type of plugins must be one and only")]
fn test_plugin_uniqueness() {
let plugins = Plugins::new();
plugins.insert(1i32);
plugins.insert(2i32);
}
}
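
For context on the hunk above: the two `insert` bodies differ only in whether a second value of the same type is tolerated. The standalone sketch below spells out that difference over a plain type-keyed map; `TypeMap` and both method names are made up for illustration and say nothing about the real `Plugins` internals.

use std::any::{Any, TypeId};
use std::collections::HashMap;

#[derive(Default)]
struct TypeMap(HashMap<TypeId, Box<dyn Any>>);

impl TypeMap {
    // Variant that insists on one and only one value per type.
    fn insert_unique<T: 'static>(&mut self, value: T) {
        let last = self.0.insert(TypeId::of::<T>(), Box::new(value));
        assert!(last.is_none(), "each type of plugins must be one and only");
    }

    // Variant that silently replaces any previous value of the same type.
    fn insert_replace<T: 'static>(&mut self, value: T) {
        let _ = self.0.insert(TypeId::of::<T>(), Box::new(value));
    }
}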

View File

@@ -15,6 +15,7 @@
mod add_region_follower;
mod flush_compact_region;
mod flush_compact_table;
mod metadata_snaphost;
mod migrate_region;
mod remove_region_follower;
@@ -23,6 +24,7 @@ use std::sync::Arc;
use add_region_follower::AddRegionFollowerFunction;
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use metadata_snaphost::{DumpMetadataFunction, RestoreMetadataFunction};
use migrate_region::MigrateRegionFunction;
use remove_region_follower::RemoveRegionFollowerFunction;
@@ -43,5 +45,7 @@ impl AdminFunction {
registry.register_async(Arc::new(FlushTableFunction));
registry.register_async(Arc::new(CompactTableFunction));
registry.register_async(Arc::new(FlushFlowFunction));
registry.register_async(Arc::new(DumpMetadataFunction));
registry.register_async(Arc::new(RestoreMetadataFunction));
}
}

View File

@@ -0,0 +1,56 @@
use common_macro::admin_fn;
use common_query::error::{MissingMetadataSnapshotHandlerSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use crate::handlers::MetadataSnapshotHandlerRef;
const METADATA_DIR: &str = "/snaphost/";
const METADATA_FILE_NAME: &str = "dump_metadata";
const METADATA_FILE_EXTENSION: &str = "metadata.fb";
#[admin_fn(
name = DumpMetadataFunction,
display_name = dump_metadata,
sig_fn = dump_signature,
ret = string
)]
pub(crate) async fn dump_metadata(
metadata_snapshot_handler: &MetadataSnapshotHandlerRef,
_query_ctx: &QueryContextRef,
_params: &[ValueRef<'_>],
) -> Result<Value> {
let filename = metadata_snapshot_handler
.dump(METADATA_DIR, METADATA_FILE_NAME)
.await?;
Ok(Value::from(filename))
}
fn dump_signature() -> Signature {
Signature::uniform(0, vec![], Volatility::Immutable)
}
#[admin_fn(
name = RestoreMetadataFunction,
display_name = restore_metadata,
sig_fn = restore_signature,
ret = uint64,
)]
pub(crate) async fn restore_metadata(
metadata_snapshot_handler: &MetadataSnapshotHandlerRef,
_query_ctx: &QueryContextRef,
_params: &[ValueRef<'_>],
) -> Result<Value> {
let num_keyvalues = metadata_snapshot_handler
.restore(
METADATA_DIR,
&format!("{METADATA_FILE_NAME}.{METADATA_FILE_EXTENSION}"),
)
.await?;
Ok(Value::from(num_keyvalues))
}
fn restore_signature() -> Signature {
Signature::uniform(0, vec![], Volatility::Immutable)
}

View File

@@ -89,8 +89,18 @@ pub trait FlowServiceHandler: Send + Sync {
) -> Result<api::v1::flow::FlowResponse>;
}
/// This metadata snapshot handler is only used for dumping and restoring metadata for now.
#[async_trait]
pub trait MetadataSnapshotHandler: Send + Sync {
async fn dump(&self, path: &str, filename: &str) -> Result<String>;
async fn restore(&self, path: &str, filename: &str) -> Result<u64>;
}
pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;
pub type ProcedureServiceHandlerRef = Arc<dyn ProcedureServiceHandler>;
pub type FlowServiceHandlerRef = Arc<dyn FlowServiceHandler>;
pub type MetadataSnapshotHandlerRef = Arc<dyn MetadataSnapshotHandler>;
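
Not part of the change: the in-memory stand-in below mirrors the dump/restore contract declared above, purely to make the expected semantics concrete. The real handler is expected to write a metadata snapshot to object storage; every name in this sketch is hypothetical.

use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Default)]
struct InMemorySnapshots {
    // file name -> key-values that were "dumped" into it
    files: Mutex<HashMap<String, Vec<(String, String)>>>,
    // the live metadata key-values
    live: Mutex<Vec<(String, String)>>,
}

impl InMemorySnapshots {
    // Mirrors `dump`: persist all key-values under `path`/`filename` and return
    // the full name of the written file.
    fn dump(&self, path: &str, filename: &str) -> Result<String, String> {
        let full = format!("{path}{filename}.metadata.fb");
        let kvs = self.live.lock().unwrap().clone();
        self.files.lock().unwrap().insert(full.clone(), kvs);
        Ok(full)
    }

    // Mirrors `restore`: read the snapshot back and return how many key-values
    // were restored.
    fn restore(&self, path: &str, filename: &str) -> Result<u64, String> {
        let full = format!("{path}{filename}");
        let files = self.files.lock().unwrap();
        let kvs = files.get(&full).ok_or_else(|| format!("{full} not found"))?;
        *self.live.lock().unwrap() = kvs.clone();
        Ok(kvs.len() as u64)
    }
}

Under this sketch, `dump("/snaphost/", "dump_metadata")` returns `/snaphost/dump_metadata.metadata.fb`, which matches the name the `restore_metadata` admin function reconstructs from its constants.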

View File

@@ -13,8 +13,10 @@
// limitations under the License.
use std::sync::Arc;
mod greatest;
mod to_unixtime;
use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::function_registry::FunctionRegistry;
@@ -24,5 +26,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction));
registry.register(Arc::new(GreatestFunction));
}
}

View File

@@ -0,0 +1,328 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self};
use common_query::error::{
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{
DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct GreatestFunction;
const NAME: &str = "greatest";
macro_rules! gt_time_types {
($ty: ident, $columns:expr) => {{
let column1 = $columns[0].to_arrow_array();
let column2 = $columns[1].to_arrow_array();
let column1 = column1.as_primitive::<$ty>();
let column2 = column2.as_primitive::<$ty>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
}};
}
impl Function for GreatestFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
ensure!(
input_types.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
input_types.len()
)
}
);
match &input_types[0] {
ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: input_types,
}
.fail(),
}
}
fn signature(&self) -> Signature {
Signature::uniform(
2,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let column1 = cast(
&columns[0].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<TimestampMillisecondType>();
let column2 = cast(
&columns[1].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<TimestampMillisecondType>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
ConcreteDataType::Timestamp(ts_type) => match ts_type {
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
TimestampType::Millisecond(_) => {
gt_time_types!(TimestampMillisecondType, columns)
}
TimestampType::Microsecond(_) => {
gt_time_types!(TimestampMicrosecondType, columns)
}
TimestampType::Nanosecond(_) => {
gt_time_types!(TimestampNanosecondType, columns)
}
},
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for GreatestFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GREATEST")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_time::timestamp::TimeUnit;
use common_time::{Date, Timestamp};
use datatypes::types::{
DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
TimestampSecondType,
};
use datatypes::value::Value;
use datatypes::vectors::{
DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;
use super::*;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(StringVector::from(vec![
"1970-01-01".to_string(),
"2012-12-23".to_string(),
])) as _,
Arc::new(StringVector::from(vec![
"2001-02-01".to_string(),
"1999-01-01".to_string(),
])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
);
}
#[test]
fn test_greatest_takes_date_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::date_datatype(),
ConcreteDataType::date_datatype()
])
.unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Date(Date::from_str_utc("1970-01-01").unwrap())
);
assert_eq!(
result.get(1),
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
);
}
#[test]
fn test_greatest_takes_datetime_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
);
}
macro_rules! test_timestamp {
($type: expr,$unit: ident) => {
paste! {
#[test]
fn [<test_greatest_takes_ $unit:lower _vector>]() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[$type, $type]).unwrap(),
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
);
let columns = vec![
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
];
let result = function.eval(&FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
);
}
}
}
}
test_timestamp!(
ConcreteDataType::timestamp_nanosecond_datatype(),
Nanosecond
);
test_timestamp!(
ConcreteDataType::timestamp_microsecond_datatype(),
Microsecond
);
test_timestamp!(
ConcreteDataType::timestamp_millisecond_datatype(),
Millisecond
);
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
}

View File

@@ -115,13 +115,6 @@ impl Function for UddSketchCalcFunction {
}
};
// Check if the sketch is empty, if so, return null
// This is important to avoid panics when calling estimate_quantile on an empty sketch
// In practice, this will happen if input is all null
if sketch.bucket_iter().count() == 0 {
builder.push_null();
continue;
}
// Compute the estimated quantile from the sketch
let result = sketch.estimate_quantile(perc);
builder.push(Some(result));

View File

@@ -12,7 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::handlers::{FlowServiceHandlerRef, ProcedureServiceHandlerRef, TableMutationHandlerRef};
use crate::handlers::{
FlowServiceHandlerRef, MetadataSnapshotHandlerRef, ProcedureServiceHandlerRef,
TableMutationHandlerRef,
};
/// Shared state for SQL functions.
/// The handlers in state may be `None` in cli command-line or test cases.
@@ -24,6 +27,8 @@ pub struct FunctionState {
pub procedure_service_handler: Option<ProcedureServiceHandlerRef>,
// The flownode handler
pub flow_service_handler: Option<FlowServiceHandlerRef>,
// The metadata snapshot handler
pub metadata_snapshot_handler: Option<MetadataSnapshotHandlerRef>,
}
impl FunctionState {
@@ -48,10 +53,14 @@ impl FunctionState {
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};
use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
use crate::handlers::{
FlowServiceHandler, MetadataSnapshotHandler, ProcedureServiceHandler,
TableMutationHandler,
};
struct MockProcedureServiceHandler;
struct MockTableMutationHandler;
struct MockFlowServiceHandler;
struct MockMetadataServiceHandler;
const ROWS: usize = 42;
#[async_trait]
@@ -150,10 +159,22 @@ impl FunctionState {
}
}
#[async_trait]
impl MetadataSnapshotHandler for MockMetadataServiceHandler {
async fn dump(&self, _path: &str, _filename: &str) -> Result<String> {
Ok("test_filename".to_string())
}
async fn restore(&self, _path: &str, _filename: &str) -> Result<u64> {
Ok(100)
}
}
Self {
table_mutation_handler: Some(Arc::new(MockTableMutationHandler)),
procedure_service_handler: Some(Arc::new(MockProcedureServiceHandler)),
flow_service_handler: Some(Arc::new(MockFlowServiceHandler)),
metadata_snapshot_handler: Some(Arc::new(MockMetadataServiceHandler)),
}
}
}

View File

@@ -18,5 +18,4 @@ pub mod flight;
pub mod precision;
pub mod select;
pub use arrow_flight::FlightData;
pub use error::Error;

View File

@@ -179,6 +179,10 @@ fn build_struct(
Ident::new("flow_service_handler", handler_type.span()),
Ident::new("MissingFlowServiceHandlerSnafu", handler_type.span()),
),
"MetadataSnapshotHandlerRef" => (
Ident::new("metadata_snapshot_handler", handler_type.span()),
Ident::new("MissingMetadataSnapshotHandlerSnafu", handler_type.span()),
),
handler => ok!(error!(
handler_type.span(),
format!("Unknown handler type: {handler}")

View File

@@ -41,6 +41,7 @@ deadpool = { workspace = true, optional = true }
deadpool-postgres = { workspace = true, optional = true }
derive_builder.workspace = true
etcd-client.workspace = true
flexbuffers = "25.2"
futures.workspace = true
futures-util.workspace = true
hex.workspace = true
@@ -48,6 +49,7 @@ humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true
moka.workspace = true
object-store.workspace = true
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
@@ -70,6 +72,7 @@ typetag.workspace = true
[dev-dependencies]
chrono.workspace = true
common-procedure = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-wal = { workspace = true, features = ["testing"] }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }
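
Not part of the diff: the `flexbuffers` and `object-store` dependencies above appear to back the metadata snapshot feature. A minimal sketch of the serde round-trip they enable follows; the `KeyValue` struct and example keys are made up, the real snapshot format is not shown here, and the sketch assumes `serde` with the `derive` feature alongside `flexbuffers`.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct KeyValue {
    key: Vec<u8>,
    value: Vec<u8>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let kvs = vec![
        KeyValue { key: b"__table/1".to_vec(), value: b"{}".to_vec() },
        KeyValue { key: b"__flow/route/1/0".to_vec(), value: b"{}".to_vec() },
    ];
    // Serialize with flexbuffers (the step a `SerializeFlexbuffers` error would wrap).
    let bytes = flexbuffers::to_vec(&kvs)?;
    // Deserialize again (the step a `DeserializeFlexbuffers` error would wrap).
    let decoded: Vec<KeyValue> = flexbuffers::from_slice(&bytes)?;
    assert_eq!(kvs, decoded);
    Ok(())
}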

View File

@@ -24,39 +24,21 @@ use crate::cache::{CacheContainer, Initializer};
use crate::error::Result;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::{FlowId, FlowPartitionId};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
use crate::FlownodeId;
/// Flow id&flow partition key
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FlowIdent {
pub flow_id: FlowId,
pub partition_id: FlowPartitionId,
}
impl FlowIdent {
pub fn new(flow_id: FlowId, partition_id: FlowPartitionId) -> Self {
Self {
flow_id,
partition_id,
}
}
}
/// Cache for TableFlowManager; the table_id part is the key of the outer cache.
/// Includes the flownode_id, flow_id, partition_id mapping to Peer.
type FlownodeFlowSet = Arc<HashMap<FlowIdent, Peer>>;
type FlownodeSet = Arc<HashMap<FlownodeId, Peer>>;
pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;
/// [TableFlownodeSetCache] caches the [TableId] to [FlownodeSet] mapping.
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeFlowSet, CacheIdent>;
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeSet, CacheIdent>;
/// Constructs a [TableFlownodeSetCache].
pub fn new_table_flownode_set_cache(
name: String,
cache: Cache<TableId, FlownodeFlowSet>,
cache: Cache<TableId, FlownodeSet>,
kv_backend: KvBackendRef,
) -> TableFlownodeSetCache {
let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
@@ -65,7 +47,7 @@ pub fn new_table_flownode_set_cache(
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
}
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeFlowSet> {
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
Arc::new(move |&table_id| {
let table_flow_manager = table_flow_manager.clone();
Box::pin(async move {
@@ -75,12 +57,7 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
.map(|flows| {
flows
.into_iter()
.map(|(key, value)| {
(
FlowIdent::new(key.flow_id(), key.partition_id()),
value.peer,
)
})
.map(|(key, value)| (key.flownode_id(), value.peer))
.collect::<HashMap<_, _>>()
})
// We must cache the `HashSet` even if it's empty,
@@ -94,33 +71,26 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
}
async fn handle_create_flow(
cache: &Cache<TableId, FlownodeFlowSet>,
cache: &Cache<TableId, FlownodeSet>,
CreateFlow {
flow_id,
source_table_ids,
partition_to_peer_mapping: flow_part2nodes,
flownodes: flownode_peers,
}: &CreateFlow,
) {
for table_id in source_table_ids {
let entry = cache.entry(*table_id);
entry
.and_compute_with(
async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
Some(entry) => {
let mut map = entry.into_value().as_ref().clone();
map.extend(
flow_part2nodes.iter().map(|(part, peer)| {
(FlowIdent::new(*flow_id, *part), peer.clone())
}),
);
map.extend(flownode_peers.iter().map(|peer| (peer.id, peer.clone())));
Op::Put(Arc::new(map))
}
None => {
Op::Put(Arc::new(HashMap::from_iter(flow_part2nodes.iter().map(
|(part, peer)| (FlowIdent::new(*flow_id, *part), peer.clone()),
))))
}
None => Op::Put(Arc::new(HashMap::from_iter(
flownode_peers.iter().map(|peer| (peer.id, peer.clone())),
))),
},
)
.await;
@@ -128,23 +98,21 @@ async fn handle_create_flow(
}
async fn handle_drop_flow(
cache: &Cache<TableId, FlownodeFlowSet>,
cache: &Cache<TableId, FlownodeSet>,
DropFlow {
flow_id,
source_table_ids,
flow_part2node_id,
flownode_ids,
}: &DropFlow,
) {
for table_id in source_table_ids {
let entry = cache.entry(*table_id);
entry
.and_compute_with(
async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
Some(entry) => {
let mut set = entry.into_value().as_ref().clone();
for (part, _node) in flow_part2node_id {
let key = FlowIdent::new(*flow_id, *part);
set.remove(&key);
for flownode_id in flownode_ids {
set.remove(flownode_id);
}
Op::Put(Arc::new(set))
@@ -160,7 +128,7 @@ async fn handle_drop_flow(
}
fn invalidator<'a>(
cache: &'a Cache<TableId, FlownodeFlowSet>,
cache: &'a Cache<TableId, FlownodeSet>,
ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
Box::pin(async move {
@@ -186,7 +154,7 @@ mod tests {
use moka::future::CacheBuilder;
use table::table_name::TableName;
use crate::cache::flow::table_flownode::{new_table_flownode_set_cache, FlowIdent};
use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
@@ -219,7 +187,6 @@ mod tests {
},
flownode_ids: BTreeMap::from([(0, 1), (1, 2), (2, 3)]),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
query_context: None,
flow_name: "my_flow".to_string(),
raw_sql: "sql".to_string(),
expire_after: Some(300),
@@ -246,16 +213,12 @@ mod tests {
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
)
HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
)
HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
);
let result = cache.get(1026).await.unwrap().unwrap();
assert_eq!(result.len(), 0);
@@ -267,9 +230,8 @@ mod tests {
let cache = CacheBuilder::new(128).build();
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
flownodes: (1..=5).map(Peer::empty).collect(),
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
@@ -278,54 +240,6 @@ mod tests {
assert_eq!(set.len(), 5);
}
#[tokio::test]
async fn test_replace_flow() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let cache = CacheBuilder::new(128).build();
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(set.len(), 5);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(set.len(), 5);
let drop_then_create_flow = vec![
CacheIdent::DropFlow(DropFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
}),
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1026, 1027],
partition_to_peer_mapping: (11..=15)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
}),
CacheIdent::FlowId(2001),
];
cache.invalidate(&drop_then_create_flow).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert!(set.is_empty());
let expected = HashMap::from_iter(
(11..=15).map(|i| (FlowIdent::new(2001, i as u32), Peer::empty(i + 1))),
);
let set = cache.get(1026).await.unwrap().unwrap();
assert_eq!(set.as_ref().clone(), expected);
let set = cache.get(1027).await.unwrap().unwrap();
assert_eq!(set.as_ref().clone(), expected);
}
#[tokio::test]
async fn test_drop_flow() {
let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -333,57 +247,34 @@ mod tests {
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
flownodes: (1..=5).map(Peer::empty).collect(),
}),
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2002,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (11..=12)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
}),
// same flownode that hold multiple flows
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2003,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
flownodes: (11..=12).map(Peer::empty).collect(),
}),
];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(set.len(), 12);
assert_eq!(set.len(), 7);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(set.len(), 12);
assert_eq!(set.len(), 7);
let ident = vec![CacheIdent::DropFlow(DropFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
flownode_ids: vec![1, 2, 3, 4, 5],
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(11..=12)
.map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
.chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
)
HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(11..=12)
.map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
.chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
)
HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
);
}
}
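
A side note on this file's hunks: they move between two shapes for the per-table peer cache, one keyed by flownode id and one keyed by flow id plus partition id. The sketch below spells out both shapes with plain std types (all names are local to the sketch); the practical difference shows up when one flownode hosts several flows that read the same source table, since a per-flownode key is shared while a per-flow-partition key is not.

use std::collections::HashMap;
use std::sync::Arc;

type FlownodeId = u64;
type FlowId = u32;
type FlowPartitionId = u32;

#[derive(Debug, Clone, PartialEq)]
struct Peer {
    id: u64,
    addr: String,
}

// Keyed by flownode id: dropping one flow removes the flownode's entry outright,
// even if another flow on the same flownode still reads the table.
type PeersByFlownode = Arc<HashMap<FlownodeId, Peer>>;

// Keyed by (flow id, partition id): each flow's partitions keep independent
// entries, so dropping one flow leaves the other flows' entries untouched.
type PeersByFlowPartition = Arc<HashMap<(FlowId, FlowPartitionId), Peer>>;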

View File

@@ -16,12 +16,9 @@ use std::sync::Arc;
use crate::error::Result;
use crate::flow_name::FlowName;
use crate::instruction::{CacheIdent, DropFlow};
use crate::instruction::CacheIdent;
use crate::key::flow::flow_info::FlowInfoKey;
use crate::key::flow::flow_name::FlowNameKey;
use crate::key::flow::flow_route::FlowRouteKey;
use crate::key::flow::flownode_flow::FlownodeFlowKey;
use crate::key::flow::table_flow::TableFlowKey;
use crate::key::schema_name::SchemaNameKey;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
@@ -92,40 +89,9 @@ where
let key: SchemaNameKey = schema_name.into();
self.invalidate_key(&key.to_bytes()).await;
}
CacheIdent::CreateFlow(_) => {
CacheIdent::CreateFlow(_) | CacheIdent::DropFlow(_) => {
// Do nothing
}
CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids,
flow_part2node_id,
}) => {
// invalidate flow route/flownode flow/table flow
let mut keys = Vec::with_capacity(
source_table_ids.len() * flow_part2node_id.len()
+ flow_part2node_id.len() * 2,
);
for table_id in source_table_ids {
for (partition_id, node_id) in flow_part2node_id {
let key =
TableFlowKey::new(*table_id, *node_id, *flow_id, *partition_id)
.to_bytes();
keys.push(key);
}
}
for (partition_id, node_id) in flow_part2node_id {
let key =
FlownodeFlowKey::new(*node_id, *flow_id, *partition_id).to_bytes();
keys.push(key);
let key = FlowRouteKey::new(*flow_id, *partition_id).to_bytes();
keys.push(key);
}
for key in keys {
self.invalidate_key(&key).await;
}
}
CacheIdent::FlowName(FlowName {
catalog_name,
flow_name,

View File

@@ -38,8 +38,8 @@ use table::metadata::TableId;
use crate::cache_invalidator::Context;
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::DdlContext;
use crate::error::{self, Result, UnexpectedSnafu};
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::error::{self, Result};
use crate::instruction::{CacheIdent, CreateFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
@@ -70,7 +70,6 @@ impl CreateFlowProcedure {
query_context,
state: CreateFlowState::Prepare,
prev_flow_info_value: None,
did_replace: false,
flow_type: None,
},
}
@@ -172,7 +171,7 @@ impl CreateFlowProcedure {
}
self.data.state = CreateFlowState::CreateFlows;
// determine flow type
self.data.flow_type = Some(get_flow_type_from_options(&self.data.task)?);
self.data.flow_type = Some(determine_flow_type(&self.data.task));
Ok(Status::executing(true))
}
@@ -197,8 +196,8 @@ impl CreateFlowProcedure {
});
}
info!(
"Creating flow({:?}, type={:?}) on flownodes with peers={:?}",
self.data.flow_id, self.data.flow_type, self.data.peers
"Creating flow({:?}) on flownodes with peers={:?}",
self.data.flow_id, self.data.peers
);
join_all(create_flow)
.await
@@ -225,7 +224,6 @@ impl CreateFlowProcedure {
.update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
.await?;
info!("Replaced flow metadata for flow {flow_id}");
self.data.did_replace = true;
} else {
self.context
.flow_metadata_manager
@@ -242,43 +240,22 @@ impl CreateFlowProcedure {
debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
// Safety: The flow id must be allocated.
let flow_id = self.data.flow_id.unwrap();
let did_replace = self.data.did_replace;
let ctx = Context {
subject: Some("Invalidate flow cache by creating flow".to_string()),
};
let mut caches = vec![];
// if did replaced, invalidate the flow cache with drop the old flow
if did_replace {
let old_flow_info = self.data.prev_flow_info_value.as_ref().unwrap();
// only drop flow is needed, since the flow name hasn't changed, and the flow id is already invalidated below
caches.extend([CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids: old_flow_info.source_table_ids.clone(),
flow_part2node_id: old_flow_info.flownode_ids().clone().into_iter().collect(),
})]);
}
let (_flow_info, flow_routes) = (&self.data).into();
let flow_part2peers = flow_routes
.into_iter()
.map(|(part_id, route)| (part_id, route.peer))
.collect();
caches.extend([
CacheIdent::CreateFlow(CreateFlow {
flow_id,
source_table_ids: self.data.source_table_ids.clone(),
partition_to_peer_mapping: flow_part2peers,
}),
CacheIdent::FlowId(flow_id),
]);
self.context
.cache_invalidator
.invalidate(&ctx, &caches)
.invalidate(
&ctx,
&[
CacheIdent::CreateFlow(CreateFlow {
source_table_ids: self.data.source_table_ids.clone(),
flownodes: self.data.peers.clone(),
}),
CacheIdent::FlowId(flow_id),
],
)
.await?;
Ok(Status::done_with_output(flow_id))
@@ -329,20 +306,8 @@ impl Procedure for CreateFlowProcedure {
}
}
pub fn get_flow_type_from_options(flow_task: &CreateFlowTask) -> Result<FlowType> {
let flow_type = flow_task
.flow_options
.get(FlowType::FLOW_TYPE_KEY)
.map(|s| s.as_str());
match flow_type {
Some(FlowType::BATCHING) => Ok(FlowType::Batching),
Some(FlowType::STREAMING) => Ok(FlowType::Streaming),
Some(unknown) => UnexpectedSnafu {
err_msg: format!("Unknown flow type: {}", unknown),
}
.fail(),
None => Ok(FlowType::Batching),
}
pub fn determine_flow_type(_flow_task: &CreateFlowTask) -> FlowType {
FlowType::Batching
}
/// The state of [CreateFlowProcedure].
@@ -400,10 +365,6 @@ pub struct CreateFlowData {
/// For verify if prev value is consistent when need to update flow metadata.
/// only set when `or_replace` is true.
pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
/// Only set to true when replace actually happened.
/// This is used to determine whether to invalidate the cache.
#[serde(default)]
pub(crate) did_replace: bool,
pub(crate) flow_type: Option<FlowType>,
}
@@ -476,7 +437,6 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
sink_table_name,
flownode_ids,
catalog_name,
query_context: Some(value.query_context.clone()),
flow_name,
raw_sql: sql,
expire_after,

View File

@@ -13,7 +13,6 @@
// limitations under the License.
mod metadata;
use api::v1::flow::{flow_request, DropRequest, FlowRequest};
use async_trait::async_trait;
use common_catalog::format_full_flow_name;
@@ -154,12 +153,6 @@ impl DropFlowProcedure {
};
let flow_info_value = self.data.flow_info_value.as_ref().unwrap();
let flow_part2nodes = flow_info_value
.flownode_ids()
.clone()
.into_iter()
.collect::<Vec<_>>();
self.context
.cache_invalidator
.invalidate(
@@ -171,9 +164,8 @@ impl DropFlowProcedure {
flow_name: flow_info_value.flow_name.to_string(),
}),
CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids: flow_info_value.source_table_ids.clone(),
flow_part2node_id: flow_part2nodes,
flownode_ids: flow_info_value.flownode_ids.values().cloned().collect(),
}),
],
)

View File

@@ -46,7 +46,7 @@ pub(crate) fn test_create_flow_task(
create_if_not_exists,
expire_after: Some(300),
comment: "".to_string(),
sql: "select 1".to_string(),
sql: "raw_sql".to_string(),
flow_options: Default::default(),
}
}

View File

@@ -401,13 +401,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid flow request body: {:?}", body))]
InvalidFlowRequestBody {
body: Box<Option<api::v1::flow::flow_request::Body>>,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to get kv cache, err: {}", err_msg))]
GetKvCache { err_msg: String },
@@ -514,25 +507,11 @@ pub enum Error {
},
#[snafu(display(
"Failed to get a Kafka partition client, topic: {}, partition: {}",
"Failed to build a Kafka partition client, topic: {}, partition: {}",
topic,
partition
))]
KafkaPartitionClient {
topic: String,
partition: i32,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: rskafka::client::error::Error,
},
#[snafu(display(
"Failed to get offset from Kafka, topic: {}, partition: {}",
topic,
partition
))]
KafkaGetOffset {
BuildKafkaPartitionClient {
topic: String,
partition: i32,
#[snafu(implicit)]
@@ -805,12 +784,74 @@ pub enum Error {
source: common_procedure::error::Error,
},
#[snafu(display("Failed to parse timezone"))]
InvalidTimeZone {
#[snafu(display("Invalid file path: {}", file_path))]
InvalidFilePath {
#[snafu(implicit)]
location: Location,
file_path: String,
},
#[snafu(display("Failed to serialize flexbuffers"))]
SerializeFlexbuffers {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: common_time::error::Error,
error: flexbuffers::SerializationError,
},
#[snafu(display("Failed to deserialize flexbuffers"))]
DeserializeFlexbuffers {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: flexbuffers::DeserializationError,
},
#[snafu(display("Failed to read flexbuffers"))]
ReadFlexbuffers {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: flexbuffers::ReaderError,
},
#[snafu(display("Invalid file name: {}", reason))]
InvalidFileName {
#[snafu(implicit)]
location: Location,
reason: String,
},
#[snafu(display("Invalid file extension: {}", reason))]
InvalidFileExtension {
#[snafu(implicit)]
location: Location,
reason: String,
},
#[snafu(display("Invalid file context: {}", reason))]
InvalidFileContext {
#[snafu(implicit)]
location: Location,
reason: String,
},
#[snafu(display("Failed to write object, file path: {}", file_path))]
WriteObject {
#[snafu(implicit)]
location: Location,
file_path: String,
#[snafu(source)]
error: object_store::Error,
},
#[snafu(display("Failed to read object, file path: {}", file_path))]
ReadObject {
#[snafu(implicit)]
location: Location,
file_path: String,
#[snafu(source)]
error: object_store::Error,
},
}
@@ -830,6 +871,8 @@ impl ErrorExt for Error {
| SerializeToJson { .. }
| DeserializeFromJson { .. } => StatusCode::Internal,
WriteObject { .. } | ReadObject { .. } => StatusCode::StorageUnavailable,
NoLeader { .. } => StatusCode::TableUnavailable,
ValueNotExist { .. } | ProcedurePoisonConflict { .. } => StatusCode::Unexpected,
@@ -857,7 +900,7 @@ impl ErrorExt for Error {
| EncodeWalOptions { .. }
| BuildKafkaClient { .. }
| BuildKafkaCtrlClient { .. }
| KafkaPartitionClient { .. }
| BuildKafkaPartitionClient { .. }
| ResolveKafkaEndpoint { .. }
| ProduceRecord { .. }
| CreateKafkaWalTopic { .. }
@@ -867,7 +910,10 @@ impl ErrorExt for Error {
| FromUtf8 { .. }
| MetadataCorruption { .. }
| ParseWalOptions { .. }
| KafkaGetOffset { .. } => StatusCode::Unexpected,
| ReadFlexbuffers { .. }
| SerializeFlexbuffers { .. }
| DeserializeFlexbuffers { .. }
| InvalidFileContext { .. } => StatusCode::Unexpected,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
@@ -884,8 +930,9 @@ impl ErrorExt for Error {
| InvalidSetDatabaseOption { .. }
| InvalidUnsetDatabaseOption { .. }
| InvalidTopicNamePrefix { .. }
| InvalidTimeZone { .. } => StatusCode::InvalidArguments,
InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
| InvalidFileExtension { .. }
| InvalidFileName { .. }
| InvalidFilePath { .. } => StatusCode::InvalidArguments,
FlowNotFound { .. } => StatusCode::FlowNotFound,
FlowRouteNotFound { .. } => StatusCode::Unexpected,

View File

@@ -24,7 +24,7 @@ use table::table_name::TableName;
use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::{FlowId, FlowPartitionId};
use crate::key::FlowId;
use crate::peer::Peer;
use crate::{DatanodeId, FlownodeId};
@@ -184,19 +184,14 @@ pub enum CacheIdent {
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CreateFlow {
/// The unique identifier for the flow.
pub flow_id: FlowId,
pub source_table_ids: Vec<TableId>,
/// Mapping of flow partition to peer information
pub partition_to_peer_mapping: Vec<(FlowPartitionId, Peer)>,
pub flownodes: Vec<Peer>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct DropFlow {
pub flow_id: FlowId,
pub source_table_ids: Vec<TableId>,
/// Mapping of flow partition to flownode id
pub flow_part2node_id: Vec<(FlowPartitionId, FlownodeId)>,
pub flownode_ids: Vec<FlownodeId>,
}
/// Flushes a batch of regions.

View File

@@ -256,11 +256,6 @@ impl DatanodeTableManager {
})?
.and_then(|r| DatanodeTableValue::try_from_raw_value(&r.value))?
.region_info;
// If the region options are the same, we don't need to update it.
if region_info.region_options == new_region_options {
return Ok(Txn::new());
}
// substitute region options only.
region_info.region_options = new_region_options;

View File

@@ -45,7 +45,7 @@ use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;
/// The key of `__flow/` scope.
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, PartialEq)]
pub struct FlowScoped<T> {
inner: T,
}
@@ -246,32 +246,27 @@ impl FlowMetadataManager {
new_flow_info: &FlowInfoValue,
flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
) -> Result<()> {
let (update_flow_flow_name_txn, on_create_flow_flow_name_failure) =
let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
self.flow_name_manager.build_update_txn(
&new_flow_info.catalog_name,
&new_flow_info.flow_name,
flow_id,
)?;
let (update_flow_txn, on_create_flow_failure) =
let (create_flow_txn, on_create_flow_failure) =
self.flow_info_manager
.build_update_txn(flow_id, current_flow_info, new_flow_info)?;
let update_flow_routes_txn = self.flow_route_manager.build_update_txn(
flow_id,
current_flow_info,
flow_routes.clone(),
)?;
let create_flow_routes_txn = self
.flow_route_manager
.build_create_txn(flow_id, flow_routes.clone())?;
let update_flownode_flow_txn = self.flownode_flow_manager.build_update_txn(
flow_id,
current_flow_info,
new_flow_info.flownode_ids().clone(),
);
let create_flownode_flow_txn = self
.flownode_flow_manager
.build_create_txn(flow_id, new_flow_info.flownode_ids().clone());
let update_table_flow_txn = self.table_flow_manager.build_update_txn(
let create_table_flow_txn = self.table_flow_manager.build_create_txn(
flow_id,
current_flow_info,
flow_routes
.into_iter()
.map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
@@ -280,11 +275,11 @@ impl FlowMetadataManager {
)?;
let txn = Txn::merge_all(vec![
update_flow_flow_name_txn,
update_flow_txn,
update_flow_routes_txn,
update_flownode_flow_txn,
update_table_flow_txn,
create_flow_flow_name_txn,
create_flow_txn,
create_flow_routes_txn,
create_flownode_flow_txn,
create_table_flow_txn,
]);
info!(
"Creating flow {}.{}({}), with {} txn operations",
@@ -457,7 +452,6 @@ mod tests {
};
FlowInfoValue {
catalog_name: catalog_name.to_string(),
query_context: None,
flow_name: flow_name.to_string(),
source_table_ids,
sink_table_name,
@@ -631,7 +625,6 @@ mod tests {
};
let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
query_context: None,
flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,
@@ -788,141 +781,6 @@ mod tests {
}
}
#[tokio::test]
async fn test_update_flow_metadata_diff_flownode() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
let flow_id = 10;
let flow_value = test_flow_info_value(
"flow",
[(0u32, 1u64), (1u32, 2u64)].into(),
vec![1024, 1025, 1026],
);
let flow_routes = vec![
(
0u32,
FlowRouteValue {
peer: Peer::empty(1),
},
),
(
1,
FlowRouteValue {
peer: Peer::empty(2),
},
),
];
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
.await
.unwrap();
let new_flow_value = {
let mut tmp = flow_value.clone();
tmp.raw_sql = "new".to_string();
// move to different flownodes
tmp.flownode_ids = [(0, 3u64), (1, 4u64)].into();
tmp
};
let new_flow_routes = vec![
(
0u32,
FlowRouteValue {
peer: Peer::empty(3),
},
),
(
1,
FlowRouteValue {
peer: Peer::empty(4),
},
),
];
// Update flow instead
flow_metadata_manager
.update_flow_metadata(
flow_id,
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
&new_flow_value,
new_flow_routes.clone(),
)
.await
.unwrap();
let got = flow_metadata_manager
.flow_info_manager()
.get(flow_id)
.await
.unwrap()
.unwrap();
let routes = flow_metadata_manager
.flow_route_manager()
.routes(flow_id)
.await
.unwrap();
assert_eq!(
routes,
vec![
(
FlowRouteKey::new(flow_id, 0),
FlowRouteValue {
peer: Peer::empty(3),
},
),
(
FlowRouteKey::new(flow_id, 1),
FlowRouteValue {
peer: Peer::empty(4),
},
),
]
);
assert_eq!(got, new_flow_value);
let flows = flow_metadata_manager
.flownode_flow_manager()
.flows(1)
.try_collect::<Vec<_>>()
.await
.unwrap();
// should moved to different flownode
assert_eq!(flows, vec![]);
let flows = flow_metadata_manager
.flownode_flow_manager()
.flows(3)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(flows, vec![(flow_id, 0)]);
for table_id in [1024, 1025, 1026] {
let nodes = flow_metadata_manager
.table_flow_manager()
.flows(table_id)
.await
.unwrap();
assert_eq!(
nodes,
vec![
(
TableFlowKey::new(table_id, 3, flow_id, 0),
TableFlowValue {
peer: Peer::empty(3)
}
),
(
TableFlowKey::new(table_id, 4, flow_id, 1),
TableFlowValue {
peer: Peer::empty(4)
}
)
]
);
}
}
#[tokio::test]
async fn test_update_flow_metadata_flow_replace_diff_id_err() {
let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -1006,7 +864,6 @@ mod tests {
};
let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
query_context: None,
flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,

View File

@@ -121,13 +121,6 @@ pub struct FlowInfoValue {
pub(crate) flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
/// The catalog name.
pub(crate) catalog_name: String,
/// The query context used when the flow was created.
/// Although a flow doesn't belong to any schema, this query_context is needed to remember
/// the query context under which `create_flow` was executed,
/// so the flow can be recovered with the same sql & query_context after a db restart.
/// If none, the default query context should be used.
#[serde(default)]
pub(crate) query_context: Option<crate::rpc::ddl::QueryContext>,
/// The flow name.
pub(crate) flow_name: String,
/// The raw sql.
@@ -153,15 +146,6 @@ impl FlowInfoValue {
&self.flownode_ids
}
/// Insert a new flownode id for a partition.
pub fn insert_flownode_id(
&mut self,
partition: FlowPartitionId,
node: FlownodeId,
) -> Option<FlownodeId> {
self.flownode_ids.insert(partition, node)
}
/// Returns the `source_table`.
pub fn source_table_ids(&self) -> &[TableId] {
&self.source_table_ids
@@ -171,10 +155,6 @@ impl FlowInfoValue {
&self.catalog_name
}
pub fn query_context(&self) -> &Option<crate::rpc::ddl::QueryContext> {
&self.query_context
}
pub fn flow_name(&self) -> &String {
&self.flow_name
}
@@ -281,11 +261,10 @@ impl FlowInfoManager {
let raw_value = new_flow_value.try_as_raw_value()?;
let prev_value = current_flow_value.get_raw_bytes();
let txn = Txn::new()
.when(vec![Compare::new(
key.clone(),
CompareOp::Equal,
Some(prev_value),
)])
.when(vec![
Compare::new(key.clone(), CompareOp::NotEqual, None),
Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
])
.and_then(vec![TxnOp::Put(key.clone(), raw_value)])
.or_else(vec![TxnOp::Get(key.clone())]);

View File

@@ -19,12 +19,9 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -42,7 +39,7 @@ lazy_static! {
/// The key stores the route info of the flow.
///
/// The layout: `__flow/route/{flow_id}/{partition_id}`.
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, PartialEq)]
pub struct FlowRouteKey(FlowScoped<FlowRouteKeyInner>);
impl FlowRouteKey {
@@ -145,12 +142,6 @@ pub struct FlowRouteValue {
pub(crate) peer: Peer,
}
impl From<Peer> for FlowRouteValue {
fn from(peer: Peer) -> Self {
Self { peer }
}
}
impl FlowRouteValue {
/// Returns the `peer`.
pub fn peer(&self) -> &Peer {
@@ -213,33 +204,6 @@ impl FlowRouteManager {
Ok(Txn::new().and_then(txns))
}
/// Builds an update flow routes transaction.
///
/// Puts `__flow/route/{flow_id}/{partition_id}` keys.
/// Also removes `__flow/route/{flow_id}/{old_partition_id}` keys.
pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlowRouteValue)>>(
&self,
flow_id: FlowId,
current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
flow_routes: I,
) -> Result<Txn> {
let del_txns = current_flow_info
.flownode_ids()
.iter()
.map(|(partition_id, _)| {
let key = FlowRouteKey::new(flow_id, *partition_id).to_bytes();
Ok(TxnOp::Delete(key))
});
let put_txns = flow_routes.into_iter().map(|(partition_id, route)| {
let key = FlowRouteKey::new(flow_id, partition_id).to_bytes();
Ok(TxnOp::Put(key, route.try_as_raw_value()?))
});
let txns = del_txns.chain(put_txns).collect::<Result<Vec<_>>>()?;
Ok(Txn::new().and_then(txns))
}
async fn remap_flow_route_addresses(
&self,
flow_routes: &mut [(FlowRouteKey, FlowRouteValue)],

View File

@@ -19,9 +19,8 @@ use regex::Regex;
use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::FlowScoped;
use crate::key::{BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
@@ -166,17 +165,6 @@ impl FlownodeFlowManager {
Self { kv_backend }
}
/// Whether given flow exist on this flownode.
pub async fn exists(
&self,
flownode_id: FlownodeId,
flow_id: FlowId,
partition_id: FlowPartitionId,
) -> Result<bool> {
let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
Ok(self.kv_backend.get(&key).await?.is_some())
}
/// Retrieves all [FlowId] and [FlowPartitionId]s of the specified `flownode_id`.
pub fn flows(
&self,
@@ -214,33 +202,6 @@ impl FlownodeFlowManager {
Txn::new().and_then(txns)
}
/// Builds an update flownode flow transaction.
///
/// Puts `__flownode_flow/{flownode_id}/{flow_id}/{partition_id}` keys.
/// Remove the old `__flownode_flow/{old_flownode_id}/{flow_id}/{old_partition_id}` keys.
pub(crate) fn build_update_txn<I: IntoIterator<Item = (FlowPartitionId, FlownodeId)>>(
&self,
flow_id: FlowId,
current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
flownode_ids: I,
) -> Txn {
let del_txns =
current_flow_info
.flownode_ids()
.iter()
.map(|(partition_id, flownode_id)| {
let key = FlownodeFlowKey::new(*flownode_id, flow_id, *partition_id).to_bytes();
TxnOp::Delete(key)
});
let put_txns = flownode_ids.into_iter().map(|(partition_id, flownode_id)| {
let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes();
TxnOp::Put(key, vec![])
});
let txns = del_txns.chain(put_txns).collect::<Vec<_>>();
Txn::new().and_then(txns)
}
}
#[cfg(test)]

View File

@@ -22,12 +22,9 @@ use snafu::OptionExt;
use table::metadata::TableId;
use crate::error::{self, Result};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue,
};
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
@@ -218,7 +215,7 @@ impl TableFlowManager {
/// Builds a create table flow transaction.
///
/// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys.
/// Puts `__flow/source_table/{table_id}/{node_id}/{partition_id}` keys.
pub fn build_create_txn(
&self,
flow_id: FlowId,
@@ -242,44 +239,6 @@ impl TableFlowManager {
Ok(Txn::new().and_then(txns))
}
/// Builds an update table flow transaction.
///
/// Puts `__flow/source_table/{table_id}/{node_id}/{flow_id}/{partition_id}` keys,
/// Also removes the previous
/// `__flow/source_table/{table_id}/{old_node_id}/{flow_id}/{partition_id}` keys.
pub fn build_update_txn(
&self,
flow_id: FlowId,
current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
table_flow_values: Vec<(FlowPartitionId, TableFlowValue)>,
source_table_ids: &[TableId],
) -> Result<Txn> {
let mut txns = Vec::with_capacity(2 * source_table_ids.len() * table_flow_values.len());
// first remove the old keys
for (part_id, node_id) in current_flow_info.flownode_ids() {
for source_table_id in current_flow_info.source_table_ids() {
txns.push(TxnOp::Delete(
TableFlowKey::new(*source_table_id, *node_id, flow_id, *part_id).to_bytes(),
));
}
}
for (partition_id, table_flow_value) in table_flow_values {
let flownode_id = table_flow_value.peer.id;
let value = table_flow_value.try_as_raw_value()?;
for source_table_id in source_table_ids {
txns.push(TxnOp::Put(
TableFlowKey::new(*source_table_id, flownode_id, flow_id, partition_id)
.to_bytes(),
value.clone(),
));
}
}
Ok(Txn::new().and_then(txns))
}
async fn remap_table_flow_addresses(
&self,
table_flows: &mut [(TableFlowKey, TableFlowValue)],

View File

@@ -35,7 +35,7 @@ pub mod memory;
pub mod rds;
pub mod test;
pub mod txn;
pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
#[async_trait]

View File

@@ -1,85 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Removes sensitive information like passwords from connection strings.
///
/// This function sanitizes connection strings by removing credentials:
/// - For URL format (mysql://user:password@host:port/db): Removes everything before '@'
/// - For parameter format (host=localhost password=secret): Removes the password parameter
/// - For URL format without credentials (mysql://host:port/db): Removes the protocol prefix
///
/// # Arguments
///
/// * `conn_str` - The connection string to sanitize
///
/// # Returns
///
/// A sanitized version of the connection string with sensitive information removed
pub fn sanitize_connection_string(conn_str: &str) -> String {
// Case 1: URL format with credentials (mysql://user:password@host:port/db)
// Extract everything after the '@' symbol
if let Some(at_pos) = conn_str.find('@') {
return conn_str[at_pos + 1..].to_string();
}
// Case 2: Parameter format with password (host=localhost password=secret dbname=mydb)
// Filter out any parameter that starts with "password="
if conn_str.contains("password=") {
return conn_str
.split_whitespace()
.filter(|param| !param.starts_with("password="))
.collect::<Vec<_>>()
.join(" ");
}
// Case 3: URL format without credentials (mysql://host:port/db)
// Extract everything after the protocol prefix
if let Some(host_part) = conn_str.split("://").nth(1) {
return host_part.to_string();
}
// Case 4: Already sanitized or unknown format
// Return as is
conn_str.to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sanitize_connection_string() {
// Test URL format with username/password
let conn_str = "mysql://user:password123@localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test URL format without credentials
let conn_str = "mysql://localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test parameter format with password
let conn_str = "host=localhost port=5432 user=postgres password=secret dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
// Test parameter format without password
let conn_str = "host=localhost port=5432 user=postgres dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
}
}

View File

@@ -43,6 +43,7 @@ pub mod region_keeper;
pub mod region_registry;
pub mod rpc;
pub mod sequence;
pub mod snapshot;
pub mod state_store;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;

View File

@@ -113,10 +113,8 @@ impl LeaderRegionManifestInfo {
pub fn prunable_entry_id(&self) -> u64 {
match self {
LeaderRegionManifestInfo::Mito {
flushed_entry_id,
topic_latest_entry_id,
..
} => (*flushed_entry_id).max(*topic_latest_entry_id),
flushed_entry_id, ..
} => *flushed_entry_id,
LeaderRegionManifestInfo::Metric {
data_flushed_entry_id,
data_topic_latest_entry_id,

View File

@@ -35,20 +35,17 @@ use api::v1::{
};
use base64::engine::general_purpose;
use base64::Engine as _;
use common_time::{DatabaseTimeToLive, Timezone};
use common_time::DatabaseTimeToLive;
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};
use session::context::{QueryContextBuilder, QueryContextRef};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use crate::error::{
self, InvalidSetDatabaseOptionSnafu, InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu,
Result,
};
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
use crate::key::FlowId;
/// DDL tasks
@@ -1205,7 +1202,7 @@ impl From<DropFlowTask> for PbDropFlowTask {
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryContext {
current_catalog: String,
current_schema: String,
@@ -1226,19 +1223,6 @@ impl From<QueryContextRef> for QueryContext {
}
}
impl TryFrom<QueryContext> for session::context::QueryContext {
type Error = error::Error;
fn try_from(value: QueryContext) -> std::result::Result<Self, Self::Error> {
Ok(QueryContextBuilder::default()
.current_catalog(value.current_catalog)
.current_schema(value.current_schema)
.timezone(Timezone::from_tz_string(&value.timezone).context(InvalidTimeZoneSnafu)?)
.extensions(value.extensions)
.channel((value.channel as u32).into())
.build())
}
}
impl From<QueryContext> for PbQueryContext {
fn from(
QueryContext {

View File

@@ -0,0 +1,365 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod file;
use std::fmt::{Display, Formatter};
use std::time::Instant;
use common_telemetry::info;
use file::{Metadata, MetadataContent};
use futures::TryStreamExt;
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use strum::Display;
use crate::error::{
Error, InvalidFileExtensionSnafu, InvalidFileNameSnafu, InvalidFilePathSnafu, ReadObjectSnafu,
Result, WriteObjectSnafu,
};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::{BatchPutRequest, RangeRequest};
use crate::rpc::KeyValue;
use crate::snapshot::file::{Document, KeyValue as FileKeyValue};
/// The format of the backup file.
#[derive(Debug, PartialEq, Eq, Display, Clone, Copy)]
pub enum FileFormat {
#[strum(serialize = "fb")]
FlexBuffers,
}
impl TryFrom<&str> for FileFormat {
type Error = String;
fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"fb" => Ok(FileFormat::FlexBuffers),
_ => Err(format!("Invalid file format: {}", value)),
}
}
}
#[derive(Debug, PartialEq, Eq, Display)]
#[strum(serialize_all = "lowercase")]
pub enum DataType {
Metadata,
}
impl TryFrom<&str> for DataType {
type Error = String;
fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"metadata" => Ok(DataType::Metadata),
_ => Err(format!("Invalid data type: {}", value)),
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct FileExtension {
format: FileFormat,
data_type: DataType,
}
impl FileExtension {
pub fn new(format: FileFormat, data_type: DataType) -> Self {
Self { format, data_type }
}
}
impl Display for FileExtension {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}.{}", self.data_type, self.format)
}
}
impl TryFrom<&str> for FileExtension {
type Error = Error;
fn try_from(value: &str) -> Result<Self> {
let parts = value.split(".").collect::<Vec<&str>>();
if parts.len() != 2 {
return InvalidFileExtensionSnafu {
reason: format!(
"Extension should be in the format of <datatype>.<format>, got: {}",
value
),
}
.fail();
}
let data_type = DataType::try_from(parts[0])
.map_err(|e| InvalidFileExtensionSnafu { reason: e }.build())?;
let format = FileFormat::try_from(parts[1])
.map_err(|e| InvalidFileExtensionSnafu { reason: e }.build())?;
Ok(FileExtension { format, data_type })
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct FileName {
name: String,
extension: FileExtension,
}
impl Display for FileName {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}.{}", self.name, self.extension)
}
}
impl TryFrom<&str> for FileName {
type Error = Error;
fn try_from(value: &str) -> Result<Self> {
let Some((name, extension)) = value.split_once(".") else {
return InvalidFileNameSnafu {
reason: format!(
"The file name should be in the format of <name>.<extension>, got: {}",
value
),
}
.fail();
};
let extension = FileExtension::try_from(extension)?;
Ok(Self {
name: name.to_string(),
extension,
})
}
}
impl FileName {
fn new(name: String, extension: FileExtension) -> Self {
Self { name, extension }
}
}
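// A hedged sketch (not part of the change set) of how the naming pieces above compose:
// a snapshot file is named `<name>.<datatype>.<format>`, e.g. "snapshot.metadata.fb";
// the concrete name here is illustrative only.
#[allow(dead_code)]
fn file_name_example() {
    let name = FileName::new(
        "snapshot".to_string(),
        FileExtension::new(FileFormat::FlexBuffers, DataType::Metadata),
    );
    assert_eq!(name.to_string(), "snapshot.metadata.fb");
    // Parsing goes the other way and splits on the first '.'.
    let parsed = FileName::try_from("snapshot.metadata.fb").unwrap();
    assert_eq!(parsed, name);
}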
/// The manager of the metadata snapshot.
///
/// It manages the metadata snapshot, including dumping and restoring.
pub struct MetadataSnapshotManager {
kv_backend: KvBackendRef,
object_store: ObjectStore,
}
/// The maximum size of a metadata batch-put request; 1 MiB by default.
const MAX_REQUEST_SIZE: usize = 1024 * 1024;
impl MetadataSnapshotManager {
pub fn new(kv_backend: KvBackendRef, object_store: ObjectStore) -> Self {
Self {
kv_backend,
object_store,
}
}
/// Restores the metadata from the backup file to the metadata store.
pub async fn restore(&self, file_path: &str) -> Result<u64> {
let filename = FileName::try_from(
file_path
.rsplit("/")
.next()
.context(InvalidFilePathSnafu { file_path })?,
)?;
let data = self
.object_store
.read(file_path)
.await
.context(ReadObjectSnafu { file_path })?;
let document = Document::from_slice(&filename.extension.format, &data.to_bytes())?;
let metadata_content = document.into_metadata_content()?;
let mut req = BatchPutRequest::default();
let mut total_request_size = 0;
let mut count = 0;
let now = Instant::now();
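// Replay the snapshot in batches so that a single BatchPutRequest never exceeds MAX_REQUEST_SIZE.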
for FileKeyValue { key, value } in metadata_content.into_iter() {
count += 1;
let key_size = key.len();
let value_size = value.len();
if total_request_size + key_size + value_size > MAX_REQUEST_SIZE {
self.kv_backend.batch_put(req).await?;
req = BatchPutRequest::default();
total_request_size = 0;
}
req.kvs.push(KeyValue { key, value });
total_request_size += key_size + value_size;
}
if !req.kvs.is_empty() {
self.kv_backend.batch_put(req).await?;
}
info!(
"Restored metadata from {} successfully, total {} key-value pairs, elapsed {:?}",
file_path,
count,
now.elapsed()
);
Ok(count)
}
/// Dumps the metadata to the backup file.
pub async fn dump(&self, path: &str, filename: &str) -> Result<(String, u64)> {
let format = FileFormat::FlexBuffers;
let filename = FileName::new(
filename.to_string(),
FileExtension {
format,
data_type: DataType::Metadata,
},
);
let file_path = format!("{}/{}", path.trim_end_matches('/'), filename);
let now = Instant::now();
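// A range from key `0x00` to range end `0x00` scans the whole metadata keyspace (assuming etcd-style range semantics).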
let req = RangeRequest::new().with_range(vec![0], vec![0]);
let stream = PaginationStream::new(self.kv_backend.clone(), req, DEFAULT_PAGE_SIZE, |kv| {
Ok(FileKeyValue {
key: kv.key,
value: kv.value,
})
})
.into_stream();
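// The whole keyspace is collected in memory before being serialized into a single document.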
let keyvalues = stream.try_collect::<Vec<_>>().await?;
let num_keyvalues = keyvalues.len();
let document = Document::new(
Metadata::new(),
file::Content::Metadata(MetadataContent::new(keyvalues)),
);
let bytes = document.to_bytes(&format)?;
let r = self
.object_store
.write(&file_path, bytes)
.await
.context(WriteObjectSnafu {
file_path: &file_path,
})?;
info!(
"Dumped metadata to {} successfully, total {} key-value pairs, file size {} bytes, elapsed {:?}",
file_path,
num_keyvalues,
r.content_length(),
now.elapsed()
);
Ok((filename.to_string(), num_keyvalues as u64))
}
}
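// A hedged usage sketch (not part of the change set): dump the metadata to object storage,
// then restore it into a kv backend. The path and file name are illustrative only.
#[allow(dead_code)]
async fn snapshot_round_trip_example(
    kv_backend: KvBackendRef,
    object_store: ObjectStore,
) -> Result<()> {
    let manager = MetadataSnapshotManager::new(kv_backend, object_store);
    // Writes "backups/2025-04-20/metadata_snapshot.metadata.fb" and returns the file name and count.
    let (_file_name, count) = manager.dump("backups/2025-04-20", "metadata_snapshot").await?;
    // Restore takes the full path of a previously dumped file.
    let restored = manager
        .restore("backups/2025-04-20/metadata_snapshot.metadata.fb")
        .await?;
    assert_eq!(restored, count);
    Ok(())
}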
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::sync::Arc;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use object_store::services::Fs;
use super::*;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::KvBackend;
use crate::rpc::store::PutRequest;
#[test]
fn test_file_name() {
let file_name = FileName::try_from("test.metadata.fb").unwrap();
assert_eq!(file_name.name, "test");
assert_eq!(file_name.extension.format, FileFormat::FlexBuffers);
assert_eq!(file_name.extension.data_type, DataType::Metadata);
assert_eq!(file_name.to_string(), "test.metadata.fb");
let invalid_file_name = FileName::try_from("test.metadata").unwrap_err();
assert_eq!(
invalid_file_name.to_string(),
"Invalid file extension: Extension should be in the format of <datatype>.<format>, got: metadata"
);
let invalid_file_extension = FileName::try_from("test.metadata.hello").unwrap_err();
assert_eq!(
invalid_file_extension.to_string(),
"Invalid file extension: Invalid file format: hello"
);
}
fn test_env(
prefix: &str,
) -> (
TempDir,
Arc<MemoryKvBackend<Error>>,
MetadataSnapshotManager,
) {
let temp_dir = create_temp_dir(prefix);
let kv_backend = Arc::new(MemoryKvBackend::default());
let temp_path = temp_dir.path();
let data_path = temp_path.join("data").as_path().display().to_string();
let builder = Fs::default().root(&data_path);
let object_store = ObjectStore::new(builder).unwrap().finish();
let manager = MetadataSnapshotManager::new(kv_backend.clone(), object_store);
(temp_dir, kv_backend, manager)
}
#[tokio::test]
async fn test_dump_and_restore() {
common_telemetry::init_default_ut_logging();
let (temp_dir, kv_backend, manager) = test_env("test_dump_and_restore");
let temp_path = temp_dir.path();
for i in 0..10 {
kv_backend
.put(
PutRequest::new()
.with_key(format!("test_{}", i).as_bytes().to_vec())
.with_value(format!("value_{}", i).as_bytes().to_vec()),
)
.await
.unwrap();
}
let dump_path = temp_path.join("snapshot");
manager
.dump(
&dump_path.as_path().display().to_string(),
"metadata_snapshot",
)
.await
.unwrap();
// Clean up the kv backend
kv_backend.clear();
let restore_path = dump_path
.join("metadata_snapshot.metadata.fb")
.as_path()
.display()
.to_string();
manager.restore(&restore_path).await.unwrap();
for i in 0..10 {
let key = format!("test_{}", i);
let value = kv_backend.get(key.as_bytes()).await.unwrap().unwrap();
assert_eq!(value.value, format!("value_{}", i).as_bytes());
}
}
#[tokio::test]
async fn test_restore_from_nonexistent_file() {
let (temp_dir, _kv_backend, manager) = test_env("test_restore_from_nonexistent_file");
let restore_path = temp_dir
.path()
.join("nonexistent.metadata.fb")
.as_path()
.display()
.to_string();
let err = manager.restore(&restore_path).await.unwrap_err();
assert_matches!(err, Error::ReadObject { .. })
}
}

View File

@@ -0,0 +1,145 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_time::util::current_time_millis;
use flexbuffers::{FlexbufferSerializer, Reader};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{
DeserializeFlexbuffersSnafu, ReadFlexbuffersSnafu, Result, SerializeFlexbuffersSnafu,
};
use crate::snapshot::FileFormat;
/// The layout of the backup file.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct Document {
metadata: Metadata,
content: Content,
}
impl Document {
/// Creates a new document.
pub fn new(metadata: Metadata, content: Content) -> Self {
Self { metadata, content }
}
fn serialize_to_flexbuffer(&self) -> Result<Vec<u8>> {
let mut builder = FlexbufferSerializer::new();
self.serialize(&mut builder)
.context(SerializeFlexbuffersSnafu)?;
Ok(builder.take_buffer())
}
/// Converts the [`Document`] to a bytes.
pub(crate) fn to_bytes(&self, format: &FileFormat) -> Result<Vec<u8>> {
match format {
FileFormat::FlexBuffers => self.serialize_to_flexbuffer(),
}
}
fn deserialize_from_flexbuffer(data: &[u8]) -> Result<Self> {
let reader = Reader::get_root(data).context(ReadFlexbuffersSnafu)?;
Document::deserialize(reader).context(DeserializeFlexbuffersSnafu)
}
/// Deserializes the [`Document`] from bytes.
pub(crate) fn from_slice(format: &FileFormat, data: &[u8]) -> Result<Self> {
match format {
FileFormat::FlexBuffers => Self::deserialize_from_flexbuffer(data),
}
}
/// Converts the [`Document`] to a [`MetadataContent`].
pub(crate) fn into_metadata_content(self) -> Result<MetadataContent> {
match self.content {
Content::Metadata(metadata) => Ok(metadata),
}
}
}
/// The metadata of the backup file.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct Metadata {
// Milliseconds since UNIX_EPOCH.
created_timestamp_mills: i64,
}
impl Metadata {
/// Create a new metadata.
///
/// The `created_timestamp_mills` will be the current time in milliseconds.
pub fn new() -> Self {
Self {
created_timestamp_mills: current_time_millis(),
}
}
}
/// The content of the backup file.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) enum Content {
Metadata(MetadataContent),
}
/// The content of the backup file.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct MetadataContent {
values: Vec<KeyValue>,
}
impl MetadataContent {
/// Create a new metadata content.
pub fn new(values: impl IntoIterator<Item = KeyValue>) -> Self {
Self {
values: values.into_iter().collect(),
}
}
/// Returns an iterator over the key-value pairs.
pub fn into_iter(self) -> impl Iterator<Item = KeyValue> {
self.values.into_iter()
}
}
/// The key-value pair of the backup file.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct KeyValue {
pub key: Vec<u8>,
pub value: Vec<u8>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_document() {
let document = Document::new(
Metadata::new(),
Content::Metadata(MetadataContent::new(vec![KeyValue {
key: b"key".to_vec(),
value: b"value".to_vec(),
}])),
);
let bytes = document.to_bytes(&FileFormat::FlexBuffers).unwrap();
let document_deserialized = Document::from_slice(&FileFormat::FlexBuffers, &bytes).unwrap();
assert_eq!(
document.metadata.created_timestamp_mills,
document_deserialized.metadata.created_timestamp_mills
);
assert_eq!(document.content, document_deserialized.content);
}
}

View File

@@ -20,8 +20,6 @@ use api::v1::region::{InsertRequests, RegionRequest};
pub use common_base::AffectedRows;
use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
use common_wal::config::kafka::MetasrvKafkaConfig;
use crate::cache_invalidator::DummyCacheInvalidator;
use crate::ddl::flow_meta::FlowMetadataAllocator;
@@ -39,8 +37,7 @@ use crate::peer::{Peer, PeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::region_registry::LeaderRegionRegistry;
use crate::sequence::SequenceBuilder;
use crate::wal_options_allocator::topic_pool::KafkaTopicPool;
use crate::wal_options_allocator::{build_kafka_topic_creator, WalOptionsAllocator};
use crate::wal_options_allocator::WalOptionsAllocator;
use crate::{DatanodeId, FlownodeId};
#[async_trait::async_trait]
@@ -202,34 +199,3 @@ impl PeerLookupService for NoopPeerLookupService {
Ok(Some(Peer::empty(id)))
}
}
/// Create a kafka topic pool for testing.
pub async fn test_kafka_topic_pool(
broker_endpoints: Vec<String>,
num_topics: usize,
auto_create_topics: bool,
topic_name_prefix: Option<&str>,
) -> KafkaTopicPool {
let mut config = MetasrvKafkaConfig {
connection: KafkaConnectionConfig {
broker_endpoints,
..Default::default()
},
kafka_topic: KafkaTopicConfig {
num_topics,
..Default::default()
},
auto_create_topics,
..Default::default()
};
if let Some(prefix) = topic_name_prefix {
config.kafka_topic.topic_name_prefix = prefix.to_string();
}
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let topic_creator = build_kafka_topic_creator(&config.connection, &config.kafka_topic)
.await
.unwrap();
KafkaTopicPool::new(&config, kv_backend, topic_creator)
}

View File

@@ -112,9 +112,7 @@ pub async fn build_wal_options_allocator(
NAME_PATTERN_REGEX.is_match(prefix),
InvalidTopicNamePrefixSnafu { prefix }
);
let topic_creator =
build_kafka_topic_creator(&kafka_config.connection, &kafka_config.kafka_topic)
.await?;
let topic_creator = build_kafka_topic_creator(kafka_config).await?;
let topic_pool = KafkaTopicPool::new(kafka_config, kv_backend, topic_creator);
Ok(WalOptionsAllocator::Kafka(topic_pool))
}
@@ -153,16 +151,13 @@ pub fn prepare_wal_options(
mod tests {
use std::assert_matches::assert_matches;
use common_wal::config::kafka::common::KafkaTopicConfig;
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
use common_wal::config::kafka::MetasrvKafkaConfig;
use common_wal::maybe_skip_kafka_integration_test;
use common_wal::test_util::get_kafka_endpoints;
use common_wal::test_util::run_test_with_kafka_wal;
use super::*;
use crate::error::Error;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::test_util::test_kafka_topic_pool;
use crate::wal_options_allocator::selector::RoundRobinTopicSelector;
// Tests that the wal options allocator could successfully allocate raft-engine wal options.
#[tokio::test]
@@ -202,42 +197,55 @@ mod tests {
assert_matches!(got, Error::InvalidTopicNamePrefix { .. });
}
// Tests that the wal options allocator could successfully allocate Kafka wal options.
#[tokio::test]
async fn test_allocator_with_kafka_allocate_wal_options() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let num_topics = 5;
let mut topic_pool = test_kafka_topic_pool(
get_kafka_endpoints(),
num_topics,
true,
Some("test_allocator_with_kafka"),
)
.await;
topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
let topics = topic_pool.topics.clone();
// clean up the topics before test
let topic_creator = topic_pool.topic_creator();
topic_creator.delete_topics(&topics).await.unwrap();
async fn test_allocator_with_kafka() {
run_test_with_kafka_wal(|broker_endpoints| {
Box::pin(async {
let topics = (0..256)
.map(|i| format!("test_allocator_with_kafka_{}_{}", i, uuid::Uuid::new_v4()))
.collect::<Vec<_>>();
// Creates an options allocator.
let allocator = WalOptionsAllocator::Kafka(topic_pool);
allocator.start().await.unwrap();
// Creates a topic manager.
let kafka_topic = KafkaTopicConfig {
replication_factor: broker_endpoints.len() as i16,
..Default::default()
};
let config = MetasrvKafkaConfig {
connection: KafkaConnectionConfig {
broker_endpoints,
..Default::default()
},
kafka_topic,
..Default::default()
};
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let topic_creator = build_kafka_topic_creator(&config).await.unwrap();
let mut topic_pool = KafkaTopicPool::new(&config, kv_backend, topic_creator);
topic_pool.topics.clone_from(&topics);
topic_pool.selector = Arc::new(selector::RoundRobinTopicSelector::default());
let num_regions = 3;
let regions = (0..num_regions).collect::<Vec<_>>();
let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
// Creates an options allocator.
let allocator = WalOptionsAllocator::Kafka(topic_pool);
allocator.start().await.unwrap();
// Check the allocated wal options contain the expected topics.
let expected = (0..num_regions)
.map(|i| {
let options = WalOptions::Kafka(KafkaWalOptions {
topic: topics[i as usize].clone(),
});
(i, serde_json::to_string(&options).unwrap())
let num_regions = 32;
let regions = (0..num_regions).collect::<Vec<_>>();
let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
// Check the allocated wal options contain the expected topics.
let expected = (0..num_regions)
.map(|i| {
let options = WalOptions::Kafka(KafkaWalOptions {
topic: topics[i as usize].clone(),
});
(i, serde_json::to_string(&options).unwrap())
})
.collect::<HashMap<_, _>>();
assert_eq!(got, expected);
})
.collect::<HashMap<_, _>>();
assert_eq!(got, expected);
})
.await;
}
#[tokio::test]

View File

@@ -12,21 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_telemetry::{debug, error, info};
use common_wal::config::kafka::common::{
KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_BACKOFF_CONFIG,
};
use common_telemetry::{error, info};
use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG;
use common_wal::config::kafka::MetasrvKafkaConfig;
use rskafka::client::error::Error as RsKafkaError;
use rskafka::client::error::ProtocolError::TopicAlreadyExists;
use rskafka::client::partition::{Compression, OffsetAt, PartitionClient, UnknownTopicHandling};
use rskafka::client::partition::{Compression, UnknownTopicHandling};
use rskafka::client::{Client, ClientBuilder};
use rskafka::record::Record;
use snafu::ResultExt;
use crate::error::{
BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, CreateKafkaWalTopicSnafu,
KafkaGetOffsetSnafu, KafkaPartitionClientSnafu, ProduceRecordSnafu, ResolveKafkaEndpointSnafu,
Result, TlsConfigSnafu,
BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, BuildKafkaPartitionClientSnafu,
CreateKafkaWalTopicSnafu, ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result,
TlsConfigSnafu,
};
// Each topic only has one partition for now.
@@ -71,47 +70,21 @@ impl KafkaTopicCreator {
info!("The topic {} already exists", topic);
Ok(())
} else {
error!(e; "Failed to create a topic {}", topic);
error!("Failed to create a topic {}, error {:?}", topic, e);
Err(e).context(CreateKafkaWalTopicSnafu)
}
}
}
}
async fn prepare_topic(&self, topic: &String) -> Result<()> {
let partition_client = self.partition_client(topic).await?;
self.append_noop_record(topic, &partition_client).await?;
Ok(())
}
/// Creates a [PartitionClient] for the given topic.
async fn partition_client(&self, topic: &str) -> Result<PartitionClient> {
self.client
async fn append_noop_record(&self, topic: &String, client: &Client) -> Result<()> {
let partition_client = client
.partition_client(topic, DEFAULT_PARTITION, UnknownTopicHandling::Retry)
.await
.context(KafkaPartitionClientSnafu {
.context(BuildKafkaPartitionClientSnafu {
topic,
partition: DEFAULT_PARTITION,
})
}
/// Appends a noop record to the topic.
/// It only appends a noop record if the topic is empty.
async fn append_noop_record(
&self,
topic: &String,
partition_client: &PartitionClient,
) -> Result<()> {
let end_offset = partition_client
.get_offset(OffsetAt::Latest)
.await
.context(KafkaGetOffsetSnafu {
topic: topic.to_string(),
partition: DEFAULT_PARTITION,
})?;
if end_offset > 0 {
return Ok(());
}
partition_client
.produce(
@@ -125,28 +98,22 @@ impl KafkaTopicCreator {
)
.await
.context(ProduceRecordSnafu { topic })?;
debug!("Appended a noop record to topic {}", topic);
Ok(())
}
/// Creates topics in Kafka.
pub async fn create_topics(&self, topics: &[String]) -> Result<()> {
let tasks = topics
.iter()
.map(|topic| async { self.create_topic(topic, &self.client).await })
.collect::<Vec<_>>();
futures::future::try_join_all(tasks).await.map(|_| ())
}
/// Prepares topics in Kafka.
///
/// It appends a noop record to each topic if the topic is empty.
pub async fn prepare_topics(&self, topics: &[String]) -> Result<()> {
/// 1. Creates missing topics.
/// 2. Appends a noop record to each topic.
pub async fn prepare_topics(&self, topics: &[&String]) -> Result<()> {
// Try to create missing topics.
let tasks = topics
.iter()
.map(|topic| async { self.prepare_topic(topic).await })
.map(|topic| async {
self.create_topic(topic, &self.client).await?;
self.append_noop_record(topic, &self.client).await?;
Ok(())
})
.collect::<Vec<_>>();
futures::future::try_join_all(tasks).await.map(|_| ())
}
@@ -162,244 +129,34 @@ impl KafkaTopicCreator {
}
}
#[cfg(test)]
impl KafkaTopicCreator {
pub async fn delete_topics(&self, topics: &[String]) -> Result<()> {
let tasks = topics
.iter()
.map(|topic| async { self.delete_topic(topic, &self.client).await })
.collect::<Vec<_>>();
futures::future::try_join_all(tasks).await.map(|_| ())
}
async fn delete_topic(&self, topic: &String, client: &Client) -> Result<()> {
let controller = client
.controller_client()
.context(BuildKafkaCtrlClientSnafu)?;
match controller.delete_topic(topic, 10).await {
Ok(_) => {
info!("Successfully deleted topic {}", topic);
Ok(())
}
Err(e) => {
if Self::is_unknown_topic_err(&e) {
info!("The topic {} does not exist", topic);
Ok(())
} else {
panic!("Failed to delete a topic {}, error: {}", topic, e);
}
}
}
}
fn is_unknown_topic_err(e: &RsKafkaError) -> bool {
matches!(
e,
&RsKafkaError::ServerError {
protocol_error: rskafka::client::error::ProtocolError::UnknownTopicOrPartition,
..
}
)
}
pub async fn get_partition_client(&self, topic: &str) -> PartitionClient {
self.partition_client(topic).await.unwrap()
}
}
/// Builds a kafka [Client](rskafka::client::Client).
pub async fn build_kafka_client(connection: &KafkaConnectionConfig) -> Result<Client> {
pub async fn build_kafka_client(config: &MetasrvKafkaConfig) -> Result<Client> {
// Builds an kafka controller client for creating topics.
let broker_endpoints = common_wal::resolve_to_ipv4(&connection.broker_endpoints)
let broker_endpoints = common_wal::resolve_to_ipv4(&config.connection.broker_endpoints)
.await
.context(ResolveKafkaEndpointSnafu)?;
let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(DEFAULT_BACKOFF_CONFIG);
if let Some(sasl) = &connection.sasl {
if let Some(sasl) = &config.connection.sasl {
builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
};
if let Some(tls) = &connection.tls {
if let Some(tls) = &config.connection.tls {
builder = builder.tls_config(tls.to_tls_config().await.context(TlsConfigSnafu)?)
};
builder
.build()
.await
.with_context(|_| BuildKafkaClientSnafu {
broker_endpoints: connection.broker_endpoints.clone(),
broker_endpoints: config.connection.broker_endpoints.clone(),
})
}
/// Builds a [KafkaTopicCreator].
pub async fn build_kafka_topic_creator(
connection: &KafkaConnectionConfig,
kafka_topic: &KafkaTopicConfig,
) -> Result<KafkaTopicCreator> {
let client = build_kafka_client(connection).await?;
pub async fn build_kafka_topic_creator(config: &MetasrvKafkaConfig) -> Result<KafkaTopicCreator> {
let client = build_kafka_client(config).await?;
Ok(KafkaTopicCreator {
client,
num_partitions: kafka_topic.num_partitions,
replication_factor: kafka_topic.replication_factor,
create_topic_timeout: kafka_topic.create_topic_timeout.as_millis() as i32,
num_partitions: config.kafka_topic.num_partitions,
replication_factor: config.kafka_topic.replication_factor,
create_topic_timeout: config.kafka_topic.create_topic_timeout.as_millis() as i32,
})
}
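// A hedged sketch mirroring the test code elsewhere in this diff: build a MetasrvKafkaConfig,
// create a topic creator from it, and assemble a topic pool. It assumes the variant of
// `build_kafka_topic_creator` that takes the whole config (both signatures appear above);
// KafkaTopicPool / KvBackendRef come from sibling modules, imports omitted here, and the
// broker endpoint is illustrative only.
#[allow(dead_code)]
async fn build_topic_pool_example(kv_backend: KvBackendRef) -> Result<KafkaTopicPool> {
    let config = MetasrvKafkaConfig {
        connection: KafkaConnectionConfig {
            broker_endpoints: vec!["127.0.0.1:9092".to_string()],
            ..Default::default()
        },
        ..Default::default()
    };
    let topic_creator = build_kafka_topic_creator(&config).await?;
    Ok(KafkaTopicPool::new(&config, kv_backend, topic_creator))
}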
#[cfg(test)]
mod tests {
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
use common_wal::maybe_skip_kafka_integration_test;
use common_wal::test_util::get_kafka_endpoints;
use super::*;
async fn test_topic_creator(broker_endpoints: Vec<String>) -> KafkaTopicCreator {
let connection = KafkaConnectionConfig {
broker_endpoints,
..Default::default()
};
let kafka_topic = KafkaTopicConfig::default();
build_kafka_topic_creator(&connection, &kafka_topic)
.await
.unwrap()
}
async fn append_records(partition_client: &PartitionClient, num_records: usize) -> Result<()> {
for i in 0..num_records {
partition_client
.produce(
vec![Record {
key: Some(b"test".to_vec()),
value: Some(format!("test {}", i).as_bytes().to_vec()),
timestamp: chrono::Utc::now(),
headers: Default::default(),
}],
Compression::Lz4,
)
.await
.unwrap();
}
Ok(())
}
#[tokio::test]
async fn test_append_noop_record_to_empty_topic() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "append_noop_record_to_empty_topic";
let creator = test_topic_creator(get_kafka_endpoints()).await;
let topic = format!("{}{}", prefix, "0");
// Clean up the topics before test
creator.delete_topics(&[topic.to_string()]).await.unwrap();
creator.create_topics(&[topic.to_string()]).await.unwrap();
let partition_client = creator.partition_client(&topic).await.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 0);
// The topic is empty, so a noop record is appended.
creator
.append_noop_record(&topic, &partition_client)
.await
.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 1);
}
#[tokio::test]
async fn test_append_noop_record_to_non_empty_topic() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "append_noop_record_to_non_empty_topic";
let creator = test_topic_creator(get_kafka_endpoints()).await;
let topic = format!("{}{}", prefix, "0");
// Clean up the topics before test
creator.delete_topics(&[topic.to_string()]).await.unwrap();
creator.create_topics(&[topic.to_string()]).await.unwrap();
let partition_client = creator.partition_client(&topic).await.unwrap();
append_records(&partition_client, 2).await.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 2);
// The topic is not empty, so no noop record is appended.
creator
.append_noop_record(&topic, &partition_client)
.await
.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 2);
}
#[tokio::test]
async fn test_create_topic() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "create_topic";
let creator = test_topic_creator(get_kafka_endpoints()).await;
let topic = format!("{}{}", prefix, "0");
// Clean up the topics before test
creator.delete_topics(&[topic.to_string()]).await.unwrap();
creator.create_topics(&[topic.to_string()]).await.unwrap();
// Should be ok
creator.create_topics(&[topic.to_string()]).await.unwrap();
let partition_client = creator.partition_client(&topic).await.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 0);
}
#[tokio::test]
async fn test_prepare_topic() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "prepare_topic";
let creator = test_topic_creator(get_kafka_endpoints()).await;
let topic = format!("{}{}", prefix, "0");
// Clean up the topics before test
creator.delete_topics(&[topic.to_string()]).await.unwrap();
creator.create_topics(&[topic.to_string()]).await.unwrap();
creator.prepare_topic(&topic).await.unwrap();
let partition_client = creator.partition_client(&topic).await.unwrap();
let start_offset = partition_client
.get_offset(OffsetAt::Earliest)
.await
.unwrap();
assert_eq!(start_offset, 0);
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 1);
}
#[tokio::test]
async fn test_prepare_topic_with_stale_records_without_pruning() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "prepare_topic_with_stale_records_without_pruning";
let creator = test_topic_creator(get_kafka_endpoints()).await;
let topic = format!("{}{}", prefix, "0");
// Clean up the topics before test
creator.delete_topics(&[topic.to_string()]).await.unwrap();
creator.create_topics(&[topic.to_string()]).await.unwrap();
let partition_client = creator.partition_client(&topic).await.unwrap();
append_records(&partition_client, 10).await.unwrap();
creator.prepare_topic(&topic).await.unwrap();
let end_offset = partition_client.get_offset(OffsetAt::Latest).await.unwrap();
assert_eq!(end_offset, 10);
let start_offset = partition_client
.get_offset(OffsetAt::Earliest)
.await
.unwrap();
assert_eq!(start_offset, 0);
}
}

View File

@@ -40,21 +40,24 @@ impl KafkaTopicManager {
Ok(topics)
}
/// Returns the topics that are not prepared.
pub async fn unprepare_topics(&self, all_topics: &[String]) -> Result<Vec<String>> {
/// Restores topics from the key-value backend and returns the topics that are not yet stored there.
pub async fn get_topics_to_create<'a>(
&self,
all_topics: &'a [String],
) -> Result<Vec<&'a String>> {
let existing_topics = self.restore_topics().await?;
let existing_topic_set = existing_topics.iter().collect::<HashSet<_>>();
let mut topics_to_create = Vec::with_capacity(all_topics.len());
for topic in all_topics {
if !existing_topic_set.contains(topic) {
topics_to_create.push(topic.to_string());
topics_to_create.push(topic);
}
}
Ok(topics_to_create)
}
/// Persists prepared topics into the key-value backend.
pub async fn persist_prepared_topics(&self, topics: &[String]) -> Result<()> {
/// Persists topics into the key-value backend.
pub async fn persist_topics(&self, topics: &[String]) -> Result<()> {
self.topic_name_manager
.batch_put(
topics
@@ -67,14 +70,6 @@ impl KafkaTopicManager {
}
}
#[cfg(test)]
impl KafkaTopicManager {
/// Lists all topics in the key-value backend.
pub async fn list_topics(&self) -> Result<Vec<String>> {
self.topic_name_manager.range().await
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -95,11 +90,11 @@ mod tests {
// No legacy topics.
let mut topics_to_be_created = topic_kvbackend_manager
.unprepare_topics(&all_topics)
.get_topics_to_create(&all_topics)
.await
.unwrap();
topics_to_be_created.sort();
let mut expected = all_topics.clone();
let mut expected = all_topics.iter().collect::<Vec<_>>();
expected.sort();
assert_eq!(expected, topics_to_be_created);
@@ -114,7 +109,7 @@ mod tests {
assert!(res.prev_kv.is_none());
let topics_to_be_created = topic_kvbackend_manager
.unprepare_topics(&all_topics)
.get_topics_to_create(&all_topics)
.await
.unwrap();
assert!(topics_to_be_created.is_empty());
@@ -149,21 +144,21 @@ mod tests {
let topic_kvbackend_manager = KafkaTopicManager::new(kv_backend);
let mut topics_to_be_created = topic_kvbackend_manager
.unprepare_topics(&all_topics)
.get_topics_to_create(&all_topics)
.await
.unwrap();
topics_to_be_created.sort();
let mut expected = all_topics.clone();
let mut expected = all_topics.iter().collect::<Vec<_>>();
expected.sort();
assert_eq!(expected, topics_to_be_created);
// Persists topics to kv backend.
topic_kvbackend_manager
.persist_prepared_topics(&all_topics)
.persist_topics(&all_topics)
.await
.unwrap();
let topics_to_be_created = topic_kvbackend_manager
.unprepare_topics(&all_topics)
.get_topics_to_create(&all_topics)
.await
.unwrap();
assert!(topics_to_be_created.is_empty());

View File

@@ -15,7 +15,6 @@
use std::fmt::{self, Formatter};
use std::sync::Arc;
use common_telemetry::info;
use common_wal::config::kafka::MetasrvKafkaConfig;
use common_wal::TopicSelectorType;
use snafu::ensure;
@@ -78,35 +77,27 @@ impl KafkaTopicPool {
}
/// Tries to activate the topic manager when metasrv becomes the leader.
///
/// First tries to restore persisted topics from the kv backend.
/// If there are unprepared topics (topics that exist in the configuration but not in the kv backend),
/// it will create these topics in Kafka if `auto_create_topics` is enabled.
///
/// Then it prepares all unprepared topics by appending a noop record if the topic is empty,
/// and persists them in the kv backend for future use.
/// If not enough topics are retrieved, it will contact the Kafka cluster and request that more topics be created.
pub async fn activate(&self) -> Result<()> {
if !self.auto_create_topics {
return Ok(());
}
let num_topics = self.topics.len();
ensure!(num_topics > 0, InvalidNumTopicsSnafu { num_topics });
let unprepared_topics = self.topic_manager.unprepare_topics(&self.topics).await?;
let topics_to_be_created = self
.topic_manager
.get_topics_to_create(&self.topics)
.await?;
if !unprepared_topics.is_empty() {
if self.auto_create_topics {
info!("Creating {} topics", unprepared_topics.len());
self.topic_creator.create_topics(&unprepared_topics).await?;
} else {
info!("Auto create topics is disabled, skipping topic creation.");
}
if !topics_to_be_created.is_empty() {
self.topic_creator
.prepare_topics(&unprepared_topics)
.await?;
self.topic_manager
.persist_prepared_topics(&unprepared_topics)
.prepare_topics(&topics_to_be_created)
.await?;
self.topic_manager.persist_topics(&self.topics).await?;
}
info!("Activated topic pool with {} topics", self.topics.len());
Ok(())
}
@@ -123,147 +114,77 @@ impl KafkaTopicPool {
}
}
#[cfg(test)]
impl KafkaTopicPool {
pub(crate) fn topic_manager(&self) -> &KafkaTopicManager {
&self.topic_manager
}
pub(crate) fn topic_creator(&self) -> &KafkaTopicCreator {
&self.topic_creator
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use common_wal::maybe_skip_kafka_integration_test;
use common_wal::test_util::get_kafka_endpoints;
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
use common_wal::test_util::run_test_with_kafka_wal;
use super::*;
use crate::error::Error;
use crate::test_util::test_kafka_topic_pool;
use crate::wal_options_allocator::selector::RoundRobinTopicSelector;
#[tokio::test]
async fn test_pool_invalid_number_topics_err() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let endpoints = get_kafka_endpoints();
let pool = test_kafka_topic_pool(endpoints.clone(), 0, false, None).await;
let err = pool.activate().await.unwrap_err();
assert_matches!(err, Error::InvalidNumTopics { .. });
let pool = test_kafka_topic_pool(endpoints, 0, true, None).await;
let err = pool.activate().await.unwrap_err();
assert_matches!(err, Error::InvalidNumTopics { .. });
}
#[tokio::test]
async fn test_pool_activate_unknown_topics_err() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let pool =
test_kafka_topic_pool(get_kafka_endpoints(), 1, false, Some("unknown_topic")).await;
let err = pool.activate().await.unwrap_err();
assert_matches!(err, Error::KafkaPartitionClient { .. });
}
#[tokio::test]
async fn test_pool_activate() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let pool =
test_kafka_topic_pool(get_kafka_endpoints(), 2, true, Some("pool_activate")).await;
// clean up the topics before test
let topic_creator = pool.topic_creator();
topic_creator.delete_topics(&pool.topics).await.unwrap();
let topic_manager = pool.topic_manager();
pool.activate().await.unwrap();
let topics = topic_manager.list_topics().await.unwrap();
assert_eq!(topics.len(), 2);
}
#[tokio::test]
async fn test_pool_activate_with_existing_topics() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let prefix = "pool_activate_with_existing_topics";
let pool = test_kafka_topic_pool(get_kafka_endpoints(), 2, true, Some(prefix)).await;
let topic_creator = pool.topic_creator();
topic_creator.delete_topics(&pool.topics).await.unwrap();
let topic_manager = pool.topic_manager();
// Persist one topic first; pool.activate() will then create the topics that are not yet persisted.
topic_manager
.persist_prepared_topics(&pool.topics[0..1])
.await
.unwrap();
pool.activate().await.unwrap();
let topics = topic_manager.list_topics().await.unwrap();
assert_eq!(topics.len(), 2);
let client = pool.topic_creator().client();
let topics = client
.list_topics()
.await
.unwrap()
.into_iter()
.filter(|t| t.name.starts_with(prefix))
.collect::<Vec<_>>();
assert_eq!(topics.len(), 1);
}
use crate::kv_backend::memory::MemoryKvBackend;
use crate::wal_options_allocator::topic_creator::build_kafka_topic_creator;
/// Tests that the topic manager could allocate topics correctly.
#[tokio::test]
async fn test_alloc_topics() {
common_telemetry::init_default_ut_logging();
maybe_skip_kafka_integration_test!();
let num_topics = 5;
let mut topic_pool = test_kafka_topic_pool(
get_kafka_endpoints(),
num_topics,
true,
Some("test_allocator_with_kafka"),
)
run_test_with_kafka_wal(|broker_endpoints| {
Box::pin(async {
// Constructs topics that should be created.
let topics = (0..256)
.map(|i| format!("test_alloc_topics_{}_{}", i, uuid::Uuid::new_v4()))
.collect::<Vec<_>>();
// Creates a topic manager.
let kafka_topic = KafkaTopicConfig {
replication_factor: broker_endpoints.len() as i16,
..Default::default()
};
let config = MetasrvKafkaConfig {
connection: KafkaConnectionConfig {
broker_endpoints,
..Default::default()
},
kafka_topic,
..Default::default()
};
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let topic_creator = build_kafka_topic_creator(&config).await.unwrap();
let mut topic_pool = KafkaTopicPool::new(&config, kv_backend, topic_creator);
// Replaces the default topic pool with the constructed topics.
topic_pool.topics.clone_from(&topics);
// Replaces the default selector with a round-robin selector without shuffling.
topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
topic_pool.activate().await.unwrap();
// Selects exactly the number of `num_topics` topics one by one.
let got = (0..topics.len())
.map(|_| topic_pool.select().unwrap())
.cloned()
.collect::<Vec<_>>();
assert_eq!(got, topics);
// Selects exactly the number of `num_topics` topics in a batching manner.
let got = topic_pool
.select_batch(topics.len())
.unwrap()
.into_iter()
.map(ToString::to_string)
.collect::<Vec<_>>();
assert_eq!(got, topics);
// Selects more than the number of `num_topics` topics.
let got = topic_pool
.select_batch(2 * topics.len())
.unwrap()
.into_iter()
.map(ToString::to_string)
.collect::<Vec<_>>();
let expected = vec![topics.clone(); 2]
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert_eq!(got, expected);
})
})
.await;
topic_pool.selector = Arc::new(RoundRobinTopicSelector::default());
let topics = topic_pool.topics.clone();
// clean up the topics before test
let topic_creator = topic_pool.topic_creator();
topic_creator.delete_topics(&topics).await.unwrap();
// Selects exactly the number of `num_topics` topics one by one.
let got = (0..topics.len())
.map(|_| topic_pool.select().unwrap())
.cloned()
.collect::<Vec<_>>();
assert_eq!(got, topics);
// Selects exactly the number of `num_topics` topics in a batching manner.
let got = topic_pool
.select_batch(topics.len())
.unwrap()
.into_iter()
.map(ToString::to_string)
.collect::<Vec<_>>();
assert_eq!(got, topics);
// Selects more than the number of `num_topics` topics.
let got = topic_pool
.select_batch(2 * topics.len())
.unwrap()
.into_iter()
.map(ToString::to_string)
.collect::<Vec<_>>();
let expected = vec![topics.clone(); 2]
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert_eq!(got, expected);
}
}

View File

@@ -162,6 +162,13 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to do metadata snapshot"))]
MetadataSnapshot {
source: BoxedError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to do procedure task"))]
ProcedureService {
source: BoxedError,
@@ -187,6 +194,12 @@ pub enum Error {
location: Location,
},
#[snafu(display("Missing MetadataSnapshotHandler, not expected"))]
MissingMetadataSnapshotHandler {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid function args: {}", err_msg))]
InvalidFuncArgs {
err_msg: String,
@@ -251,6 +264,7 @@ impl ErrorExt for Error {
Error::MissingTableMutationHandler { .. }
| Error::MissingProcedureServiceHandler { .. }
| Error::MissingFlowServiceHandler { .. }
| Error::MissingMetadataSnapshotHandler { .. }
| Error::RegisterUdf { .. } => StatusCode::Unexpected,
Error::UnsupportedInputDataType { .. }
@@ -262,7 +276,8 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. }
| Error::Execute { source, .. }
| Error::ProcedureService { source, .. }
| Error::TableMutation { source, .. } => source.status_code(),
| Error::TableMutation { source, .. }
| Error::MetadataSnapshot { source, .. } => source.status_code(),
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
}

View File

@@ -18,19 +18,16 @@ mod udaf;
use std::sync::Arc;
use api::v1::TableName;
use datafusion::catalog::CatalogProviderList;
use datafusion::error::Result as DatafusionResult;
use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder};
use datafusion_common::{Column, TableReference};
use datafusion_expr::dml::InsertOp;
use datafusion_expr::{col, DmlStatement, WriteOp};
use datafusion_common::Column;
use datafusion_expr::col;
pub use expr::{build_filter_from_timestamp, build_same_type_ts_filter};
use snafu::ResultExt;
pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
pub use self::udaf::AggregateFunction;
use crate::error::{GeneralDataFusionSnafu, Result};
use crate::error::Result;
use crate::logical_plan::accumulator::*;
use crate::signature::{Signature, Volatility};
@@ -82,74 +79,6 @@ pub fn rename_logical_plan_columns(
LogicalPlanBuilder::from(plan).project(projection)?.build()
}
/// Convert a insert into logical plan to an (table_name, logical_plan)
/// where table_name is the name of the table to insert into.
/// logical_plan is the plan to be executed.
///
/// if input logical plan is not `insert into table_name <input>`, return None
///
/// Returned TableName will use provided catalog and schema if not specified in the logical plan,
/// if table scan in logical plan have full table name, will **NOT** override it.
pub fn breakup_insert_plan(
plan: &LogicalPlan,
default_catalog: &str,
default_schema: &str,
) -> Option<(TableName, Arc<LogicalPlan>)> {
if let LogicalPlan::Dml(dml) = plan {
if dml.op != WriteOp::Insert(InsertOp::Append) {
return None;
}
let table_name = &dml.table_name;
let table_name = match table_name {
TableReference::Bare { table } => TableName {
catalog_name: default_catalog.to_string(),
schema_name: default_schema.to_string(),
table_name: table.to_string(),
},
TableReference::Partial { schema, table } => TableName {
catalog_name: default_catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
},
TableReference::Full {
catalog,
schema,
table,
} => TableName {
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
table_name: table.to_string(),
},
};
let logical_plan = dml.input.clone();
Some((table_name, logical_plan))
} else {
None
}
}
/// create a `insert into table_name <input>` logical plan
pub fn add_insert_to_logical_plan(
table_name: TableName,
table_schema: datafusion_common::DFSchemaRef,
input: LogicalPlan,
) -> Result<LogicalPlan> {
let table_name = TableReference::Full {
catalog: table_name.catalog_name.into(),
schema: table_name.schema_name.into(),
table: table_name.table_name.into(),
};
let plan = LogicalPlan::Dml(DmlStatement::new(
table_name,
table_schema,
WriteOp::Insert(InsertOp::Append),
Arc::new(input),
));
let plan = plan.recompute_schema().context(GeneralDataFusionSnafu)?;
Ok(plan)
}
/// The datafusion `[LogicalPlan]` decoder.
#[async_trait::async_trait]
pub trait SubstraitPlanDecoder {

View File

@@ -23,22 +23,17 @@ use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
/// The default backoff config for kafka client.
///
/// If the operation fails, the client will retry 3 times.
/// The backoff time is 100ms, 300ms, 900ms.
pub const DEFAULT_BACKOFF_CONFIG: BackoffConfig = BackoffConfig {
init_backoff: Duration::from_millis(100),
max_backoff: Duration::from_secs(1),
base: 3.0,
// The deadline shouldn't be too long,
// otherwise the client will block the worker loop for a long time.
deadline: Some(Duration::from_secs(3)),
max_backoff: Duration::from_secs(10),
base: 2.0,
deadline: Some(Duration::from_secs(120)),
};
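// Worked example (illustrative, not part of this diff): with exponential backoff the
// n-th retry waits roughly init_backoff * base^n, capped at max_backoff. With
// init = 100ms and base = 3.0 that gives 100ms, 300ms, 900ms for three retries,
// well within a 3s deadline; with base = 2.0 and a 10s cap the sequence is
// 100ms, 200ms, 400ms, ... bounded by the 120s deadline.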
/// Default interval for auto WAL pruning.
pub const DEFAULT_AUTO_PRUNE_INTERVAL: Duration = Duration::ZERO;
/// Default limit for concurrent auto pruning tasks.
pub const DEFAULT_AUTO_PRUNE_PARALLELISM: usize = 10;
/// Default interval for active WAL pruning.
pub const DEFAULT_ACTIVE_PRUNE_INTERVAL: Duration = Duration::ZERO;
/// Default limit for concurrent active pruning tasks.
pub const DEFAULT_ACTIVE_PRUNE_TASK_LIMIT: usize = 10;
/// Default threshold for sending flush requests to regions when pruning remote WAL.
pub const DEFAULT_TRIGGER_FLUSH_THRESHOLD: u64 = 0;

View File

@@ -18,8 +18,8 @@ use common_base::readable_size::ReadableSize;
use serde::{Deserialize, Serialize};
use crate::config::kafka::common::{
KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_AUTO_PRUNE_INTERVAL,
DEFAULT_AUTO_PRUNE_PARALLELISM, DEFAULT_TRIGGER_FLUSH_THRESHOLD,
KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_ACTIVE_PRUNE_INTERVAL,
DEFAULT_ACTIVE_PRUNE_TASK_LIMIT, DEFAULT_TRIGGER_FLUSH_THRESHOLD,
};
/// Kafka wal configurations for datanode.
@@ -47,8 +47,9 @@ pub struct DatanodeKafkaConfig {
pub dump_index_interval: Duration,
/// Ignore missing entries when reading the WAL.
pub overwrite_entry_start_id: bool,
// Active WAL pruning.
pub auto_prune_topic_records: bool,
// Interval of WAL pruning.
#[serde(with = "humantime_serde")]
pub auto_prune_interval: Duration,
// Threshold for sending flush request when pruning remote WAL.
// `None` stands for never sending flush request.
@@ -69,9 +70,10 @@ impl Default for DatanodeKafkaConfig {
create_index: true,
dump_index_interval: Duration::from_secs(60),
overwrite_entry_start_id: false,
auto_prune_interval: DEFAULT_AUTO_PRUNE_INTERVAL,
auto_prune_topic_records: false,
auto_prune_interval: DEFAULT_ACTIVE_PRUNE_INTERVAL,
trigger_flush_threshold: DEFAULT_TRIGGER_FLUSH_THRESHOLD,
auto_prune_parallelism: DEFAULT_AUTO_PRUNE_PARALLELISM,
auto_prune_parallelism: DEFAULT_ACTIVE_PRUNE_TASK_LIMIT,
}
}
}

View File

@@ -17,8 +17,8 @@ use std::time::Duration;
use serde::{Deserialize, Serialize};
use crate::config::kafka::common::{
KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_AUTO_PRUNE_INTERVAL,
DEFAULT_AUTO_PRUNE_PARALLELISM, DEFAULT_TRIGGER_FLUSH_THRESHOLD,
KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_ACTIVE_PRUNE_INTERVAL,
DEFAULT_ACTIVE_PRUNE_TASK_LIMIT, DEFAULT_TRIGGER_FLUSH_THRESHOLD,
};
/// Kafka wal configurations for metasrv.
@@ -34,7 +34,6 @@ pub struct MetasrvKafkaConfig {
// Automatically create topics for WAL.
pub auto_create_topics: bool,
// Interval of WAL pruning.
#[serde(with = "humantime_serde")]
pub auto_prune_interval: Duration,
// Threshold for sending flush request when pruning remote WAL.
// `None` stands for never sending flush request.
@@ -49,9 +48,9 @@ impl Default for MetasrvKafkaConfig {
connection: Default::default(),
kafka_topic: Default::default(),
auto_create_topics: true,
auto_prune_interval: DEFAULT_AUTO_PRUNE_INTERVAL,
auto_prune_interval: DEFAULT_ACTIVE_PRUNE_INTERVAL,
trigger_flush_threshold: DEFAULT_TRIGGER_FLUSH_THRESHOLD,
auto_prune_parallelism: DEFAULT_AUTO_PRUNE_PARALLELISM,
auto_prune_parallelism: DEFAULT_ACTIVE_PRUNE_TASK_LIMIT,
}
}
}

View File

@@ -31,33 +31,3 @@ where
test(endpoints).await
}
/// Get the kafka endpoints from the environment variable `GT_KAFKA_ENDPOINTS`.
///
/// The format of the environment variable is:
/// ```
/// GT_KAFKA_ENDPOINTS=localhost:9092,localhost:9093
/// ```
pub fn get_kafka_endpoints() -> Vec<String> {
let endpoints = std::env::var("GT_KAFKA_ENDPOINTS").unwrap();
endpoints
.split(',')
.map(|s| s.trim().to_string())
.collect::<Vec<_>>()
}
#[macro_export]
/// Skip the test if the environment variable `GT_KAFKA_ENDPOINTS` is not set.
///
/// The format of the environment variable is:
/// ```
/// GT_KAFKA_ENDPOINTS=localhost:9092,localhost:9093
/// ```
macro_rules! maybe_skip_kafka_integration_test {
() => {
if std::env::var("GT_KAFKA_ENDPOINTS").is_err() {
common_telemetry::warn!("The endpoints is empty, skipping the test");
return;
}
};
}
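// A usage sketch (assumed, not taken from this diff) showing how the two helpers above
// combine in an integration test; `kafka_roundtrip` is a hypothetical test name.
#[tokio::test]
async fn kafka_roundtrip() {
    // Returns early unless GT_KAFKA_ENDPOINTS is set, e.g. "localhost:9092,localhost:9093".
    maybe_skip_kafka_integration_test!();
    let endpoints = get_kafka_endpoints();
    assert!(!endpoints.is_empty());
}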

View File

@@ -57,9 +57,9 @@ use tokio::sync::Notify;
use crate::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use crate::error::{
self, BuildMetricEngineSnafu, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu,
MissingCacheSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result,
ShutdownInstanceSnafu, ShutdownServerSnafu, StartServerSnafu,
self, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingCacheSnafu,
MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result, ShutdownInstanceSnafu,
ShutdownServerSnafu, StartServerSnafu,
};
use crate::event_listener::{
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
@@ -357,6 +357,7 @@ impl DatanodeBuilder {
None,
None,
None,
None,
false,
self.plugins.clone(),
opts.query.clone(),
@@ -398,46 +399,44 @@ impl DatanodeBuilder {
schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<Vec<RegionEngineRef>> {
let mut metric_engine_config = metric_engine::config::EngineConfig::default();
let mut mito_engine_config = MitoConfig::default();
let mut file_engine_config = file_engine::config::EngineConfig::default();
let mut engines = vec![];
let mut metric_engine_config = opts.region_engine.iter().find_map(|c| match c {
RegionEngineConfig::Metric(config) => Some(config.clone()),
_ => None,
});
for engine in &opts.region_engine {
match engine {
RegionEngineConfig::Mito(config) => {
mito_engine_config = config.clone();
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
config.clone(),
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::new(
mito_engine.clone(),
metric_engine_config.take().unwrap_or_default(),
);
engines.push(Arc::new(mito_engine) as _);
engines.push(Arc::new(metric_engine) as _);
}
RegionEngineConfig::File(config) => {
file_engine_config = config.clone();
let engine = FileRegionEngine::new(
config.clone(),
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
engines.push(Arc::new(engine) as _);
}
RegionEngineConfig::Metric(metric_config) => {
metric_engine_config = metric_config.clone();
RegionEngineConfig::Metric(_) => {
// Already handled above, together with the mito engine.
}
}
}
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
mito_engine_config,
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::try_new(mito_engine.clone(), metric_engine_config)
.context(BuildMetricEngineSnafu)?;
let file_engine = FileRegionEngine::new(
file_engine_config,
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
Ok(vec![
Arc::new(mito_engine) as _,
Arc::new(metric_engine) as _,
Arc::new(file_engine) as _,
])
Ok(engines)
}
/// Builds [MitoEngine] according to options.

View File

@@ -336,13 +336,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to build metric engine"))]
BuildMetricEngine {
source: metric_engine::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serialize options to TOML"))]
TomlFormat {
#[snafu(implicit)]
@@ -459,7 +452,6 @@ impl ErrorExt for Error {
FindLogicalRegions { source, .. } => source.status_code(),
BuildMitoEngine { source, .. } => source.status_code(),
BuildMetricEngine { source, .. } => source.status_code(),
ConcurrentQueryLimiterClosed { .. } | ConcurrentQueryLimiterTimeout { .. } => {
StatusCode::RegionBusy
}

View File

@@ -25,7 +25,6 @@ use std::sync::Arc;
use std::time::Duration;
use common_telemetry::{info, warn};
use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
use object_store::services::Fs;
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
@@ -169,13 +168,9 @@ async fn build_cache_layer(
if let Some(path) = cache_path.as_ref()
&& !path.trim().is_empty()
{
let atomic_temp_dir = join_dir(path, ATOMIC_WRITE_DIR);
let atomic_temp_dir = join_dir(path, ".tmp/");
clean_temp_dir(&atomic_temp_dir)?;
// Compatibility code. Remove this after a major release.
let old_atomic_temp_dir = join_dir(path, OLD_ATOMIC_WRITE_DIR);
clean_temp_dir(&old_atomic_temp_dir)?;
let cache_store = Fs::default()
.root(path)
.atomic_write_dir(&atomic_temp_dir)

View File

@@ -15,7 +15,6 @@
use std::{fs, path};
use common_telemetry::info;
use mito2::access_layer::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR};
use object_store::services::Fs;
use object_store::util::join_dir;
use object_store::ObjectStore;
@@ -34,13 +33,9 @@ pub async fn new_fs_object_store(
.context(error::CreateDirSnafu { dir: data_home })?;
info!("The file storage home is: {}", data_home);
let atomic_write_dir = join_dir(data_home, ATOMIC_WRITE_DIR);
let atomic_write_dir = join_dir(data_home, ".tmp/");
store::clean_temp_dir(&atomic_write_dir)?;
// Compatibility code. Remove this after a major release.
let old_atomic_temp_dir = join_dir(data_home, OLD_ATOMIC_WRITE_DIR);
store::clean_temp_dir(&old_atomic_temp_dir)?;
let builder = Fs::default()
.root(data_home)
.atomic_write_dir(&atomic_write_dir);

View File

@@ -60,7 +60,6 @@ partition.workspace = true
prometheus.workspace = true
prost.workspace = true
query.workspace = true
rand.workspace = true
serde.workspace = true
servers.workspace = true
session.workspace = true

View File

@@ -58,7 +58,7 @@ use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_R
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
use crate::{CreateFlowArgs, FlowId, TableName};
pub(crate) mod flownode_impl;
mod flownode_impl;
mod parse_expr;
pub(crate) mod refill;
mod stat;
@@ -135,13 +135,12 @@ impl Configurable for FlownodeOptions {
}
/// Arc-ed FlowNodeManager, cheaper to clone
pub type FlowStreamingEngineRef = Arc<StreamingEngine>;
pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
/// FlowNodeManager manages the state of all tasks in the flow node, and should run on the same thread
///
/// The choice of timestamp is simply the current system timestamp for now
///
pub struct StreamingEngine {
pub struct FlowWorkerManager {
/// The handler to the worker that will run the dataflow
/// which is `!Send` so a handle is used
pub worker_handles: Vec<WorkerHandle>,
@@ -159,8 +158,7 @@ pub struct StreamingEngine {
flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
tick_manager: FlowTickManager,
/// This node id is only available in distributed mode; in standalone mode it is guaranteed to be `None`
pub node_id: Option<u32>,
node_id: Option<u32>,
/// Lock for flushing, `read` by `handle_inserts` and `write` by `flush_flow`,
///
/// so that a sequence of events like `inserts -> flush` can be handled correctly
@@ -170,7 +168,7 @@ pub struct StreamingEngine {
}
/// Building FlownodeManager
impl StreamingEngine {
impl FlowWorkerManager {
/// set frontend invoker
pub async fn set_frontend_invoker(&self, frontend: FrontendInvoker) {
*self.frontend_invoker.write().await = Some(frontend);
@@ -189,7 +187,7 @@ impl StreamingEngine {
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
let tick_manager = FlowTickManager::new();
let worker_handles = Vec::new();
StreamingEngine {
FlowWorkerManager {
worker_handles,
worker_selector: Mutex::new(0),
query_engine,
@@ -265,7 +263,7 @@ pub fn batches_to_rows_req(batches: Vec<Batch>) -> Result<Vec<DiffRequest>, Erro
}
/// This impl block contains methods to send writeback requests to frontend
impl StreamingEngine {
impl FlowWorkerManager {
/// Return the number of requests it made
pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
let all_reqs = self.generate_writeback_request().await?;
@@ -536,7 +534,7 @@ impl StreamingEngine {
}
/// Flow Runtime related methods
impl StreamingEngine {
impl FlowWorkerManager {
/// Start state report handler, which will receive a sender from HeartbeatTask to send state size report back
///
/// if heartbeat task is shutdown, this future will exit too
@@ -661,7 +659,7 @@ impl StreamingEngine {
}
// flow is now shut down; drop frontend_invoker early so a ref cycle (in standalone mode) can be prevented:
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_streaming_engine.frontend_invoker
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_worker_manager.frontend_invoker
self.frontend_invoker.write().await.take();
}
@@ -730,7 +728,7 @@ impl StreamingEngine {
}
/// Create&Remove flow
impl StreamingEngine {
impl FlowWorkerManager {
/// remove a flow by its id
pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
for handle in self.worker_handles.iter() {
@@ -748,6 +746,7 @@ impl StreamingEngine {
/// steps to create task:
/// 1. parse the query into a typed plan (and optionally parse the expire_after expr)
/// 2. render source/sink with the output table id and the used input table ids
#[allow(clippy::too_many_arguments)]
pub async fn create_flow_inner(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
let CreateFlowArgs {
flow_id,

View File

@@ -14,511 +14,41 @@
//! impl the `FlowNode` trait for FlowNodeManager so standalone mode can call it
use std::collections::{HashMap, HashSet};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use api::v1::flow::{
flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
};
use api::v1::region::InsertRequests;
use catalog::CatalogManager;
use common_error::ext::BoxedError;
use common_meta::ddl::create_flow::FlowType;
use common_meta::error::Result as MetaResult;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::error::{Result as MetaResult, UnexpectedSnafu};
use common_runtime::JoinHandle;
use common_telemetry::{error, info, trace, warn};
use common_telemetry::{trace, warn};
use datatypes::value::Value;
use futures::TryStreamExt;
use itertools::Itertools;
use session::context::QueryContextBuilder;
use snafu::{ensure, IntoError, OptionExt, ResultExt};
use snafu::{IntoError, OptionExt, ResultExt};
use store_api::storage::{RegionId, TableId};
use tokio::sync::{Mutex, RwLock};
use crate::adapter::{CreateFlowArgs, StreamingEngine};
use crate::adapter::{CreateFlowArgs, FlowWorkerManager};
use crate::batching_mode::engine::BatchingEngine;
use crate::batching_mode::{FRONTEND_SCAN_TIMEOUT, MIN_REFRESH_DURATION};
use crate::engine::FlowEngine;
use crate::error::{
CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, FlowNotRecoveredSnafu,
IllegalCheckTaskStateSnafu, InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu,
NoAvailableFrontendSnafu, SyncCheckTaskSnafu, UnexpectedSnafu,
};
use crate::error::{CreateFlowSnafu, FlowNotFoundSnafu, InsertIntoFlowSnafu, InternalSnafu};
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
use crate::{Error, FlowId};
/// Ref to [`FlowDualEngine`]
pub type FlowDualEngineRef = Arc<FlowDualEngine>;
/// Manages both the streaming and batching mode engines
///
/// including create/drop/flush flow,
/// and redirects insert requests to the appropriate engine
pub struct FlowDualEngine {
streaming_engine: Arc<StreamingEngine>,
streaming_engine: Arc<FlowWorkerManager>,
batching_engine: Arc<BatchingEngine>,
/// helper struct for faster query flow by table id or vice versa
src_table2flow: RwLock<SrcTableToFlow>,
flow_metadata_manager: Arc<FlowMetadataManager>,
catalog_manager: Arc<dyn CatalogManager>,
check_task: tokio::sync::Mutex<Option<ConsistentCheckTask>>,
done_recovering: AtomicBool,
src_table2flow: std::sync::RwLock<SrcTableToFlow>,
}
impl FlowDualEngine {
pub fn new(
streaming_engine: Arc<StreamingEngine>,
batching_engine: Arc<BatchingEngine>,
flow_metadata_manager: Arc<FlowMetadataManager>,
catalog_manager: Arc<dyn CatalogManager>,
) -> Self {
Self {
streaming_engine,
batching_engine,
src_table2flow: RwLock::new(SrcTableToFlow::default()),
flow_metadata_manager,
catalog_manager,
check_task: Mutex::new(None),
done_recovering: AtomicBool::new(false),
}
}
/// Set `done_recovering` to true
/// to indicate that we are ready to handle requests
pub fn set_done_recovering(&self) {
info!("FlowDualEngine done recovering");
self.done_recovering
.store(true, std::sync::atomic::Ordering::Release);
}
/// Check if `done_recovering` is true
pub fn is_recover_done(&self) -> bool {
self.done_recovering
.load(std::sync::atomic::Ordering::Acquire)
}
/// wait for recovery to finish; this only happens right after the flownode has started
async fn wait_for_all_flow_recover(&self, waiting_req_cnt: usize) -> Result<(), Error> {
if self.is_recover_done() {
return Ok(());
}
warn!(
"FlowDualEngine is not done recovering, {} insert request waiting for recovery",
waiting_req_cnt
);
// wait 3 seconds, check every 1 second
// TODO(discord9): make this configurable
let mut retry = 0;
let max_retry = 3;
while retry < max_retry && !self.is_recover_done() {
warn!(
"FlowDualEngine is not done recovering, retry {} in 1s",
retry
);
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
retry += 1;
}
if retry == max_retry {
return FlowNotRecoveredSnafu.fail();
} else {
info!("FlowDualEngine is done recovering");
}
// TODO(discord9): also put this into centralized logging for flow once that is implemented
Ok(())
}
/// Determine if the engine is in distributed mode
pub fn is_distributed(&self) -> bool {
self.streaming_engine.node_id.is_some()
}
pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
self.streaming_engine.clone()
}
pub fn batching_engine(&self) -> Arc<BatchingEngine> {
self.batching_engine.clone()
}
/// In distributed mode, scan periodically (every 1s) until an available frontend is found or the timeout elapses;
/// in standalone mode, return immediately.
/// Note that if any frontend appears in the cluster info this function returns immediately.
async fn wait_for_available_frontend(&self, timeout: std::time::Duration) -> Result<(), Error> {
if !self.is_distributed() {
return Ok(());
}
let frontend_client = self.batching_engine().frontend_client.clone();
let sleep_duration = std::time::Duration::from_millis(1_000);
let now = std::time::Instant::now();
loop {
let frontend_list = frontend_client.scan_for_frontend().await?;
if !frontend_list.is_empty() {
let fe_list = frontend_list
.iter()
.map(|(_, info)| &info.peer.addr)
.collect::<Vec<_>>();
info!("Available frontend found: {:?}", fe_list);
return Ok(());
}
let elapsed = now.elapsed();
tokio::time::sleep(sleep_duration).await;
info!("Waiting for available frontend, elapsed={:?}", elapsed);
if elapsed >= timeout {
return NoAvailableFrontendSnafu {
timeout,
context: "No available frontend found in cluster info",
}
.fail();
}
}
}
/// Try to sync with the check task; this is only used in drop flow & flush flow, so a flow id is required
///
/// the sync is needed to make sure flush flow actually gets called
async fn try_sync_with_check_task(
&self,
flow_id: FlowId,
allow_drop: bool,
) -> Result<(), Error> {
// this function rarely gets called, so adding some logging is helpful
info!("Try to sync with check task for flow {}", flow_id);
let mut retry = 0;
let max_retry = 10;
// keep trying to trigger consistent check
while retry < max_retry {
if let Some(task) = self.check_task.lock().await.as_ref() {
task.trigger(false, allow_drop).await?;
break;
}
retry += 1;
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
if retry == max_retry {
error!(
"Can't sync with check task for flow {} with allow_drop={}",
flow_id, allow_drop
);
return SyncCheckTaskSnafu {
flow_id,
allow_drop,
}
.fail();
}
info!("Successfully sync with check task for flow {}", flow_id);
Ok(())
}
/// Spawn a task to consistently check that all flow tasks in metasrv are created on the flownode;
/// on startup this creates all missing flow tasks, then keeps checking at an interval
async fn check_flow_consistent(
&self,
allow_create: bool,
allow_drop: bool,
) -> Result<(), Error> {
// use nodeid to determine whether this is standalone or distributed mode, and retrieve all flows on this node (in distributed mode) or all flows (in standalone mode)
let nodeid = self.streaming_engine.node_id;
let should_exists: Vec<_> = if let Some(nodeid) = nodeid {
// nodeid is available, so we only need to check flows on this node
// which also means we are in distributed mode
let to_be_recover = self
.flow_metadata_manager
.flownode_flow_manager()
.flows(nodeid.into())
.try_collect::<Vec<_>>()
.await
.context(ListFlowsSnafu {
id: Some(nodeid.into()),
})?;
to_be_recover.into_iter().map(|(id, _)| id).collect()
} else {
// nodeid is not available, so we need to check all flows
// which also means we are in standalone mode
let all_catalogs = self
.catalog_manager
.catalog_names()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let mut all_flow_ids = vec![];
for catalog in all_catalogs {
let flows = self
.flow_metadata_manager
.flow_name_manager()
.flow_names(&catalog)
.await
.try_collect::<Vec<_>>()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
all_flow_ids.extend(flows.into_iter().map(|(_, id)| id.flow_id()));
}
all_flow_ids
};
let should_exists = should_exists
.into_iter()
.map(|i| i as FlowId)
.collect::<HashSet<_>>();
let actual_exists = self.list_flows().await?.into_iter().collect::<HashSet<_>>();
let to_be_created = should_exists
.iter()
.filter(|id| !actual_exists.contains(id))
.collect::<Vec<_>>();
let to_be_dropped = actual_exists
.iter()
.filter(|id| !should_exists.contains(id))
.collect::<Vec<_>>();
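// For example (illustrative, not from this diff): if metadata says flows {1, 2, 3}
// should exist and the engines report {2, 3, 4}, then to_be_created = {1} and
// to_be_dropped = {4}.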
if !to_be_created.is_empty() {
if allow_create {
info!(
"Recovering {} flows: {:?}",
to_be_created.len(),
to_be_created
);
let mut errors = vec![];
for flow_id in to_be_created.clone() {
let flow_id = *flow_id;
let info = self
.flow_metadata_manager
.flow_info_manager()
.get(flow_id as u32)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(FlowNotFoundSnafu { id: flow_id })?;
let sink_table_name = [
info.sink_table_name().catalog_name.clone(),
info.sink_table_name().schema_name.clone(),
info.sink_table_name().table_name.clone(),
];
let args = CreateFlowArgs {
flow_id,
sink_table_name,
source_table_ids: info.source_table_ids().to_vec(),
// because recovery should only happen on restart, `create_if_not_exists` and `or_replace` can be arbitrary values (since the flow doesn't exist yet)
// but for the sake of consistency, and to make sure flow recovery actually happens, we set both to true
// (which is also fine since the check that disallows both being true is on metasrv, and we already passed it)
create_if_not_exists: true,
or_replace: true,
expire_after: info.expire_after(),
comment: Some(info.comment().clone()),
sql: info.raw_sql().clone(),
flow_options: info.options().clone(),
query_ctx: info
.query_context()
.clone()
.map(|ctx| {
ctx.try_into()
.map_err(BoxedError::new)
.context(ExternalSnafu)
})
.transpose()?
// or use default QueryContext with catalog_name from info
// to keep compatibility with older versions
.or_else(|| {
Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().to_string())
.build(),
)
}),
};
if let Err(err) = self
.create_flow(args)
.await
.map_err(BoxedError::new)
.with_context(|_| CreateFlowSnafu {
sql: info.raw_sql().clone(),
})
{
errors.push((flow_id, err));
}
}
if errors.is_empty() {
info!("Recover flows successfully, flows: {:?}", to_be_created);
}
for (flow_id, err) in errors {
warn!("Failed to recreate flow {}, err={:#?}", flow_id, err);
}
} else {
warn!(
"Flows do not exist in flownode for node {:?}, flow_ids={:?}",
nodeid, to_be_created
);
}
}
if !to_be_dropped.is_empty() {
if allow_drop {
info!("Dropping flows: {:?}", to_be_dropped);
let mut errors = vec![];
for flow_id in to_be_dropped {
let flow_id = *flow_id;
if let Err(err) = self.remove_flow(flow_id).await {
errors.push((flow_id, err));
}
}
for (flow_id, err) in errors {
warn!("Failed to drop flow {}, err={:#?}", flow_id, err);
}
} else {
warn!(
"Flows do not exist in metadata for node {:?}, flow_ids={:?}",
nodeid, to_be_dropped
);
}
}
Ok(())
}
// TODO(discord9): consider syncing this with heartbeat (might become necessary in the future)
pub async fn start_flow_consistent_check_task(self: &Arc<Self>) -> Result<(), Error> {
let mut check_task = self.check_task.lock().await;
ensure!(
check_task.is_none(),
IllegalCheckTaskStateSnafu {
reason: "Flow consistent check task already exists",
}
);
let task = ConsistentCheckTask::start_check_task(self).await?;
*check_task = Some(task);
Ok(())
}
pub async fn stop_flow_consistent_check_task(&self) -> Result<(), Error> {
info!("Stopping flow consistent check task");
let mut check_task = self.check_task.lock().await;
ensure!(
check_task.is_some(),
IllegalCheckTaskStateSnafu {
reason: "Flow consistent check task does not exist",
}
);
check_task.take().unwrap().stop().await?;
info!("Stopped flow consistent check task");
Ok(())
}
/// TODO(discord9): also add an `exists` api using the flow metadata manager's `exists` method
async fn flow_exist_in_metadata(&self, flow_id: FlowId) -> Result<bool, Error> {
self.flow_metadata_manager
.flow_info_manager()
.get(flow_id as u32)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
.map(|info| info.is_some())
}
}
struct ConsistentCheckTask {
handle: JoinHandle<()>,
shutdown_tx: tokio::sync::mpsc::Sender<()>,
trigger_tx: tokio::sync::mpsc::Sender<(bool, bool, tokio::sync::oneshot::Sender<()>)>,
}
impl ConsistentCheckTask {
async fn start_check_task(engine: &Arc<FlowDualEngine>) -> Result<Self, Error> {
let engine = engine.clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let (trigger_tx, mut trigger_rx) =
tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
let handle = common_runtime::spawn_global(async move {
// first check whether an available frontend can be found
if let Err(err) = engine
.wait_for_available_frontend(FRONTEND_SCAN_TIMEOUT)
.await
{
warn!("No frontend is available yet:\n {err:?}");
}
// then recover flows; on failure, keep retrying
let mut recover_retry = 0;
while let Err(err) = engine.check_flow_consistent(true, false).await {
recover_retry += 1;
error!(
"Failed to recover flows:\n {err:?}, retry {} in {}s",
recover_retry,
MIN_REFRESH_DURATION.as_secs()
);
tokio::time::sleep(MIN_REFRESH_DURATION).await;
}
engine.set_done_recovering();
// then keep checking flows, with configurable allow_create and allow_drop
let (mut allow_create, mut allow_drop) = (false, false);
let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
loop {
if let Err(err) = engine.check_flow_consistent(allow_create, allow_drop).await {
error!(err; "Failed to check flow consistent");
}
if let Some(done) = ret_signal.take() {
let _ = done.send(());
}
tokio::select! {
_ = rx.recv() => break,
incoming = trigger_rx.recv() => if let Some(incoming) = incoming {
(allow_create, allow_drop) = (incoming.0, incoming.1);
ret_signal = Some(incoming.2);
},
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
(allow_create, allow_drop) = (false, false);
},
}
}
});
Ok(ConsistentCheckTask {
handle,
shutdown_tx: tx,
trigger_tx,
})
}
async fn trigger(&self, allow_create: bool, allow_drop: bool) -> Result<(), Error> {
let (tx, rx) = tokio::sync::oneshot::channel();
self.trigger_tx
.send((allow_create, allow_drop, tx))
.await
.map_err(|_| {
IllegalCheckTaskStateSnafu {
reason: "Failed to send trigger signal",
}
.build()
})?;
rx.await.map_err(|_| {
IllegalCheckTaskStateSnafu {
reason: "Failed to receive trigger signal",
}
.build()
})?;
Ok(())
}
async fn stop(self) -> Result<(), Error> {
self.shutdown_tx.send(()).await.map_err(|_| {
IllegalCheckTaskStateSnafu {
reason: "Failed to send shutdown signal",
}
.build()
})?;
// abort so no need to wait
self.handle.abort();
Ok(())
}
}
#[derive(Default)]
struct SrcTableToFlow {
/// mapping of table ids to flow ids for streaming mode
stream: HashMap<TableId, HashSet<FlowId>>,
@@ -608,54 +138,35 @@ impl FlowEngine for FlowDualEngine {
self.src_table2flow
.write()
.await
.unwrap()
.add_flow(flow_id, flow_type, src_table_ids);
Ok(res)
}
async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
let flow_type = self.src_table2flow.read().await.get_flow_type(flow_id);
let flow_type = self.src_table2flow.read().unwrap().get_flow_type(flow_id);
match flow_type {
Some(FlowType::Batching) => self.batching_engine.remove_flow(flow_id).await,
Some(FlowType::Streaming) => self.streaming_engine.remove_flow(flow_id).await,
None => {
// this can happen if the flownode just restarted and is still creating the flow
// since this flow should now be dropped, we need to trigger the consistent check and allow drop
// this relies on the drop flow ddl deleting metadata first, see src/common/meta/src/ddl/drop_flow.rs
warn!(
"Flow {} is not exist in the underlying engine, but exist in metadata",
flow_id
);
self.try_sync_with_check_task(flow_id, true).await?;
Ok(())
}
None => FlowNotFoundSnafu { id: flow_id }.fail(),
}?;
// remove mapping
self.src_table2flow.write().await.remove_flow(flow_id);
self.src_table2flow.write().unwrap().remove_flow(flow_id);
Ok(())
}
async fn flush_flow(&self, flow_id: FlowId) -> Result<usize, Error> {
// sync with check task
self.try_sync_with_check_task(flow_id, false).await?;
let flow_type = self.src_table2flow.read().await.get_flow_type(flow_id);
let flow_type = self.src_table2flow.read().unwrap().get_flow_type(flow_id);
match flow_type {
Some(FlowType::Batching) => self.batching_engine.flush_flow(flow_id).await,
Some(FlowType::Streaming) => self.streaming_engine.flush_flow(flow_id).await,
None => {
warn!(
"Currently flow={flow_id} doesn't exist in flownode, ignore flush_flow request"
);
Ok(0)
}
None => FlowNotFoundSnafu { id: flow_id }.fail(),
}
}
async fn flow_exist(&self, flow_id: FlowId) -> Result<bool, Error> {
let flow_type = self.src_table2flow.read().await.get_flow_type(flow_id);
let flow_type = self.src_table2flow.read().unwrap().get_flow_type(flow_id);
// not using `flow_type.is_some()`, to make sure the flow actually exists in the underlying engine
match flow_type {
Some(FlowType::Batching) => self.batching_engine.flow_exist(flow_id).await,
@@ -664,26 +175,16 @@ impl FlowEngine for FlowDualEngine {
}
}
async fn list_flows(&self) -> Result<impl IntoIterator<Item = FlowId>, Error> {
let stream_flows = self.streaming_engine.list_flows().await?;
let batch_flows = self.batching_engine.list_flows().await?;
Ok(stream_flows.into_iter().chain(batch_flows))
}
async fn handle_flow_inserts(
&self,
request: api::v1::region::InsertRequests,
) -> Result<(), Error> {
self.wait_for_all_flow_recover(request.requests.len())
.await?;
// TODO(discord9): make as few clones as possible
let mut to_stream_engine = Vec::with_capacity(request.requests.len());
let mut to_batch_engine = request.requests;
{
// not locking this, or recovering flows would be starved while also handling flow inserts
let src_table2flow = self.src_table2flow.read().await;
let src_table2flow = self.src_table2flow.read().unwrap();
to_batch_engine.retain(|req| {
let region_id = RegionId::from(req.region_id);
let table_id = region_id.table_id();
@@ -720,7 +221,12 @@ impl FlowEngine for FlowDualEngine {
requests: to_batch_engine,
})
.await?;
stream_handler.await.context(JoinTaskSnafu)??;
stream_handler.await.map_err(|e| {
crate::error::UnexpectedSnafu {
reason: format!("JoinError when handle inserts for flow stream engine: {e:?}"),
}
.build()
})??;
Ok(())
}
@@ -801,7 +307,14 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
..Default::default()
})
}
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
None => UnexpectedSnafu {
err_msg: "Missing request body",
}
.fail(),
_ => UnexpectedSnafu {
err_msg: "Invalid request body.",
}
.fail(),
}
}
@@ -818,23 +331,15 @@ fn to_meta_err(
location: snafu::Location,
) -> impl FnOnce(crate::error::Error) -> common_meta::error::Error {
move |err: crate::error::Error| -> common_meta::error::Error {
match err {
crate::error::Error::FlowNotFound { id, .. } => {
common_meta::error::Error::FlowNotFound {
flow_name: format!("flow_id={id}"),
location,
}
}
_ => common_meta::error::Error::External {
location,
source: BoxedError::new(err),
},
common_meta::error::Error::External {
location,
source: BoxedError::new(err),
}
}
}
#[async_trait::async_trait]
impl common_meta::node_manager::Flownode for StreamingEngine {
impl common_meta::node_manager::Flownode for FlowWorkerManager {
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
let query_ctx = request
.header
@@ -908,7 +413,14 @@ impl common_meta::node_manager::Flownode for StreamingEngine {
..Default::default()
})
}
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
None => UnexpectedSnafu {
err_msg: "Missing request body",
}
.fail(),
_ => UnexpectedSnafu {
err_msg: "Invalid request body.",
}
.fail(),
}
}
@@ -920,7 +432,7 @@ impl common_meta::node_manager::Flownode for StreamingEngine {
}
}
impl FlowEngine for StreamingEngine {
impl FlowEngine for FlowWorkerManager {
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
self.create_flow_inner(args).await
}
@@ -937,16 +449,6 @@ impl FlowEngine for StreamingEngine {
self.flow_exist_inner(flow_id).await
}
async fn list_flows(&self) -> Result<impl IntoIterator<Item = FlowId>, Error> {
Ok(self
.flow_err_collectors
.read()
.await
.keys()
.cloned()
.collect::<Vec<_>>())
}
async fn handle_flow_inserts(
&self,
request: api::v1::region::InsertRequests,
@@ -972,7 +474,7 @@ impl FetchFromRow {
}
}
impl StreamingEngine {
impl FlowWorkerManager {
async fn handle_inserts_inner(
&self,
request: InsertRequests,
@@ -1050,7 +552,7 @@ impl StreamingEngine {
.copied()
.map(FetchFromRow::Idx)
.or_else(|| col_default_val.clone().map(FetchFromRow::Default))
.with_context(|| UnexpectedSnafu {
.with_context(|| crate::error::UnexpectedSnafu {
reason: format!(
"Column not found: {}, default_value: {:?}",
col_name, col_default_val

View File

@@ -31,7 +31,7 @@ use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use crate::adapter::table_source::ManagedTableSource;
use crate::adapter::{FlowId, FlowStreamingEngineRef, StreamingEngine};
use crate::adapter::{FlowId, FlowWorkerManager, FlowWorkerManagerRef};
use crate::error::{FlowNotFoundSnafu, JoinTaskSnafu, UnexpectedSnafu};
use crate::expr::error::ExternalSnafu;
use crate::expr::utils::find_plan_time_window_expr_lower_bound;
@@ -39,10 +39,10 @@ use crate::repr::RelationDesc;
use crate::server::get_all_flow_ids;
use crate::{Error, FrontendInvoker};
impl StreamingEngine {
impl FlowWorkerManager {
/// Create and start refill flow tasks in background
pub async fn create_and_start_refill_flow_tasks(
self: &FlowStreamingEngineRef,
self: &FlowWorkerManagerRef,
flow_metadata_manager: &FlowMetadataManagerRef,
catalog_manager: &CatalogManagerRef,
) -> Result<(), Error> {
@@ -130,7 +130,7 @@ impl StreamingEngine {
/// Start refilling flows; if any error occurs, rebuild the flow and retry
pub(crate) async fn starting_refill_flows(
self: &FlowStreamingEngineRef,
self: &FlowWorkerManagerRef,
tasks: Vec<RefillTask>,
) -> Result<(), Error> {
// TODO(discord9): add a back pressure mechanism
@@ -266,7 +266,7 @@ impl TaskState<()> {
fn start_running(
&mut self,
task_data: &TaskData,
manager: FlowStreamingEngineRef,
manager: FlowWorkerManagerRef,
mut output_stream: SendableRecordBatchStream,
) -> Result<(), Error> {
let data = (*task_data).clone();
@@ -383,7 +383,7 @@ impl RefillTask {
/// Start running the task in background, non-blocking
pub async fn start_running(
&mut self,
manager: FlowStreamingEngineRef,
manager: FlowWorkerManagerRef,
invoker: &FrontendInvoker,
) -> Result<(), Error> {
let TaskState::Prepared { sql } = &mut self.state else {

View File

@@ -16,9 +16,9 @@ use std::collections::BTreeMap;
use common_meta::key::flow::flow_state::FlowStat;
use crate::StreamingEngine;
use crate::FlowWorkerManager;
impl StreamingEngine {
impl FlowWorkerManager {
pub async fn gen_state_report(&self) -> FlowStat {
let mut full_report = BTreeMap::new();
let mut last_exec_time_map = BTreeMap::new();

View File

@@ -33,8 +33,8 @@ use crate::adapter::table_source::TableDesc;
use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
use crate::repr::{ColumnType, RelationDesc, RelationType};
use crate::StreamingEngine;
impl StreamingEngine {
use crate::FlowWorkerManager;
impl FlowWorkerManager {
/// Get a worker handle for creating flow, using round robin to select a worker
pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
let use_idx = {

View File

@@ -31,19 +31,4 @@ pub const DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
/// The minimum duration between two query executions by a batching mode task
pub const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
/// Grpc connection timeout
const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
/// Grpc max retry number
const GRPC_MAX_RETRIES: u32 = 3;
/// Timeout for the flow to wait for an available frontend;
/// if no available frontend is found after FRONTEND_SCAN_TIMEOUT has elapsed, return an error,
/// which should prevent the flownode from starting
pub const FRONTEND_SCAN_TIMEOUT: Duration = Duration::from_secs(30);
/// Frontend activity timeout
/// if a frontend is down (not sending heartbeats) for more than FRONTEND_ACTIVITY_TIMEOUT, it is removed from the list the flownode uses to connect
pub const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);

View File

@@ -17,16 +17,14 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use catalog::CatalogManagerRef;
use common_error::ext::BoxedError;
use common_meta::ddl::create_flow::FlowType;
use common_meta::key::flow::FlowMetadataManagerRef;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_info::TableInfoManager;
use common_meta::key::TableMetadataManagerRef;
use common_runtime::JoinHandle;
use common_telemetry::info;
use common_telemetry::tracing::warn;
use common_telemetry::{debug, info};
use common_time::TimeToLive;
use query::QueryEngineRef;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -38,10 +36,7 @@ use crate::batching_mode::task::BatchingTask;
use crate::batching_mode::time_window::{find_time_window_expr, TimeWindowExpr};
use crate::batching_mode::utils::sql_to_df_plan;
use crate::engine::FlowEngine;
use crate::error::{
ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, TableNotFoundMetaSnafu,
UnexpectedSnafu, UnsupportedSnafu,
};
use crate::error::{ExternalSnafu, FlowAlreadyExistSnafu, TableNotFoundMetaSnafu, UnexpectedSnafu};
use crate::{CreateFlowArgs, Error, FlowId, TableName};
/// Batching mode Engine, responsible for driving all the batching mode tasks
@@ -50,11 +45,9 @@ use crate::{CreateFlowArgs, Error, FlowId, TableName};
pub struct BatchingEngine {
tasks: RwLock<BTreeMap<FlowId, BatchingTask>>,
shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
/// frontend client for insert request
pub(crate) frontend_client: Arc<FrontendClient>,
frontend_client: Arc<FrontendClient>,
flow_metadata_manager: FlowMetadataManagerRef,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
}
@@ -64,7 +57,6 @@ impl BatchingEngine {
query_engine: QueryEngineRef,
flow_metadata_manager: FlowMetadataManagerRef,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,
) -> Self {
Self {
tasks: Default::default(),
@@ -72,7 +64,6 @@ impl BatchingEngine {
frontend_client,
flow_metadata_manager,
table_meta,
catalog_manager,
query_engine,
}
}
@@ -188,16 +179,6 @@ async fn get_table_name(
table_info: &TableInfoManager,
table_id: &TableId,
) -> Result<TableName, Error> {
get_table_info(table_info, table_id).await.map(|info| {
let name = info.table_name();
[name.catalog_name, name.schema_name, name.table_name]
})
}
async fn get_table_info(
table_info: &TableInfoManager,
table_id: &TableId,
) -> Result<TableInfoValue, Error> {
table_info
.get(*table_id)
.await
@@ -206,7 +187,8 @@ async fn get_table_info(
.with_context(|| UnexpectedSnafu {
reason: format!("Table id = {:?}, couldn't found table name", table_id),
})
.map(|info| info.into_inner())
.map(|name| name.table_name())
.map(|name| [name.catalog_name, name.schema_name, name.table_name])
}
impl BatchingEngine {
@@ -266,20 +248,7 @@ impl BatchingEngine {
let query_ctx = Arc::new(query_ctx);
let mut source_table_names = Vec::with_capacity(2);
for src_id in source_table_ids {
// also check the table options to see that ttl != instant
let table_name = get_table_name(self.table_meta.table_info_manager(), &src_id).await?;
let table_info = get_table_info(self.table_meta.table_info_manager(), &src_id).await?;
ensure!(
table_info.table_info.meta.options.ttl != Some(TimeToLive::Instant),
UnsupportedSnafu {
reason: format!(
"Source table `{}`(id={}) has instant TTL, Instant TTL is not supported under batching mode. Consider using a TTL longer than flush interval",
table_name.join("."),
src_id
),
}
);
source_table_names.push(table_name);
}
@@ -304,14 +273,7 @@ impl BatchingEngine {
})
.transpose()?;
info!(
"Flow id={}, found time window expr={}",
flow_id,
phy_expr
.as_ref()
.map(|phy_expr| phy_expr.to_string())
.unwrap_or("None".to_string())
);
info!("Flow id={}, found time window expr={:?}", flow_id, phy_expr);
let task = BatchingTask::new(
flow_id,
@@ -322,7 +284,7 @@ impl BatchingEngine {
sink_table_name,
source_table_names,
query_ctx,
self.catalog_manager.clone(),
self.table_meta.clone(),
rx,
);
@@ -331,13 +293,12 @@ impl BatchingEngine {
let frontend = self.frontend_client.clone();
// check execute once first to detect any error early
task.check_or_create_sink_table(&engine, &frontend).await?;
task.check_execute(&engine, &frontend).await?;
// TODO(discord9): use a time wheel or similar for better scheduling
let handle = common_runtime::spawn_global(async move {
// TODO(discord9): also save the handle & use a time wheel or similar for better scheduling
let _handle = common_runtime::spawn_global(async move {
task_inner.start_executing_loop(engine, frontend).await;
});
task.state.write().unwrap().task_handle = Some(handle);
// only replace here, not earlier, because we want the old one intact if something goes wrong before this line
let replaced_old_task_opt = self.tasks.write().await.insert(flow_id, task);
@@ -350,8 +311,7 @@ impl BatchingEngine {
pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
if self.tasks.write().await.remove(&flow_id).is_none() {
warn!("Flow {flow_id} not found in tasks");
FlowNotFoundSnafu { id: flow_id }.fail()?;
warn!("Flow {flow_id} not found in tasks")
}
let Some(tx) = self.shutdown_txs.write().await.remove(&flow_id) else {
UnexpectedSnafu {
@@ -366,21 +326,15 @@ impl BatchingEngine {
}
pub async fn flush_flow_inner(&self, flow_id: FlowId) -> Result<usize, Error> {
debug!("Try flush flow {flow_id}");
let task = self.tasks.read().await.get(&flow_id).cloned();
let task = task.with_context(|| FlowNotFoundSnafu { id: flow_id })?;
task.mark_all_windows_as_dirty()?;
let task = task.with_context(|| UnexpectedSnafu {
reason: format!("Can't found task for flow {flow_id}"),
})?;
let res = task
.gen_exec_once(&self.query_engine, &self.frontend_client)
.await?;
let affected_rows = res.map(|(r, _)| r).unwrap_or_default() as usize;
debug!(
"Successfully flush flow {flow_id}, affected rows={}",
affected_rows
);
Ok(affected_rows)
}
@@ -403,9 +357,6 @@ impl FlowEngine for BatchingEngine {
async fn flow_exist(&self, flow_id: FlowId) -> Result<bool, Error> {
Ok(self.flow_exist_inner(flow_id).await)
}
async fn list_flows(&self) -> Result<impl IntoIterator<Item = FlowId>, Error> {
Ok(self.tasks.read().await.keys().cloned().collect::<Vec<_>>())
}
async fn handle_flow_inserts(
&self,
request: api::v1::region::InsertRequests,

View File

@@ -14,113 +14,44 @@
//! Frontend client to run a flow as a batching task, i.e. a time-window-aware normal query triggered on every tick set by the user
use std::sync::{Arc, Weak};
use std::time::SystemTime;
use std::sync::Arc;
use api::v1::greptime_request::Request;
use api::v1::CreateTableExpr;
use client::{Client, Database};
use common_error::ext::{BoxedError, ErrorExt};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
use common_meta::peer::Peer;
use common_meta::rpc::store::RangeRequest;
use common_query::Output;
use common_telemetry::warn;
use meta_client::client::MetaClient;
use rand::rng;
use rand::seq::SliceRandom;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use crate::batching_mode::{
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
GRPC_MAX_RETRIES,
};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
use crate::batching_mode::DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT;
use crate::error::{ExternalSnafu, UnexpectedSnafu};
use crate::Error;
/// Just like [`GrpcQueryHandler`] but uses BoxedError
///
/// basically just a specialized `GrpcQueryHandler<Error=BoxedError>`
///
/// this is only useful for flownode to
/// invoke frontend Instance in standalone mode
#[async_trait::async_trait]
pub trait GrpcQueryHandlerWithBoxedError: Send + Sync + 'static {
async fn do_query(
&self,
query: Request,
ctx: QueryContextRef,
) -> std::result::Result<Output, BoxedError>;
fn default_channel_mgr() -> ChannelManager {
let cfg = ChannelConfig::new().timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
ChannelManager::with_config(cfg)
}
/// auto impl
#[async_trait::async_trait]
impl<
E: ErrorExt + Send + Sync + 'static,
T: GrpcQueryHandler<Error = E> + Send + Sync + 'static,
> GrpcQueryHandlerWithBoxedError for T
{
async fn do_query(
&self,
query: Request,
ctx: QueryContextRef,
) -> std::result::Result<Output, BoxedError> {
self.do_query(query, ctx).await.map_err(BoxedError::new)
}
fn client_from_urls(addrs: Vec<String>) -> Client {
Client::with_manager_and_urls(default_channel_mgr(), addrs)
}
type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;
/// A simple frontend client able to execute SQL using the gRPC protocol
///
/// This is for computation-heavy queries which need to offload computation to the frontend, lifting the load off the flownode
#[derive(Debug, Clone)]
#[derive(Debug)]
pub enum FrontendClient {
Distributed {
meta_client: Arc<MetaClient>,
chnl_mgr: ChannelManager,
},
Standalone {
/// for the sake of simplicity we still use grpc even in standalone mode
/// note that the clients here should all be lazy, so they can wait until the frontend is booted before making a connection
database_client: HandlerMutable,
/// TODO(discord9): not use grpc under standalone mode
database_client: DatabaseWithPeer,
},
}
impl FrontendClient {
/// Create a new empty frontend client, with a `HandlerMutable` to set the grpc handler later
pub fn from_empty_grpc_handler() -> (Self, HandlerMutable) {
let handler = Arc::new(std::sync::Mutex::new(None));
(
Self::Standalone {
database_client: handler.clone(),
},
handler,
)
}
pub fn from_meta_client(meta_client: Arc<MetaClient>) -> Self {
Self::Distributed {
meta_client,
chnl_mgr: {
let cfg = ChannelConfig::new()
.connect_timeout(GRPC_CONN_TIMEOUT)
.timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
ChannelManager::with_config(cfg)
},
}
}
pub fn from_grpc_handler(grpc_handler: Weak<dyn GrpcQueryHandlerWithBoxedError>) -> Self {
Self::Standalone {
database_client: Arc::new(std::sync::Mutex::new(Some(grpc_handler))),
}
}
}
#[derive(Debug, Clone)]
pub struct DatabaseWithPeer {
pub database: Database,
@@ -131,24 +62,29 @@ impl DatabaseWithPeer {
fn new(database: Database, peer: Peer) -> Self {
Self { database, peer }
}
}
/// Try sending a "SELECT 1" to the database
async fn try_select_one(&self) -> Result<(), Error> {
// note that we use `sql` here since `SELECT 1` returns 1 row
let _ = self
.database
.sql("SELECT 1")
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle `SELECT 1` request at {:?}", self.peer),
})?;
Ok(())
impl FrontendClient {
pub fn from_meta_client(meta_client: Arc<MetaClient>) -> Self {
Self::Distributed { meta_client }
}
pub fn from_static_grpc_addr(addr: String) -> Self {
let peer = Peer {
id: 0,
addr: addr.clone(),
};
let client = client_from_urls(vec![addr]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
Self::Standalone {
database_client: DatabaseWithPeer::new(database, peer),
}
}
}
impl FrontendClient {
/// scan for available frontend from metadata
pub(crate) async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
let Self::Distributed { meta_client, .. } = self else {
return Ok(vec![]);
};
@@ -178,178 +114,35 @@ impl FrontendClient {
Ok(res)
}
/// Get a frontend with a recent enough (less than 1 minute old) `last_activity_ts`
/// that is able to process queries
async fn get_random_active_frontend(
&self,
catalog: &str,
schema: &str,
) -> Result<DatabaseWithPeer, Error> {
let Self::Distributed {
meta_client: _,
chnl_mgr,
} = self
else {
return UnexpectedSnafu {
reason: "Expect distributed mode",
/// Get the database with max `last_activity_ts`
async fn get_last_active_frontend(&self) -> Result<DatabaseWithPeer, Error> {
if let Self::Standalone { database_client } = self {
return Ok(database_client.clone());
}
let frontends = self.scan_for_frontend().await?;
let mut peer = None;
if let Some((_, val)) = frontends.iter().max_by_key(|(_, val)| val.last_activity_ts) {
peer = Some(val.peer.clone());
}
let Some(peer) = peer else {
UnexpectedSnafu {
reason: format!("No frontend available: {:?}", frontends),
}
.fail();
.fail()?
};
let mut interval = tokio::time::interval(GRPC_CONN_TIMEOUT);
interval.tick().await;
for retry in 0..GRPC_MAX_RETRIES {
let mut frontends = self.scan_for_frontend().await?;
let now_in_ms = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_millis() as i64;
// shuffle the frontends to avoid always picking the same one
frontends.shuffle(&mut rng());
// found node with maximum last_activity_ts
for (_, node_info) in frontends
.iter()
// filter out frontends that have been down for more than 1 min
.filter(|(_, node_info)| {
node_info.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64
> now_in_ms
})
{
let addr = &node_info.peer.addr;
let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![addr.clone()]);
let database = Database::new(catalog, schema, client);
let db = DatabaseWithPeer::new(database, node_info.peer.clone());
match db.try_select_one().await {
Ok(_) => return Ok(db),
Err(e) => {
warn!(
"Failed to connect to frontend {} on retry={}: \n{e:?}",
addr, retry
);
}
}
}
// no available frontend
// sleep and retry
interval.tick().await;
}
NoAvailableFrontendSnafu {
timeout: GRPC_CONN_TIMEOUT,
context: "No available frontend found that is able to process query",
}
.fail()
let client = client_from_urls(vec![peer.addr.clone()]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
Ok(DatabaseWithPeer::new(database, peer))
}
pub async fn create(
&self,
create: CreateTableExpr,
catalog: &str,
schema: &str,
) -> Result<u32, Error> {
self.handle(
Request::Ddl(api::v1::DdlRequest {
expr: Some(api::v1::ddl_request::Expr::CreateTable(create)),
}),
catalog,
schema,
&mut None,
)
.await
}
/// Handle a request to frontend
pub(crate) async fn handle(
&self,
req: api::v1::greptime_request::Request,
catalog: &str,
schema: &str,
peer_desc: &mut Option<PeerDesc>,
) -> Result<u32, Error> {
/// Get a database client, and possibly update it before returning.
pub async fn get_database_client(&self) -> Result<DatabaseWithPeer, Error> {
match self {
FrontendClient::Distributed { .. } => {
let db = self.get_random_active_frontend(catalog, schema).await?;
*peer_desc = Some(PeerDesc::Dist {
peer: db.peer.clone(),
});
db.database
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle request at {:?}: {:?}", db.peer, req),
})
}
FrontendClient::Standalone { database_client } => {
let ctx = QueryContextBuilder::default()
.current_catalog(catalog.to_string())
.current_schema(schema.to_string())
.build();
let ctx = Arc::new(ctx);
{
let database_client = {
database_client
.lock()
.map_err(|e| {
UnexpectedSnafu {
reason: format!("Failed to lock database client: {e}"),
}
.build()
})?
.as_ref()
.context(UnexpectedSnafu {
reason: "Standalone's frontend instance is not set",
})?
.upgrade()
.context(UnexpectedSnafu {
reason: "Failed to upgrade database client",
})?
};
let resp: common_query::Output = database_client
.do_query(req.clone(), ctx)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
match resp.data {
common_query::OutputData::AffectedRows(rows) => {
Ok(rows.try_into().map_err(|_| {
UnexpectedSnafu {
reason: format!("Failed to convert rows to u32: {}", rows),
}
.build()
})?)
}
_ => UnexpectedSnafu {
reason: "Unexpected output data",
}
.fail(),
}
}
}
}
}
}
/// Describe a peer of frontend
#[derive(Debug, Default)]
pub(crate) enum PeerDesc {
/// Distributed mode's frontend peer address
Dist {
/// frontend peer address
peer: Peer,
},
/// Standalone mode
#[default]
Standalone,
}
impl std::fmt::Display for PeerDesc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PeerDesc::Dist { peer } => write!(f, "{}", peer.addr),
PeerDesc::Standalone => write!(f, "standalone"),
Self::Standalone { database_client } => Ok(database_client.clone()),
Self::Distributed { meta_client: _ } => self.get_last_active_frontend().await,
}
}
}

View File

@@ -22,14 +22,13 @@ use common_telemetry::tracing::warn;
use common_time::Timestamp;
use datatypes::value::Value;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tokio::sync::oneshot;
use tokio::time::Instant;
use crate::batching_mode::task::BatchingTask;
use crate::batching_mode::time_window::TimeWindowExpr;
use crate::batching_mode::MIN_REFRESH_DURATION;
use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu, UnexpectedSnafu};
use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu};
use crate::{Error, FlowId};
/// The state of the [`BatchingTask`].
@@ -47,8 +46,6 @@ pub struct TaskState {
exec_state: ExecState,
/// Shutdown receiver
pub(crate) shutdown_rx: oneshot::Receiver<()>,
/// Task handle
pub(crate) task_handle: Option<tokio::task::JoinHandle<()>>,
}
impl TaskState {
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
@@ -59,7 +56,6 @@ impl TaskState {
dirty_time_windows: Default::default(),
exec_state: ExecState::Idle,
shutdown_rx,
task_handle: None,
}
}
@@ -71,44 +67,19 @@ impl TaskState {
self.last_update_time = Instant::now();
}
/// Compute the next query delay based on the time window size or the last query duration.
/// The aim is to avoid overly frequent queries, but also to avoid delaying for too long.
/// The delay is computed as follows:
/// - If `time_window_size` is set, the delay is half the time window size, constrained to be
/// at least `last_query_duration` and at most `max_timeout`.
/// - If `time_window_size` is not set, the delay defaults to `last_query_duration`, constrained
/// to be at least `MIN_REFRESH_DURATION` and at most `max_timeout`.
/// wait for at least `last_query_duration`, at most `max_timeout` to start next query
///
/// If there are dirty time windows, the function returns an immediate execution time to clean them.
/// TODO: Make this behavior configurable.
pub fn get_next_start_query_time(
&self,
flow_id: FlowId,
time_window_size: &Option<Duration>,
max_timeout: Option<Duration>,
) -> Instant {
let last_duration = max_timeout
/// if there are more dirty time windows, execute the next query immediately
pub fn get_next_start_query_time(&self, max_timeout: Option<Duration>) -> Instant {
let next_duration = max_timeout
.unwrap_or(self.last_query_duration)
.min(self.last_query_duration)
.max(MIN_REFRESH_DURATION);
let next_duration = time_window_size
.map(|t| {
let half = t / 2;
half.max(last_duration)
})
.unwrap_or(last_duration);
.min(self.last_query_duration);
let next_duration = next_duration.max(MIN_REFRESH_DURATION);
// if there are dirty time windows, execute immediately to clean them
if self.dirty_time_windows.windows.is_empty() {
self.last_update_time + next_duration
} else {
debug!(
"Flow id = {}, still have {} dirty time window({:?}), execute immediately",
flow_id,
self.dirty_time_windows.windows.len(),
self.dirty_time_windows.windows
);
Instant::now()
}
}
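
The scheduling rule in the doc comment above boils down to a small pure function: prefer half the time window size when it is known, never wait less than the last query duration (or `MIN_REFRESH_DURATION`), cap the wait at `max_timeout`, and skip waiting entirely while dirty windows remain. A sketch of that rule with `std::time` only; the `MIN_REFRESH_DURATION` value here is an assumed placeholder:

use std::time::{Duration, Instant};

// Placeholder value; the crate defines its own MIN_REFRESH_DURATION.
const MIN_REFRESH_DURATION: Duration = Duration::from_secs(5);

fn next_start_time(
    last_update: Instant,
    last_query_duration: Duration,
    time_window_size: Option<Duration>,
    max_timeout: Option<Duration>,
    have_dirty_windows: bool,
) -> Instant {
    // Dirty time windows are cleaned as soon as possible.
    if have_dirty_windows {
        return Instant::now();
    }
    // Never wait less than the last query took, nor less than MIN_REFRESH_DURATION.
    let floor = last_query_duration.max(MIN_REFRESH_DURATION);
    // Prefer half the time window size when it is known.
    let mut delay = time_window_size.map(|w| (w / 2).max(floor)).unwrap_or(floor);
    // Never wait longer than max_timeout.
    if let Some(cap) = max_timeout {
        delay = delay.min(cap);
    }
    last_update + delay
}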
@@ -144,15 +115,6 @@ impl DirtyTimeWindows {
}
}
pub fn add_window(&mut self, start: Timestamp, end: Option<Timestamp>) {
self.windows.insert(start, end);
}
/// Clean all dirty time windows, useful when the time window expr can't be found
pub fn clean(&mut self) {
self.windows.clear();
}
/// Generate all filter expressions consuming all time windows
pub fn gen_filter_exprs(
&mut self,
@@ -215,18 +177,6 @@ impl DirtyTimeWindows {
let mut expr_lst = vec![];
for (start, end) in first_nth.into_iter() {
// align using time window exprs
let (start, end) = if let Some(ctx) = task_ctx {
let Some(time_window_expr) = &ctx.config.time_window_expr else {
UnexpectedSnafu {
reason: "time_window_expr is not set",
}
.fail()?
};
self.align_time_window(start, end, time_window_expr)?
} else {
(start, end)
};
debug!(
"Time window start: {:?}, end: {:?}",
start.to_iso8601_string(),
@@ -249,30 +199,6 @@ impl DirtyTimeWindows {
Ok(expr)
}
fn align_time_window(
&self,
start: Timestamp,
end: Option<Timestamp>,
time_window_expr: &TimeWindowExpr,
) -> Result<(Timestamp, Option<Timestamp>), Error> {
let align_start = time_window_expr.eval(start)?.0.context(UnexpectedSnafu {
reason: format!(
"Failed to align start time {:?} with time window expr {:?}",
start, time_window_expr
),
})?;
let align_end = end
.and_then(|end| {
time_window_expr
.eval(end)
// if the aligned end equals the original end, use it (it's already aligned); otherwise use the aligned upper bound
.map(|r| if r.0 == Some(end) { r.0 } else { r.1 })
.transpose()
})
.transpose()?;
Ok((align_start, align_end))
}
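
`align_time_window` snaps a dirty range onto window boundaries: the start is floored to its window's lower bound, and the end is kept only if it already sits on a boundary, otherwise rounded up to the window's upper bound (compare the `date_bin(INTERVAL '5 second', ts)` cases in the test below, where 3 aligns to 0 and an end of 9 aligns to 10). A sketch of the same rule for a fixed window size, using plain second values instead of `TimeWindowExpr`:

/// Align `(start, end)` to `window`-second buckets: floor the start; round the end up
/// to the next boundary unless it already lies on one.
fn align_to_fixed_window(start: i64, end: Option<i64>, window: i64) -> (i64, Option<i64>) {
    let align_start = start - start.rem_euclid(window);
    let align_end = end.map(|e| {
        let rem = e.rem_euclid(window);
        if rem == 0 { e } else { e - rem + window }
    });
    (align_start, align_end)
}

#[test]
fn align_examples() {
    // Mirrors the 5-second date_bin cases: 3 -> 0, 8 -> 5, end 9 -> 10, end 10 stays 10.
    assert_eq!(align_to_fixed_window(3, None, 5), (0, None));
    assert_eq!(align_to_fixed_window(8, Some(9), 5), (5, Some(10)));
    assert_eq!(align_to_fixed_window(8, Some(10), 5), (5, Some(10)));
}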
/// Merge time windows that overlaps or get too close
pub fn merge_dirty_time_windows(
&mut self,
@@ -361,12 +287,8 @@ enum ExecState {
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use session::context::QueryContext;
use super::*;
use crate::batching_mode::time_window::find_time_window_expr;
use crate::batching_mode::utils::sql_to_df_plan;
use crate::test_utils::create_test_query_engine;
#[test]
fn test_merge_dirty_time_windows() {
@@ -482,59 +404,4 @@ mod test {
assert_eq!(expected_filter_expr, to_sql.as_deref());
}
}
#[tokio::test]
async fn test_align_time_window() {
type TimeWindow = (Timestamp, Option<Timestamp>);
struct TestCase {
sql: String,
aligns: Vec<(TimeWindow, TimeWindow)>,
}
let testcases: Vec<TestCase> = vec![TestCase{
sql: "SELECT date_bin(INTERVAL '5 second', ts) AS time_window FROM numbers_with_ts GROUP BY time_window;".to_string(),
aligns: vec![
((Timestamp::new_second(3), None), (Timestamp::new_second(0), None)),
((Timestamp::new_second(8), None), (Timestamp::new_second(5), None)),
((Timestamp::new_second(8), Some(Timestamp::new_second(10))), (Timestamp::new_second(5), Some(Timestamp::new_second(10)))),
((Timestamp::new_second(8), Some(Timestamp::new_second(9))), (Timestamp::new_second(5), Some(Timestamp::new_second(10)))),
],
}];
let query_engine = create_test_query_engine();
let ctx = QueryContext::arc();
for TestCase { sql, aligns } in testcases {
let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), &sql, true)
.await
.unwrap();
let (column_name, time_window_expr, _, df_schema) = find_time_window_expr(
&plan,
query_engine.engine_state().catalog_manager().clone(),
ctx.clone(),
)
.await
.unwrap();
let time_window_expr = time_window_expr
.map(|expr| {
TimeWindowExpr::from_expr(
&expr,
&column_name,
&df_schema,
&query_engine.engine_state().session_state(),
)
})
.transpose()
.unwrap()
.unwrap();
let dirty = DirtyTimeWindows::default();
for (before_align, expected_after_align) in aligns {
let after_align = dirty
.align_time_window(before_align.0, before_align.1, &time_window_expr)
.unwrap();
assert_eq!(expected_after_align, after_align);
}
}
}
}

View File

@@ -12,32 +12,33 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeSet, HashSet};
use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, RwLock};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use api::v1::CreateTableExpr;
use arrow_schema::Fields;
use catalog::CatalogManagerRef;
use common_error::ext::BoxedError;
use common_query::logical_plan::breakup_insert_plan;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::TableMetadataManagerRef;
use common_telemetry::tracing::warn;
use common_telemetry::{debug, info};
use common_time::Timestamp;
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion::optimizer::AnalyzerRule;
use datafusion::sql::unparser::expr_to_sql;
use datafusion_common::tree_node::{Transformed, TreeNode};
use datafusion_common::tree_node::TreeNode;
use datafusion_expr::{DmlStatement, LogicalPlan, WriteOp};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::schema::constraint::NOW_FN;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
use datatypes::value::Value;
use operator::expr_helper::column_schemas_to_defs;
use query::query_engine::DefaultSerializer;
use query::QueryEngineRef;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::metadata::RawTableMeta;
use tokio::sync::oneshot;
use tokio::sync::oneshot::error::TryRecvError;
use tokio::time::Instant;
@@ -47,16 +48,14 @@ use crate::batching_mode::frontend_client::FrontendClient;
use crate::batching_mode::state::TaskState;
use crate::batching_mode::time_window::TimeWindowExpr;
use crate::batching_mode::utils::{
get_table_info_df_schema, sql_to_df_plan, AddAutoColumnRewriter, AddFilterRewriter,
FindGroupByFinalName,
sql_to_df_plan, AddAutoColumnRewriter, AddFilterRewriter, FindGroupByFinalName,
};
use crate::batching_mode::{
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, MIN_REFRESH_DURATION, SLOW_QUERY_THRESHOLD,
};
use crate::df_optimizer::apply_df_optimizer;
use crate::error::{
ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu,
SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
ConvertColumnSchemaSnafu, DatafusionSnafu, DatatypesSnafu, ExternalSnafu, InvalidRequestSnafu,
SubstraitEncodeLogicalPlanSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu,
};
use crate::metrics::{
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME, METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY,
@@ -74,7 +73,7 @@ pub struct TaskConfig {
pub expire_after: Option<i64>,
sink_table_name: [String; 3],
pub source_table_names: HashSet<[String; 3]>,
catalog_manager: CatalogManagerRef,
table_meta: TableMetadataManagerRef,
}
#[derive(Clone)]
@@ -94,7 +93,7 @@ impl BatchingTask {
sink_table_name: [String; 3],
source_table_names: Vec<[String; 3]>,
query_ctx: QueryContextRef,
catalog_manager: CatalogManagerRef,
table_meta: TableMetadataManagerRef,
shutdown_rx: oneshot::Receiver<()>,
) -> Self {
Self {
@@ -106,48 +105,32 @@ impl BatchingTask {
expire_after,
sink_table_name,
source_table_names: source_table_names.into_iter().collect(),
catalog_manager,
table_meta,
}),
state: Arc::new(RwLock::new(TaskState::new(query_ctx, shutdown_rx))),
}
}
/// mark time window range (now - expire_after, now) as dirty (or (0, now) if expire_after not set)
///
/// useful for flush_flow to flush the dirty time window range
pub fn mark_all_windows_as_dirty(&self) -> Result<(), Error> {
let now = SystemTime::now();
let now = Timestamp::new_second(
now.duration_since(UNIX_EPOCH)
/// Test execute, for checking syntax and the like
pub async fn check_execute(
&self,
engine: &QueryEngineRef,
frontend_client: &Arc<FrontendClient>,
) -> Result<Option<(u32, Duration)>, Error> {
// use the current time to build a test dirty time window, which should be safe
let start = SystemTime::now();
let ts = Timestamp::new_second(
start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs() as _,
);
let lower_bound = self
.config
.expire_after
.map(|e| now.sub_duration(Duration::from_secs(e as _)))
.transpose()
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.unwrap_or(Timestamp::new_second(0));
debug!(
"Flow {} mark range ({:?}, {:?}) as dirty",
self.config.flow_id, lower_bound, now
);
self.state
.write()
.unwrap()
.dirty_time_windows
.add_window(lower_bound, Some(now));
Ok(())
}
.add_lower_bounds(vec![ts].into_iter());
/// Create sink table if not exists
pub async fn check_or_create_sink_table(
&self,
engine: &QueryEngineRef,
frontend_client: &Arc<FrontendClient>,
) -> Result<Option<(u32, Duration)>, Error> {
if !self.is_table_exist(&self.config.sink_table_name).await? {
let create_table = self.gen_create_table_expr(engine.clone()).await?;
info!(
@@ -160,14 +143,18 @@ impl BatchingTask {
self.config.sink_table_name.join(".")
);
}
Ok(None)
self.gen_exec_once(engine, frontend_client).await
}
async fn is_table_exist(&self, table_name: &[String; 3]) -> Result<bool, Error> {
self.config
.catalog_manager
.table_exists(&table_name[0], &table_name[1], &table_name[2], None)
.table_meta
.table_name_manager()
.exists(TableNameKey {
catalog: &table_name[0],
schema: &table_name[1],
table: &table_name[2],
})
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
@@ -179,10 +166,8 @@ impl BatchingTask {
frontend_client: &Arc<FrontendClient>,
) -> Result<Option<(u32, Duration)>, Error> {
if let Some(new_query) = self.gen_insert_plan(engine).await? {
debug!("Generate new query: {:#?}", new_query);
self.execute_logical_plan(frontend_client, &new_query).await
} else {
debug!("Generate no query");
Ok(None)
}
}
@@ -191,35 +176,67 @@ impl BatchingTask {
&self,
engine: &QueryEngineRef,
) -> Result<Option<LogicalPlan>, Error> {
let (table, df_schema) = get_table_info_df_schema(
self.config.catalog_manager.clone(),
self.config.sink_table_name.clone(),
)
.await?;
let full_table_name = self.config.sink_table_name.clone().join(".");
let table_id = self
.config
.table_meta
.table_name_manager()
.get(common_meta::key::table_name::TableNameKey::new(
&self.config.sink_table_name[0],
&self.config.sink_table_name[1],
&self.config.sink_table_name[2],
))
.await
.with_context(|_| TableNotFoundMetaSnafu {
msg: full_table_name.clone(),
})?
.map(|t| t.table_id())
.with_context(|| TableNotFoundSnafu {
name: full_table_name.clone(),
})?;
let table = self
.config
.table_meta
.table_info_manager()
.get(table_id)
.await
.with_context(|_| TableNotFoundMetaSnafu {
msg: full_table_name.clone(),
})?
.with_context(|| TableNotFoundSnafu {
name: full_table_name.clone(),
})?
.into_inner();
let schema: datatypes::schema::Schema = table
.table_info
.meta
.schema
.clone()
.try_into()
.with_context(|_| DatatypesSnafu {
extra: format!(
"Failed to convert schema from raw schema, raw_schema={:?}",
table.table_info.meta.schema
),
})?;
let df_schema = Arc::new(schema.arrow_schema().clone().try_into().with_context(|_| {
DatafusionSnafu {
context: format!(
"Failed to convert arrow schema to datafusion schema, arrow_schema={:?}",
schema.arrow_schema()
),
}
})?);
let new_query = self
.gen_query_with_time_window(engine.clone(), &table.meta.schema)
.gen_query_with_time_window(engine.clone(), &table.table_info.meta)
.await?;
let insert_into = if let Some((new_query, _column_cnt)) = new_query {
// first check that all columns in the input query exist in the sink table
// since INSERT INTO refers to names in the record batch generated by the given query
let table_columns = df_schema
.columns()
.into_iter()
.map(|c| c.name)
.collect::<BTreeSet<_>>();
for column in new_query.schema().columns() {
ensure!(
table_columns.contains(column.name()),
InvalidQuerySnafu {
reason: format!(
"Column {} not found in sink table with columns {:?}",
column, table_columns
),
}
);
}
// update_at & time index placeholder (if present) should have default values
LogicalPlan::Dml(DmlStatement::new(
datafusion_common::TableReference::Full {
@@ -234,9 +251,6 @@ impl BatchingTask {
} else {
return Ok(None);
};
let insert_into = insert_into.recompute_schema().context(DatafusionSnafu {
context: "Failed to recompute schema",
})?;
Ok(Some(insert_into))
}
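
The column-presence check in this hunk gathers the sink table's column names into a set and requires every column produced by the rewritten query to be present, because the generated INSERT refers to record-batch columns by name. A self-contained sketch of that validation with plain strings in place of DataFusion columns:

use std::collections::BTreeSet;

/// Verify every query output column exists in the sink table.
fn check_columns_exist(query_columns: &[&str], sink_columns: &[&str]) -> Result<(), String> {
    let table_columns: BTreeSet<&str> = sink_columns.iter().copied().collect();
    for column in query_columns {
        if !table_columns.contains(column) {
            return Err(format!(
                "Column {column} not found in sink table with columns {table_columns:?}"
            ));
        }
    }
    Ok(())
}

// check_columns_exist(&["number", "ts"], &["number", "ts", "update_at"]) passes,
// while check_columns_exist(&["number", "avg_val"], &["number", "ts"]) reports the missing column.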
@@ -245,11 +259,14 @@ impl BatchingTask {
frontend_client: &Arc<FrontendClient>,
expr: CreateTableExpr,
) -> Result<(), Error> {
let catalog = &self.config.sink_table_name[0];
let schema = &self.config.sink_table_name[1];
frontend_client
.create(expr.clone(), catalog, schema)
.await?;
let db_client = frontend_client.get_database_client().await?;
db_client
.database
.create(expr.clone())
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to create table with expr: {:?}", expr),
})?;
Ok(())
}
@@ -260,78 +277,27 @@ impl BatchingTask {
) -> Result<Option<(u32, Duration)>, Error> {
let instant = Instant::now();
let flow_id = self.config.flow_id;
let db_client = frontend_client.get_database_client().await?;
let peer_addr = db_client.peer.addr;
debug!(
"Executing flow {flow_id}(expire_after={:?} secs) with query {}",
self.config.expire_after, &plan
"Executing flow {flow_id}(expire_after={:?} secs) on {:?} with query {}",
self.config.expire_after, peer_addr, &plan
);
let catalog = &self.config.sink_table_name[0];
let schema = &self.config.sink_table_name[1];
let timer = METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME
.with_label_values(&[flow_id.to_string().as_str()])
.start_timer();
// fix all table refs by making them fully qualified, i.e. "table_name" => "catalog_name.schema_name.table_name"
let fixed_plan = plan
.clone()
.transform_down_with_subqueries(|p| {
if let LogicalPlan::TableScan(mut table_scan) = p {
let resolved = table_scan.table_name.resolve(catalog, schema);
table_scan.table_name = resolved.into();
Ok(Transformed::yes(LogicalPlan::TableScan(table_scan)))
} else {
Ok(Transformed::no(p))
}
})
.with_context(|_| DatafusionSnafu {
context: format!("Failed to fix table ref in logical plan, plan={:?}", plan),
})?
.data;
let message = DFLogicalSubstraitConvertor {}
.encode(plan, DefaultSerializer)
.context(SubstraitEncodeLogicalPlanSnafu)?;
let expanded_plan = CountWildcardRule::new()
.analyze(fixed_plan.clone(), &Default::default())
.with_context(|_| DatafusionSnafu {
context: format!(
"Failed to expand wildcard in logical plan, plan={:?}",
fixed_plan
),
})?;
let req = api::v1::greptime_request::Request::Query(api::v1::QueryRequest {
query: Some(api::v1::query_request::Query::LogicalPlan(message.to_vec())),
});
let plan = expanded_plan;
let mut peer_desc = None;
let res = {
let _timer = METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME
.with_label_values(&[flow_id.to_string().as_str()])
.start_timer();
// hack: special handling for the insert logical plan
let req = if let Some((insert_to, insert_plan)) =
breakup_insert_plan(&plan, catalog, schema)
{
let message = DFLogicalSubstraitConvertor {}
.encode(&insert_plan, DefaultSerializer)
.context(SubstraitEncodeLogicalPlanSnafu)?;
api::v1::greptime_request::Request::Query(api::v1::QueryRequest {
query: Some(api::v1::query_request::Query::InsertIntoPlan(
api::v1::InsertIntoPlan {
table_name: Some(insert_to),
logical_plan: message.to_vec(),
},
)),
})
} else {
let message = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.context(SubstraitEncodeLogicalPlanSnafu)?;
api::v1::greptime_request::Request::Query(api::v1::QueryRequest {
query: Some(api::v1::query_request::Query::LogicalPlan(message.to_vec())),
})
};
frontend_client
.handle(req, catalog, schema, &mut peer_desc)
.await
};
let res = db_client.database.handle(req).await;
drop(timer);
let elapsed = instant.elapsed();
if let Ok(affected_rows) = &res {
@@ -341,23 +307,19 @@ impl BatchingTask {
);
} else if let Err(err) = &res {
warn!(
"Failed to execute Flow {flow_id} on frontend {:?}, result: {err:?}, elapsed: {:?} with query: {}",
peer_desc, elapsed, &plan
"Failed to execute Flow {flow_id} on frontend {}, result: {err:?}, elapsed: {:?} with query: {}",
peer_addr, elapsed, &plan
);
}
// record slow query
if elapsed >= SLOW_QUERY_THRESHOLD {
warn!(
"Flow {flow_id} on frontend {:?} executed for {:?} before complete, query: {}",
peer_desc, elapsed, &plan
"Flow {flow_id} on frontend {} executed for {:?} before complete, query: {}",
peer_addr, elapsed, &plan
);
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY
.with_label_values(&[
flow_id.to_string().as_str(),
&plan.to_string(),
&peer_desc.unwrap_or_default().to_string(),
])
.with_label_values(&[flow_id.to_string().as_str(), &plan.to_string(), &peer_addr])
.observe(elapsed.as_secs_f64());
}
@@ -366,7 +328,12 @@ impl BatchingTask {
.unwrap()
.after_query_exec(elapsed, res.is_ok());
let res = res?;
let res = res.context(InvalidRequestSnafu {
context: format!(
"Failed to execute query for flow={}: \'{}\'",
self.config.flow_id, &plan
),
})?;
Ok(Some((res, elapsed)))
}
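
One of the request-construction paths in this hunk special-cases DML: if the plan is an insert, it is broken into the target table plus its input plan and shipped as an InsertIntoPlan request; otherwise the whole plan is substrait-encoded as a plain LogicalPlan query. A sketch of that branch, assuming the helpers it references (`breakup_insert_plan`, `DFLogicalSubstraitConvertor`, `DefaultSerializer`) behave as used above:

/// Sketch: build the gRPC query request for a plan, splitting out inserts.
fn build_query_request(
    plan: &datafusion_expr::LogicalPlan,
    catalog: &str,
    schema: &str,
) -> Result<api::v1::greptime_request::Request, Error> {
    let query = if let Some((insert_to, insert_plan)) = breakup_insert_plan(plan, catalog, schema) {
        // DML: send the target table and only the input plan, so the frontend runs the insert.
        let message = DFLogicalSubstraitConvertor {}
            .encode(&insert_plan, DefaultSerializer)
            .context(SubstraitEncodeLogicalPlanSnafu)?;
        api::v1::query_request::Query::InsertIntoPlan(api::v1::InsertIntoPlan {
            table_name: Some(insert_to),
            logical_plan: message.to_vec(),
        })
    } else {
        // Plain query: ship the whole plan as substrait bytes.
        let message = DFLogicalSubstraitConvertor {}
            .encode(plan, DefaultSerializer)
            .context(SubstraitEncodeLogicalPlanSnafu)?;
        api::v1::query_request::Query::LogicalPlan(message.to_vec())
    };
    Ok(api::v1::greptime_request::Request::Query(
        api::v1::QueryRequest { query: Some(query) },
    ))
}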
@@ -380,23 +347,6 @@ impl BatchingTask {
frontend_client: Arc<FrontendClient>,
) {
loop {
// first check if shutdown signal is received
// if so, break the loop
{
let mut state = self.state.write().unwrap();
match state.shutdown_rx.try_recv() {
Ok(()) => break,
Err(TryRecvError::Closed) => {
warn!(
"Unexpected shutdown flow {}, shutdown anyway",
self.config.flow_id
);
break;
}
Err(TryRecvError::Empty) => (),
}
}
let mut new_query = None;
let mut gen_and_exec = async || {
new_query = self.gen_insert_plan(&engine).await?;
@@ -410,17 +360,19 @@ impl BatchingTask {
// normal execute, sleep for some time before doing next query
Ok(Some(_)) => {
let sleep_until = {
let state = self.state.write().unwrap();
state.get_next_start_query_time(
self.config.flow_id,
&self
.config
.time_window_expr
.as_ref()
.and_then(|t| *t.time_window_size()),
Some(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT),
)
let mut state = self.state.write().unwrap();
match state.shutdown_rx.try_recv() {
Ok(()) => break,
Err(TryRecvError::Closed) => {
warn!(
"Unexpected shutdown flow {}, shutdown anyway",
self.config.flow_id
);
break;
}
Err(TryRecvError::Empty) => (),
}
state.get_next_start_query_time(Some(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT))
};
tokio::time::sleep_until(sleep_until).await;
}
@@ -434,18 +386,14 @@ impl BatchingTask {
continue;
}
// TODO(discord9): this error should have a better place to go, but for now just print it; more context is also needed
Err(err) => {
match new_query {
Some(query) => {
common_telemetry::error!(err; "Failed to execute query for flow={} with query: {query}", self.config.flow_id)
}
None => {
common_telemetry::error!(err; "Failed to generate query for flow={}", self.config.flow_id)
}
Err(err) => match new_query {
Some(query) => {
common_telemetry::error!(err; "Failed to execute query for flow={} with query: {query}", self.config.flow_id)
}
// also sleep for a little while before trying again to prevent flooding the logs
tokio::time::sleep(MIN_REFRESH_DURATION).await;
}
None => {
common_telemetry::error!(err; "Failed to generate query for flow={}", self.config.flow_id)
}
},
}
}
}
@@ -470,7 +418,7 @@ impl BatchingTask {
async fn gen_query_with_time_window(
&self,
engine: QueryEngineRef,
sink_table_schema: &Arc<Schema>,
sink_table_meta: &RawTableMeta,
) -> Result<Option<(LogicalPlan, usize)>, Error> {
let query_ctx = self.state.read().unwrap().query_ctx.clone();
let start = SystemTime::now();
@@ -529,11 +477,9 @@ impl BatchingTask {
debug!(
"Flow id = {:?}, can't get window size: precise_lower_bound={expire_time_window_bound:?}, using the same query", self.config.flow_id
);
// clean dirty time windows too; these could be from create flow's check_execute
self.state.write().unwrap().dirty_time_windows.clean();
let mut add_auto_column =
AddAutoColumnRewriter::new(sink_table_schema.clone());
AddAutoColumnRewriter::new(sink_table_meta.schema.clone());
let plan = self
.config
.plan
@@ -541,10 +487,7 @@ impl BatchingTask {
.clone()
.rewrite(&mut add_auto_column)
.with_context(|_| DatafusionSnafu {
context: format!(
"Failed to rewrite plan:\n {}\n",
self.config.plan
),
context: format!("Failed to rewrite plan {:?}", self.config.plan),
})?
.data;
let schema_len = plan.schema().fields().len();
@@ -572,23 +515,18 @@ impl BatchingTask {
return Ok(None);
};
// TODO(discord9): add auto columns or not? This might break compatibility for sink tables auto-created before this change, but that should be ok, right?
let mut add_filter = AddFilterRewriter::new(expr);
let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_schema.clone());
let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_meta.schema.clone());
// make a not optimized plan for clearer unparse
let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.config.query, false)
.await?;
let rewrite = plan
.clone()
plan.clone()
.rewrite(&mut add_filter)
.and_then(|p| p.data.rewrite(&mut add_auto_column))
.with_context(|_| DatafusionSnafu {
context: format!("Failed to rewrite plan:\n {}\n", plan),
context: format!("Failed to rewrite plan {plan:?}"),
})?
.data;
// only apply optimize after complex rewrite is done
apply_df_optimizer(rewrite).await?
.data
};
Ok(Some((new_plan, schema_len)))
@@ -596,7 +534,7 @@ impl BatchingTask {
}
// auto-created tables have an auto-added `update_at` column, and optionally an `AUTO_CREATED_PLACEHOLDER_TS_COL` column as a time index placeholder if no timestamp column is specified
// TODO(discord9): for now no default value is set for the auto-added column, for compatibility with streaming mode, but this might change in favor of simpler code?
// TODO(discord9): unit test
fn create_table_with_expr(
plan: &LogicalPlan,
sink_table_name: &[String; 3],
@@ -620,7 +558,11 @@ fn create_table_with_expr(
AUTO_CREATED_UPDATE_AT_TS_COL,
ConcreteDataType::timestamp_millisecond_datatype(),
true,
);
)
.with_default_constraint(Some(ColumnDefaultConstraint::Function(NOW_FN.to_string())))
.context(DatatypesSnafu {
extra: "Failed to build column `update_at TimestampMillisecond default now()`",
})?;
column_schemas.push(update_at_schema);
let time_index = if let Some(time_index) = first_time_stamp {
@@ -632,7 +574,16 @@ fn create_table_with_expr(
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
.with_time_index(true)
.with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Timestamp(
Timestamp::new_millisecond(0),
))))
.context(DatatypesSnafu {
extra: format!(
"Failed to build column `{} TimestampMillisecond TIME INDEX default 0`",
AUTO_CREATED_PLACEHOLDER_TS_COL
),
})?,
);
AUTO_CREATED_PLACEHOLDER_TS_COL.to_string()
};
@@ -724,14 +675,20 @@ mod test {
AUTO_CREATED_UPDATE_AT_TS_COL,
ConcreteDataType::timestamp_millisecond_datatype(),
true,
);
)
.with_default_constraint(Some(ColumnDefaultConstraint::Function(NOW_FN.to_string())))
.unwrap();
let ts_placeholder_schema = ColumnSchema::new(
AUTO_CREATED_PLACEHOLDER_TS_COL,
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true);
.with_time_index(true)
.with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::Timestamp(
Timestamp::new_millisecond(0),
))))
.unwrap();
let testcases = vec![
TestCase {

View File

@@ -55,9 +55,6 @@ use crate::error::{
use crate::expr::error::DataTypeSnafu;
use crate::Error;
/// Represents a test timestamp in seconds since the Unix epoch.
const DEFAULT_TEST_TIMESTAMP: Timestamp = Timestamp::new_second(17_0000_0000);
/// Time window expr like `date_bin(INTERVAL '1' MINUTE, ts)`, this type help with
/// evaluating the expr using given timestamp
///
@@ -73,26 +70,9 @@ pub struct TimeWindowExpr {
pub column_name: String,
logical_expr: Expr,
df_schema: DFSchema,
eval_time_window_size: Option<std::time::Duration>,
}
impl std::fmt::Display for TimeWindowExpr {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TimeWindowExpr")
.field("phy_expr", &self.phy_expr.to_string())
.field("column_name", &self.column_name)
.field("logical_expr", &self.logical_expr.to_string())
.field("df_schema", &self.df_schema)
.finish()
}
}
impl TimeWindowExpr {
/// The time window size of the expr, get from calling `eval` with a test timestamp
pub fn time_window_size(&self) -> &Option<std::time::Duration> {
&self.eval_time_window_size
}
pub fn from_expr(
expr: &Expr,
column_name: &str,
@@ -100,28 +80,12 @@ impl TimeWindowExpr {
session: &SessionState,
) -> Result<Self, Error> {
let phy_expr: PhysicalExprRef = to_phy_expr(expr, df_schema, session)?;
let mut zelf = Self {
Ok(Self {
phy_expr,
column_name: column_name.to_string(),
logical_expr: expr.clone(),
df_schema: df_schema.clone(),
eval_time_window_size: None,
};
let test_ts = DEFAULT_TEST_TIMESTAMP;
let (l, u) = zelf.eval(test_ts)?;
let time_window_size = match (l, u) {
(Some(l), Some(u)) => u.sub(&l).map(|r| r.to_std()).transpose().map_err(|_| {
UnexpectedSnafu {
reason: format!(
"Expect upper bound older than lower bound, found upper={u:?} and lower={l:?}"
),
}
.build()
})?,
_ => None,
};
zelf.eval_time_window_size = time_window_size;
Ok(zelf)
})
}
pub fn eval(
@@ -292,7 +256,7 @@ fn columnar_to_ts_vector(columnar: &ColumnarValue) -> Result<Vec<Option<Timestam
Ok(val)
}
/// Return (`the column name of time index column`, `the time window expr`, `the expected time unit of time index column`, `the expr's schema for evaluating the time window`)
/// Return (the column name of time index column, the time window expr, the expected time unit of time index column, the expr's schema for evaluating the time window)
///
/// The time window expr is expected to have one input column of Timestamp type and to also return a Timestamp type; it is expected
/// to be monotonically increasing and to appear in the innermost GROUP BY clause
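
The `eval_time_window_size` logic shown earlier derives the window size by evaluating the expression at an arbitrary probe timestamp and subtracting the returned bounds. A sketch of that derivation with a plain closure standing in for `TimeWindowExpr::eval`:

/// Sketch: derive the window size from an evaluation function that maps a timestamp
/// to the (lower, upper) bounds of its window, as TimeWindowExpr::eval does.
fn window_size_of(
    eval: impl Fn(i64) -> (Option<i64>, Option<i64>),
    probe_ts: i64,
) -> Option<std::time::Duration> {
    match eval(probe_ts) {
        (Some(lower), Some(upper)) if upper > lower => {
            Some(std::time::Duration::from_secs((upper - lower) as u64))
        }
        _ => None,
    }
}

#[test]
fn one_minute_window() {
    // A date_bin-style 1-minute window yields a 60s size, whatever the probe timestamp is.
    let eval = |ts: i64| (Some(ts - ts % 60), Some(ts - ts % 60 + 60));
    assert_eq!(
        window_size_of(eval, 1_700_000_000),
        Some(std::time::Duration::from_secs(60))
    );
}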
@@ -729,28 +693,6 @@ mod test {
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with where
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number in (2, 3, 4) GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with between and
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number BETWEEN 2 AND 4 GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// no time index
(
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",

View File

@@ -14,63 +14,29 @@
//! some utils for helping with batching mode
use std::collections::{BTreeSet, HashSet};
use std::collections::HashSet;
use std::sync::Arc;
use catalog::CatalogManagerRef;
use common_error::ext::BoxedError;
use common_telemetry::debug;
use common_telemetry::{debug, info};
use datafusion::error::Result as DfResult;
use datafusion::logical_expr::Expr;
use datafusion::sql::unparser::Unparser;
use datafusion_common::tree_node::{
Transformed, TreeNodeRecursion, TreeNodeRewriter, TreeNodeVisitor,
};
use datafusion_common::{DFSchema, DataFusionError, ScalarValue};
use datafusion_expr::{Distinct, LogicalPlan, Projection};
use datatypes::schema::SchemaRef;
use datafusion_common::DataFusionError;
use datafusion_expr::{Distinct, LogicalPlan};
use datatypes::schema::RawSchema;
use query::parser::QueryLanguageParser;
use query::QueryEngineRef;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use table::metadata::TableInfo;
use snafu::ResultExt;
use crate::adapter::AUTO_CREATED_PLACEHOLDER_TS_COL;
use crate::df_optimizer::apply_df_optimizer;
use crate::error::{DatafusionSnafu, ExternalSnafu, TableNotFoundSnafu};
use crate::{Error, TableName};
pub async fn get_table_info_df_schema(
catalog_mr: CatalogManagerRef,
table_name: TableName,
) -> Result<(Arc<TableInfo>, Arc<DFSchema>), Error> {
let full_table_name = table_name.clone().join(".");
let table = catalog_mr
.table(&table_name[0], &table_name[1], &table_name[2], None)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(TableNotFoundSnafu {
name: &full_table_name,
})?;
let table_info = table.table_info().clone();
let schema = table_info.meta.schema.clone();
let df_schema: Arc<DFSchema> = Arc::new(
schema
.arrow_schema()
.clone()
.try_into()
.with_context(|_| DatafusionSnafu {
context: format!(
"Failed to convert arrow schema to datafusion schema, arrow_schema={:?}",
schema.arrow_schema()
),
})?,
);
Ok((table_info, df_schema))
}
use crate::error::{DatafusionSnafu, ExternalSnafu};
use crate::Error;
/// Convert sql to datafusion logical plan
pub async fn sql_to_df_plan(
@@ -198,16 +164,14 @@ impl TreeNodeVisitor<'_> for FindGroupByFinalName {
/// (which doesn't necessarily need to have that exact name, it just needs to be an extra timestamp column)
/// and `__ts_placeholder` (this column must have exactly this name and be a timestamp)
/// with values like `now()` and `0`
///
/// it also gives existing columns aliases to match the sink table's columns if needed
#[derive(Debug)]
pub struct AddAutoColumnRewriter {
pub schema: SchemaRef,
pub schema: RawSchema,
pub is_rewritten: bool,
}
impl AddAutoColumnRewriter {
pub fn new(schema: SchemaRef) -> Self {
pub fn new(schema: RawSchema) -> Self {
Self {
schema,
is_rewritten: false,
@@ -217,97 +181,37 @@ impl AddAutoColumnRewriter {
impl TreeNodeRewriter for AddAutoColumnRewriter {
type Node = LogicalPlan;
fn f_down(&mut self, mut node: Self::Node) -> DfResult<Transformed<Self::Node>> {
fn f_down(&mut self, node: Self::Node) -> DfResult<Transformed<Self::Node>> {
if self.is_rewritten {
return Ok(Transformed::no(node));
}
// if it is DISTINCT ALL, wrap it in a projection
if let LogicalPlan::Distinct(Distinct::All(_)) = &node {
let mut exprs = vec![];
for field in node.schema().fields().iter() {
exprs.push(Expr::Column(datafusion::common::Column::new_unqualified(
field.name(),
)));
}
let projection =
LogicalPlan::Projection(Projection::try_new(exprs, Arc::new(node.clone()))?);
node = projection;
}
// handle table_scan by wrapping it in a projection
else if let LogicalPlan::TableScan(table_scan) = node {
let mut exprs = vec![];
for field in table_scan.projected_schema.fields().iter() {
exprs.push(Expr::Column(datafusion::common::Column::new(
Some(table_scan.table_name.clone()),
field.name(),
)));
}
let projection = LogicalPlan::Projection(Projection::try_new(
exprs,
Arc::new(LogicalPlan::TableScan(table_scan)),
)?);
node = projection;
}
// only rewrite once the outermost projection is found
let mut exprs = if let LogicalPlan::Projection(project) = &node {
project.expr.clone()
} else {
// if it is DISTINCT ALL, go one level down
if let LogicalPlan::Distinct(Distinct::All(_)) = node {
return Ok(Transformed::no(node));
};
let all_names = self
.schema
.column_schemas()
.iter()
.map(|c| c.name.clone())
.collect::<BTreeSet<_>>();
// first match by position
for (idx, expr) in exprs.iter_mut().enumerate() {
if !all_names.contains(&expr.qualified_name().1) {
if let Some(col_name) = self
.schema
.column_schemas()
.get(idx)
.map(|c| c.name.clone())
{
// if the data types mismatch, a later check_execute will error out,
// hence no need to check it here; besides, the optimizer pass might be able to cast it,
// so checking here is not necessary
*expr = expr.clone().alias(col_name);
}
}
}
// FIXME(discord9): just read plan.expr and do stuffs
let mut exprs = node.expressions();
// add columns if have different column count
let query_col_cnt = exprs.len();
let table_col_cnt = self.schema.column_schemas().len();
debug!("query_col_cnt={query_col_cnt}, table_col_cnt={table_col_cnt}");
let placeholder_ts_expr =
datafusion::logical_expr::lit(ScalarValue::TimestampMillisecond(Some(0), None))
.alias(AUTO_CREATED_PLACEHOLDER_TS_COL);
let table_col_cnt = self.schema.column_schemas.len();
info!("query_col_cnt={query_col_cnt}, table_col_cnt={table_col_cnt}");
if query_col_cnt == table_col_cnt {
// still need to add alias, see below
self.is_rewritten = true;
return Ok(Transformed::no(node));
} else if query_col_cnt + 1 == table_col_cnt {
let last_col_schema = self.schema.column_schemas().last().unwrap();
let last_col_schema = self.schema.column_schemas.last().unwrap();
// if the time index column is auto-created, add it
if last_col_schema.name == AUTO_CREATED_PLACEHOLDER_TS_COL
&& self.schema.timestamp_index() == Some(table_col_cnt - 1)
&& self.schema.timestamp_index == Some(table_col_cnt - 1)
{
exprs.push(placeholder_ts_expr);
exprs.push(datafusion::logical_expr::lit(0));
} else if last_col_schema.data_type.is_timestamp() {
// this is the update_at column
exprs.push(datafusion::prelude::now().alias(&last_col_schema.name));
exprs.push(datafusion::prelude::now());
} else {
// helpful error message
return Err(DataFusionError::Plan(format!(
@@ -317,11 +221,11 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
)));
}
} else if query_col_cnt + 2 == table_col_cnt {
let mut col_iter = self.schema.column_schemas().iter().rev();
let mut col_iter = self.schema.column_schemas.iter().rev();
let last_col_schema = col_iter.next().unwrap();
let second_last_col_schema = col_iter.next().unwrap();
if second_last_col_schema.data_type.is_timestamp() {
exprs.push(datafusion::prelude::now().alias(&second_last_col_schema.name));
exprs.push(datafusion::prelude::now());
} else {
return Err(DataFusionError::Plan(format!(
"Expect the second last column in the table to be timestamp column, found column {} with type {:?}",
@@ -331,9 +235,9 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
}
if last_col_schema.name == AUTO_CREATED_PLACEHOLDER_TS_COL
&& self.schema.timestamp_index() == Some(table_col_cnt - 1)
&& self.schema.timestamp_index == Some(table_col_cnt - 1)
{
exprs.push(placeholder_ts_expr);
exprs.push(datafusion::logical_expr::lit(0));
} else {
return Err(DataFusionError::Plan(format!(
"Expect timestamp column {}, found {:?}",
@@ -343,7 +247,7 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
} else {
return Err(DataFusionError::Plan(format!(
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}",
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas()
query_col_cnt, node.expressions(), table_col_cnt, self.schema.column_schemas
)));
}
@@ -351,13 +255,10 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
let new_plan = node.with_new_exprs(exprs, node.inputs().into_iter().cloned().collect())?;
Ok(Transformed::yes(new_plan))
}
/// We might add new columns, so we need to recompute the schema
fn f_up(&mut self, node: Self::Node) -> DfResult<Transformed<Self::Node>> {
node.recompute_schema().map(Transformed::yes)
}
}
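
Condensed, the rewriter above compares the number of query output columns with the number of sink table columns and appends at most two trailing expressions: `now()` for an update-at timestamp and a constant for the `__ts_placeholder` time index. A sketch of that decision with column names as plain strings and the data-type checks omitted:

/// Sketch: which expressions get appended to the outermost projection, given the
/// query column count and the sink table's column names.
fn exprs_to_append(query_col_cnt: usize, table_cols: &[&str]) -> Result<Vec<String>, String> {
    let missing = table_cols
        .len()
        .checked_sub(query_col_cnt)
        .ok_or_else(|| {
            format!(
                "query has more columns than the sink table ({query_col_cnt} vs {})",
                table_cols.len()
            )
        })?;
    match missing {
        0 => Ok(vec![]),
        // One missing column: either the placeholder time index or an update-at timestamp.
        1 if *table_cols.last().unwrap() == "__ts_placeholder" => {
            Ok(vec!["0 AS __ts_placeholder".to_string()])
        }
        1 => Ok(vec![format!("now() AS {}", table_cols.last().unwrap())]),
        // Two missing columns: update-at first, then the placeholder time index.
        2 => Ok(vec![
            format!("now() AS {}", table_cols[table_cols.len() - 2]),
            "0 AS __ts_placeholder".to_string(),
        ]),
        n => Err(format!(
            "Expect the table to have 0, 1 or 2 columns more than the query, found {n} extra"
        )),
    }
}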
// TODO(discord9): a method to find out the precise time window
/// Find out the `Filter` Node corresponding to innermost(deepest) `WHERE` and add a new filter expr to it
#[derive(Debug)]
pub struct AddFilterRewriter {
@@ -400,15 +301,11 @@ impl TreeNodeRewriter for AddFilterRewriter {
#[cfg(test)]
mod test {
use std::sync::Arc;
use datafusion_common::tree_node::TreeNode as _;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::schema::ColumnSchema;
use pretty_assertions::assert_eq;
use query::query_engine::DefaultSerializer;
use session::context::QueryContext;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use super::*;
use crate::test_utils::create_test_query_engine;
@@ -489,7 +386,7 @@ mod test {
// add update_at
(
"SELECT number FROM numbers_with_ts",
Ok("SELECT numbers_with_ts.number, now() AS ts FROM numbers_with_ts"),
Ok("SELECT numbers_with_ts.number, now() FROM numbers_with_ts"),
vec![
ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
@@ -503,7 +400,7 @@ mod test {
// add ts placeholder
(
"SELECT number FROM numbers_with_ts",
Ok("SELECT numbers_with_ts.number, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"),
Ok("SELECT numbers_with_ts.number, 0 FROM numbers_with_ts"),
vec![
ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
@@ -531,7 +428,7 @@ mod test {
// add update_at and ts placeholder
(
"SELECT number FROM numbers_with_ts",
Ok("SELECT numbers_with_ts.number, now() AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"),
Ok("SELECT numbers_with_ts.number, now(), 0 FROM numbers_with_ts"),
vec![
ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
@@ -550,7 +447,7 @@ mod test {
// add ts placeholder
(
"SELECT number, ts FROM numbers_with_ts",
Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"),
Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts, 0 FROM numbers_with_ts"),
vec![
ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
@@ -569,7 +466,7 @@ mod test {
// add update_at after time index column
(
"SELECT number, ts FROM numbers_with_ts",
Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts, now() AS update_atat FROM numbers_with_ts"),
Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts, now() FROM numbers_with_ts"),
vec![
ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
@@ -631,8 +528,8 @@ mod test {
let query_engine = create_test_query_engine();
let ctx = QueryContext::arc();
for (before, after, column_schemas) in testcases {
let schema = Arc::new(Schema::new(column_schemas));
let mut add_auto_column_rewriter = AddAutoColumnRewriter::new(schema);
let raw_schema = RawSchema::new(column_schemas);
let mut add_auto_column_rewriter = AddAutoColumnRewriter::new(raw_schema);
let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), before, false)
.await
@@ -703,18 +600,4 @@ mod test {
);
}
}
#[tokio::test]
async fn test_null_cast() {
let query_engine = create_test_query_engine();
let ctx = QueryContext::arc();
let sql = "SELECT NULL::DOUBLE FROM numbers_with_ts";
let plan = sql_to_df_plan(ctx, query_engine.clone(), sql, false)
.await
.unwrap();
let _sub_plan = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.unwrap();
}
}

View File

@@ -25,6 +25,7 @@ use datafusion::config::ConfigOptions;
use datafusion::error::DataFusionError;
use datafusion::functions_aggregate::count::count_udaf;
use datafusion::functions_aggregate::sum::sum_udaf;
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
use datafusion::optimizer::optimize_projections::OptimizeProjections;
@@ -41,7 +42,6 @@ use datafusion_expr::{
BinaryExpr, ColumnarValue, Expr, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl,
Signature, TypeSignature, Volatility,
};
use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule;
use query::parser::QueryLanguageParser;
use query::query_engine::DefaultSerializer;
use query::QueryEngine;
@@ -61,9 +61,9 @@ pub async fn apply_df_optimizer(
) -> Result<datafusion_expr::LogicalPlan, Error> {
let cfg = ConfigOptions::new();
let analyzer = Analyzer::with_rules(vec![
Arc::new(CountWildcardToTimeIndexRule),
Arc::new(AvgExpandRule),
Arc::new(TumbleExpandRule),
Arc::new(CountWildcardRule::new()),
Arc::new(AvgExpandRule::new()),
Arc::new(TumbleExpandRule::new()),
Arc::new(CheckGroupByRule::new()),
Arc::new(TypeCoercion::new()),
]);
@@ -128,7 +128,13 @@ pub async fn sql_to_flow_plan(
}
#[derive(Debug)]
struct AvgExpandRule;
struct AvgExpandRule {}
impl AvgExpandRule {
pub fn new() -> Self {
Self {}
}
}
impl AnalyzerRule for AvgExpandRule {
fn analyze(
@@ -325,7 +331,13 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> {
/// expand tumble in aggr expr to tumble_start and tumble_end with column name like `window_start`
#[derive(Debug)]
struct TumbleExpandRule;
struct TumbleExpandRule {}
impl TumbleExpandRule {
pub fn new() -> Self {
Self {}
}
}
impl AnalyzerRule for TumbleExpandRule {
fn analyze(

View File

@@ -49,8 +49,6 @@ pub trait FlowEngine {
async fn flush_flow(&self, flow_id: FlowId) -> Result<usize, Error>;
/// Check if the flow exists
async fn flow_exist(&self, flow_id: FlowId) -> Result<bool, Error>;
/// List all flows
async fn list_flows(&self) -> Result<impl IntoIterator<Item = FlowId>, Error>;
/// Handle the insert requests for the flow
async fn handle_flow_inserts(
&self,

View File

@@ -46,12 +46,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Flow engine is still recovering"))]
FlowNotRecovered {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Error encountered while creating flow: {sql}"))]
CreateFlow {
sql: String,
@@ -67,16 +61,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"No available frontend found after timeout: {timeout:?}, context: {context}"
))]
NoAvailableFrontend {
timeout: std::time::Duration,
context: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("External error"))]
External {
source: BoxedError,
@@ -165,13 +149,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Unsupported: {reason}"))]
Unsupported {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Unsupported temporal filter: {reason}"))]
UnsupportedTemporalFilter {
reason: String,
@@ -212,25 +189,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Illegal check task state: {reason}"))]
IllegalCheckTaskState {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display(
"Failed to sync with check task for flow {} with allow_drop={}",
flow_id,
allow_drop
))]
SyncCheckTask {
flow_id: FlowId,
allow_drop: bool,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to start server"))]
StartServer {
#[snafu(implicit)]
@@ -312,24 +270,20 @@ impl ErrorExt for Error {
Self::Eval { .. }
| Self::JoinTask { .. }
| Self::Datafusion { .. }
| Self::InsertIntoFlow { .. }
| Self::NoAvailableFrontend { .. }
| Self::FlowNotRecovered { .. } => StatusCode::Internal,
| Self::InsertIntoFlow { .. } => StatusCode::Internal,
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
Self::TableNotFound { .. }
| Self::TableNotFoundMeta { .. }
| Self::FlowNotFound { .. }
| Self::ListFlows { .. } => StatusCode::TableNotFound,
Self::FlowNotFound { .. } => StatusCode::FlowNotFound,
Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
Self::CreateFlow { .. } | Self::Arrow { .. } | Self::Time { .. } => {
StatusCode::EngineExecuteQuery
}
Self::Unexpected { .. }
| Self::SyncCheckTask { .. }
| Self::IllegalCheckTaskState { .. } => StatusCode::Unexpected,
Self::NotImplemented { .. }
| Self::UnsupportedTemporalFilter { .. }
| Self::Unsupported { .. } => StatusCode::Unsupported,
Self::Unexpected { .. } => StatusCode::Unexpected,
Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
StatusCode::Unsupported
}
Self::External { source, .. } => source.status_code(),
Self::Internal { .. } | Self::CacheRequired { .. } => StatusCode::Internal,
Self::StartServer { source, .. } | Self::ShutdownServer { source, .. } => {

View File

@@ -60,7 +60,7 @@ pub enum GenericFn {
Mul,
Div,
Mod,
// variadic func
// varadic func
And,
Or,
// unmaterialized func

View File

@@ -43,8 +43,8 @@ mod utils;
#[cfg(test)]
mod test_utils;
pub use adapter::{FlowConfig, FlowStreamingEngineRef, FlownodeOptions, StreamingEngine};
pub use batching_mode::frontend_client::{FrontendClient, GrpcQueryHandlerWithBoxedError};
pub use adapter::{FlowConfig, FlowWorkerManager, FlowWorkerManagerRef, FlownodeOptions};
pub use batching_mode::frontend_client::FrontendClient;
pub(crate) use engine::{CreateFlowArgs, FlowId, TableName};
pub use error::{Error, Result};
pub use server::{

View File

@@ -29,7 +29,6 @@ use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::{Flownode, NodeManagerRef};
use common_query::Output;
use common_runtime::JoinHandle;
use common_telemetry::tracing::info;
use futures::{FutureExt, TryStreamExt};
use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
@@ -43,7 +42,7 @@ use servers::error::{StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
use servers::http::HttpServerBuilder;
use servers::metrics_handler::MetricsHandler;
use servers::server::{ServerHandler, ServerHandlers};
use session::context::QueryContextRef;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::{broadcast, oneshot, Mutex};
@@ -51,32 +50,28 @@ use tonic::codec::CompressionEncoding;
use tonic::transport::server::TcpIncoming;
use tonic::{Request, Response, Status};
use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef};
use crate::adapter::{create_worker, FlowStreamingEngineRef};
use crate::batching_mode::engine::BatchingEngine;
use crate::adapter::{create_worker, FlowWorkerManagerRef};
use crate::error::{
to_status_with_last_err, CacheRequiredSnafu, ExternalSnafu, ListFlowsSnafu, ParseAddrSnafu,
ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
to_status_with_last_err, CacheRequiredSnafu, CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu,
ListFlowsSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
};
use crate::heartbeat::HeartbeatTask;
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
use crate::transform::register_function_to_query_engine;
use crate::utils::{SizeReportSender, StateReportHandler};
use crate::{Error, FlownodeOptions, FrontendClient, StreamingEngine};
use crate::{CreateFlowArgs, Error, FlowWorkerManager, FlownodeOptions, FrontendClient};
pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
/// wrapping flow node manager to avoid orphan rule with Arc<...>
#[derive(Clone)]
pub struct FlowService {
pub dual_engine: FlowDualEngineRef,
/// TODO(discord9): replace with dual engine
pub manager: FlowWorkerManagerRef,
}
impl FlowService {
pub fn new(manager: FlowDualEngineRef) -> Self {
Self {
dual_engine: manager,
}
pub fn new(manager: FlowWorkerManagerRef) -> Self {
Self { manager }
}
}
@@ -91,7 +86,7 @@ impl flow_server::Flow for FlowService {
.start_timer();
let request = request.into_inner();
self.dual_engine
self.manager
.handle(request)
.await
.map_err(|err| {
@@ -131,7 +126,7 @@ impl flow_server::Flow for FlowService {
.with_label_values(&["in"])
.inc_by(row_count as u64);
self.dual_engine
self.manager
.handle_inserts(request)
.await
.map(Response::new)
@@ -144,16 +139,11 @@ pub struct FlownodeServer {
inner: Arc<FlownodeServerInner>,
}
/// FlownodeServerInner is the inner state of FlownodeServer;
/// this struct is mostly useful for constructing, starting, and stopping the
/// flow node server
struct FlownodeServerInner {
/// worker shutdown signal, not to be confused with server_shutdown_tx
worker_shutdown_tx: Mutex<broadcast::Sender<()>>,
/// server shutdown signal for shutdown grpc server
server_shutdown_tx: Mutex<broadcast::Sender<()>>,
/// streaming task handler
streaming_task_handler: Mutex<Option<JoinHandle<()>>>,
flow_service: FlowService,
}
@@ -166,30 +156,16 @@ impl FlownodeServer {
flow_service,
worker_shutdown_tx: Mutex::new(tx),
server_shutdown_tx: Mutex::new(server_tx),
streaming_task_handler: Mutex::new(None),
}),
}
}
/// Start the background task for streaming computation.
///
/// Should be called only after the heartbeat is established, so cluster info can be obtained
async fn start_workers(&self) -> Result<(), Error> {
let manager_ref = self.inner.flow_service.dual_engine.clone();
let handle = manager_ref
.streaming_engine()
let manager_ref = self.inner.flow_service.manager.clone();
let _handle = manager_ref
.clone()
.run_background(Some(self.inner.worker_shutdown_tx.lock().await.subscribe()));
self.inner
.streaming_task_handler
.lock()
.await
.replace(handle);
self.inner
.flow_service
.dual_engine
.start_flow_consistent_check_task()
.await?;
Ok(())
}
@@ -200,11 +176,6 @@ impl FlownodeServer {
if tx.send(()).is_err() {
info!("Receiver dropped, the flow node server has already shutdown");
}
self.inner
.flow_service
.dual_engine
.stop_flow_consistent_check_task()
.await?;
Ok(())
}
}
@@ -301,8 +272,8 @@ impl FlownodeInstance {
&self.flownode_server
}
pub fn flow_engine(&self) -> FlowDualEngineRef {
self.flownode_server.inner.flow_service.dual_engine.clone()
pub fn flow_worker_manager(&self) -> FlowWorkerManagerRef {
self.flownode_server.inner.flow_service.manager.clone()
}
pub fn setup_services(&mut self, services: ServerHandlers) {
@@ -363,6 +334,7 @@ impl FlownodeBuilder {
None,
None,
None,
None,
false,
Default::default(),
self.opts.query.clone(),
@@ -371,21 +343,12 @@ impl FlownodeBuilder {
self.build_manager(query_engine_factory.query_engine())
.await?,
);
let batching = Arc::new(BatchingEngine::new(
self.frontend_client.clone(),
query_engine_factory.query_engine(),
self.flow_metadata_manager.clone(),
self.table_meta.clone(),
self.catalog_manager.clone(),
));
let dual = FlowDualEngine::new(
manager.clone(),
batching,
self.flow_metadata_manager.clone(),
self.catalog_manager.clone(),
);
let server = FlownodeServer::new(FlowService::new(Arc::new(dual)));
if let Err(err) = self.recover_flows(&manager).await {
common_telemetry::error!(err; "Failed to recover flows");
}
let server = FlownodeServer::new(FlowService::new(manager.clone()));
let heartbeat_task = self.heartbeat_task;
@@ -397,12 +360,100 @@ impl FlownodeBuilder {
Ok(instance)
}
/// recover all flow tasks in this flownode in distributed mode (nodeid is Some(<num>)),
///
/// or recover all existing flow tasks in standalone mode (nodeid is None)
///
/// TODO(discord9): persistent flow tasks with internal state
async fn recover_flows(&self, manager: &FlowWorkerManagerRef) -> Result<usize, Error> {
let nodeid = self.opts.node_id;
let to_be_recovered: Vec<_> = if let Some(nodeid) = nodeid {
let to_be_recover = self
.flow_metadata_manager
.flownode_flow_manager()
.flows(nodeid)
.try_collect::<Vec<_>>()
.await
.context(ListFlowsSnafu { id: Some(nodeid) })?;
to_be_recover.into_iter().map(|(id, _)| id).collect()
} else {
let all_catalogs = self
.catalog_manager
.catalog_names()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let mut all_flow_ids = vec![];
for catalog in all_catalogs {
let flows = self
.flow_metadata_manager
.flow_name_manager()
.flow_names(&catalog)
.await
.try_collect::<Vec<_>>()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
all_flow_ids.extend(flows.into_iter().map(|(_, id)| id.flow_id()));
}
all_flow_ids
};
let cnt = to_be_recovered.len();
// TODO(discord9): recover in parallel
for flow_id in to_be_recovered {
let info = self
.flow_metadata_manager
.flow_info_manager()
.get(flow_id)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(FlowNotFoundSnafu { id: flow_id })?;
let sink_table_name = [
info.sink_table_name().catalog_name.clone(),
info.sink_table_name().schema_name.clone(),
info.sink_table_name().table_name.clone(),
];
let args = CreateFlowArgs {
flow_id: flow_id as _,
sink_table_name,
source_table_ids: info.source_table_ids().to_vec(),
// because recovery should only happen on restart, `create_if_not_exists` and `or_replace` can be arbitrary values (since the flow doesn't exist yet),
// but for the sake of consistency and to make sure the flow is actually recovered, we set both to true
// (which is also fine, since the check that disallows both being true is on metasrv and we already passed it)
create_if_not_exists: true,
or_replace: true,
expire_after: info.expire_after(),
comment: Some(info.comment().clone()),
sql: info.raw_sql().clone(),
flow_options: info.options().clone(),
query_ctx: Some(
QueryContextBuilder::default()
.current_catalog(info.catalog_name().clone())
.build(),
),
};
manager
.create_flow_inner(args)
.await
.map_err(BoxedError::new)
.with_context(|_| CreateFlowSnafu {
sql: info.raw_sql().clone(),
})?;
}
Ok(cnt)
}
/// build [`FlowWorkerManager`], note this doesn't take ownership of `self`,
/// nor does it actually start running the worker.
async fn build_manager(
&mut self,
query_engine: Arc<dyn QueryEngine>,
) -> Result<StreamingEngine, Error> {
) -> Result<FlowWorkerManager, Error> {
let table_meta = self.table_meta.clone();
register_function_to_query_engine(&query_engine);
@@ -411,7 +462,7 @@ impl FlownodeBuilder {
let node_id = self.opts.node_id.map(|id| id as u32);
let mut man = StreamingEngine::new(node_id, query_engine, table_meta);
let mut man = FlowWorkerManager::new(node_id, query_engine, table_meta);
for worker_id in 0..num_workers {
let (tx, rx) = oneshot::channel();
@@ -493,10 +544,6 @@ impl<'a> FlownodeServiceBuilder<'a> {
}
}
/// Basically a tiny frontend that communicates with the datanode; unlike [`FrontendClient`],
/// which connects to a real frontend, this is used by flow's streaming engine for simple queries.
///
/// For heavy queries use [`FrontendClient`], which offloads computation to the frontend, lifting the load from the flownode
#[derive(Clone)]
pub struct FrontendInvoker {
inserter: Arc<Inserter>,
@@ -518,7 +565,7 @@ impl FrontendInvoker {
}
pub async fn build_from(
flow_streaming_engine: FlowStreamingEngineRef,
flow_worker_manager: FlowWorkerManagerRef,
catalog_manager: CatalogManagerRef,
kv_backend: KvBackendRef,
layered_cache_registry: LayeredCacheRegistryRef,
@@ -553,7 +600,7 @@ impl FrontendInvoker {
node_manager.clone(),
));
let query_engine = flow_streaming_engine.query_engine.clone();
let query_engine = flow_worker_manager.query_engine.clone();
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
@@ -581,7 +628,7 @@ impl FrontendInvoker {
.start_timer();
self.inserter
.handle_row_inserts(requests, ctx, &self.statement_executor, false)
.handle_row_inserts(requests, ctx, &self.statement_executor)
.await
.map_err(BoxedError::new)
.context(common_frontend::error::ExternalSnafu)

View File

@@ -153,6 +153,7 @@ pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
None,
None,
None,
None,
false,
QueryOptions::default(),
);

View File

@@ -270,6 +270,7 @@ mod test {
None,
None,
None,
None,
false,
QueryOptions::default(),
);

View File

@@ -15,7 +15,6 @@ api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
auth.workspace = true
bytes.workspace = true
cache.workspace = true
catalog.workspace = true
client.workspace = true
@@ -40,7 +39,6 @@ datafusion.workspace = true
datafusion-expr.workspace = true
datanode.workspace = true
datatypes.workspace = true
futures.workspace = true
humantime-serde.workspace = true
lazy_static.workspace = true
log-query.workspace = true
@@ -49,7 +47,6 @@ meta-client.workspace = true
num_cpus.workspace = true
opentelemetry-proto.workspace = true
operator.workspace = true
otel-arrow-rust.workspace = true
partition.workspace = true
pipeline.workspace = true
prometheus.workspace = true

Some files were not shown because too many files have changed in this diff