Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00.

Compare commits: c112cdf241 ... v0.17.2 (203 commits)
4bb9ceb63b, 38456638f8, 97c0b1f5c1, 4fc7f12360, ed17997449, 849ae8ebb6, a0587e2e87, 1ed71169ac, e62f0e2b64, f92e753a34,
a22b016f90, 7a9fa99069, d808e7be7e, 8e22fcfd5c, 26729c31a6, b73617eaba, 3b909f63e3, 0d4e07eddd, b94ce9019d, 3dcd40c4ba,
a67803d0e9, aa7e7942f8, f1b7581dc3, cd761df369, 0cea6ae64d, 8bf772fb50, 9c1240921d, eb52129a91, a0a2b40cbe, 067c4458d6,
4e9c31bf5c, 9320a6ddaa, 4c9fcb7dee, 9dc16772fe, 6ee91f6af4, 9175fa643d, 0e962844ac, 246b832d79, e62a022d76, e595885dc6,
dd3432e6ca, ab96703d8f, 73add808a6, 1234911ed3, d57c0db9e6, 4daf5adce5, 575093f85f, ac82ad4549, 367a25af06, d585c23ba5,
f55023f300, 9213315613, e77ad8e9dc, 7bc669e991, b84cd19145, cbcfdf9d65, bacd9c7d15, f441598247, 556c408e7b, ec817f6877,
32e73dad12, bc20b17bc5, 200422313f, 8452a9d579, 5ef4dd1743, 32a3ef36f9, 566a647ec7, 906e1ca0bf, b921e41abf, 6782bcddfa,
3d1a4b56a4, 8894cb5406, bb334e1594, ec8ff48473, d99734b97b, eb5e627ddd, 69eed2c3fa, 48572d18a8, d5575d3fa4, 83a65a81c0,
288f69a30f, d6d5dad758, d82f36db6a, 68ac37461b, 4c2955b86b, 390aef7563, c6c33d14aa, 3014972202, 6839b5aef4, a04ec07b61,
d774996e89, 2b43ff30b6, eaceae4c91, cdc168e753, 5d5817b851, a98c48a9b2, 6692957e08, 896d72191e, 5eec3485fe, 7e573e497c,
474a689309, 2995eddca5, 05529387d9, 819531393f, d6bc117408, 7402320abc, 5420d6f7fb, 7af471c5aa, 9cdd0d8251, 5a4036cc66,
8fc3a9a9d7, 0b29b41c17, 78dca8a0d7, bf191c5763, b652ea52ee, f64fc3a57a, 326198162e, f9d2a89a0c, dfc29eb3b3, 351826cd32,
60e01c7c3d, 021ad09c21, 2a3e4c7a82, 92fd34ba22, d03f85287e, 4d97754cb4, 1b6d924169, 80f3ae650c, c0fe800e79, 56f5ccf823,
fb3b1d4866, 4fb7d92f7c, 8659412cac, dea87b7e57, a678b4dfd6, ccccaf7734, f0bec4940f, 5eb491df12, 1d84e802d8, 2992e70393,
8a44137f37, 777da35b0d, 9ad9a7d2bc, ff5d672583, e495c614f7, e80e4a9ed7, 1977ae50ee, 5cec0d4e3a, d2d6489b2f, 25f926ea7d,
f159fcf599, e4454e0c7d, 0781adaa3d, 253d89b5cc, 3a2f5413e0, 214ffe7109, 3b1f172ab8, 0215b39f61, 01dc789816, bbe48e9e8b,
e2015ce1af, 7bb765af1d, 080b4b5d53, c7c8495a6b, bbab35f285, 6c6487ab30, 757694ae38, 39e2f122eb, 877ce6e893, c8da35c7e5,
309e9d978c, 3a9f0220b5, cc35bab5e4, 414db41219, ea024874e7, e64469bbc4, 875207d26c, 9871c22740, 50f7f61fdc, 9c3b83e84d,
e81d0f5861, 29e0092468, 67a93a07a2, 1afa0afc67, 414101fafa, 280024d7f8, 865ca44dbd, a3e55565dc, bed0c1e55f, 572e29b158,
31cb769507, e19493db4a, 9817eb934d, 8639961cc9, a9cd117706, 9485dbed64, 21b71d1e10, cfaa9b4dda, 19ad9a7f85, 9e2f793b04,
52466fdd92, 869f8bf68a, 9527e0df2f
@@ -12,7 +12,7 @@ runs:
steps:
- name: Install Etcd cluster
shell: bash
run: |
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
@@ -24,4 +24,9 @@ runs:
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag=3.6.1-debian-12-r3 \
--version 12.0.8 \
-n ${{ inputs.namespace }}
@@ -1,3 +1,8 @@
logging:
level: "info"
format: "json"
filters:
- log_store=debug
meta:
configData: |-
[runtime]
@@ -12,7 +12,7 @@ runs:
steps:
- name: Install Kafka cluster
shell: bash
run: |
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
@@ -23,4 +23,8 @@ runs:
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
--set image.registry=docker.io \
--set image.repository=greptime/kafka \
--set image.tag=3.9.0-debian-12-r1 \
--version 31.0.0 \
-n ${{ inputs.namespace }}
@@ -6,9 +6,7 @@ inputs:
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
description: "The PostgreSQL namespace"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
@@ -22,7 +20,11 @@ runs:
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/postgresql \
--set image.tag=17.5.0-debian-12-r3 \
--version 16.7.4 \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
.github/scripts/check-version.sh (4 lines changed, vendored)

@@ -35,8 +35,8 @@ HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | ta

if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
fi
.github/scripts/deploy-greptimedb.sh (40 lines changed, vendored)

@@ -3,12 +3,14 @@
set -e
set -o pipefail

KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"

# Create a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {

@@ -35,10 +37,16 @@ function add_greptime_chart() {
function deploy_etcd_cluster() {
local namespace="$1"

helm install etcd "$ETCD_CHART" \
helm upgrade --install etcd "$ETCD_CHART" \
--version "$ETCD_CHART_VERSION" \
--create-namespace \
--set replicaCount=3 \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag="$ETCD_IMAGE_TAG" \
-n "$namespace"

# Wait for etcd cluster to be ready.

@@ -48,7 +56,8 @@ function deploy_etcd_cluster() {
# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
# Use the latest chart and image.
helm install greptimedb-operator greptime/greptimedb-operator \
helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
--create-namespace \
--set image.tag=latest \
-n "$DEFAULT_INSTALL_NAMESPACE"

@@ -66,9 +75,11 @@ function deploy_greptimedb_cluster() {

deploy_etcd_cluster "$install_namespace"

helm install "$cluster_name" greptime/greptimedb-cluster \
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
-n "$install_namespace"

# Wait for greptimedb cluster to be ready.

@@ -101,15 +112,17 @@ function deploy_greptimedb_cluster_with_s3_storage() {

deploy_etcd_cluster "$install_namespace"

helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
--set storage.credentials.secretName=s3-credentials \
--set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
--set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set objectStorage.s3.region="$AWS_REGION" \
--set objectStorage.s3.root="$DATA_ROOT" \
--set objectStorage.credentials.secretName=s3-credentials \
--set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"

# Wait for greptimedb cluster to be ready.
while true; do

@@ -134,7 +147,8 @@ function deploy_greptimedb_cluster_with_s3_storage() {
# Deploy standalone greptimedb.
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
function deploy_standalone_greptimedb() {
helm install greptimedb-standalone greptime/greptimedb-standalone \
helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE"
.github/scripts/pull-test-deps-images.sh (34 lines changed, vendored, new executable file)

@@ -0,0 +1,34 @@
#!/bin/bash

# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.

set -e

MAX_RETRIES=3

IMAGES=(
"greptime/zookeeper:3.7"
"greptime/kafka:3.9.0-debian-12-r1"
"greptime/etcd:3.6.1-debian-12-r3"
"greptime/minio:2024"
"greptime/mysql:5.7"
)

for image in "${IMAGES[@]}"; do
for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
if docker pull "$image"; then
# Successfully pulled the image.
break
else
# Use some simple exponential backoff to avoid rate limiting.
if [ $attempt -lt $MAX_RETRIES ]; then
sleep_seconds=$((attempt * 5))
echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
sleep $sleep_seconds # 5s, 10s delays
else
echo "Failed to pull $image after $MAX_RETRIES attempts"
exit 1
fi
fi
done
done
@@ -21,7 +21,7 @@ update_dev_builder_version() {

# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git commit -s -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME

# Create a Pull Request.
.github/workflows/develop.yml (47 lines changed, vendored)

@@ -12,6 +12,7 @@ on:
- 'docker/**'
- '.gitignore'
- 'grafana/**'
- 'Makefile'
workflow_dispatch:

name: CI

@@ -617,10 +618,12 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false

- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures
run: docker compose up -d --wait kafka
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka

- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:

@@ -682,6 +685,30 @@ jobs:
- name: Run cargo clippy
run: make clippy

check-udeps:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check Unused Dependencies
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
shared-key: "check-udeps"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-udeps
run: cargo install cargo-udeps --locked
- name: Check unused dependencies
run: make check-udeps

conflict-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check for conflict

@@ -697,7 +724,7 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
needs: [conflict-check, clippy, fmt, check-udeps]
steps:
- uses: actions/checkout@v4
with:

@@ -709,7 +736,7 @@ jobs:
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -719,9 +746,11 @@ jobs:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest

- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait

- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
env:

@@ -738,8 +767,11 @@ jobs:
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
GT_POSTGRES15_SCHEMA: test_schema
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093

@@ -772,9 +804,11 @@ jobs:
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov

- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait

- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
env:

@@ -790,8 +824,11 @@ jobs:
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
GT_POSTGRES15_SCHEMA: test_schema
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
.github/workflows/docs.yml (8 lines changed, vendored)

@@ -10,6 +10,7 @@ on:
- 'docker/**'
- '.gitignore'
- 'grafana/**'
- 'Makefile'
push:
branches:
- main

@@ -21,6 +22,7 @@ on:
- 'docker/**'
- '.gitignore'
- 'grafana/**'
- 'Makefile'
workflow_dispatch:

name: CI

@@ -65,6 +67,12 @@ jobs:
steps:
- run: 'echo "No action required"'

check-udeps:
name: Unused Dependencies
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'

coverage:
runs-on: ubuntu-latest
steps:
.github/workflows/release.yml (11 lines changed, vendored)

@@ -111,7 +111,8 @@ jobs:
# The 'version' use as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}

should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
# The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
steps:
- name: Checkout
uses: actions/checkout@v4

@@ -321,7 +322,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

- name: Set build image result
id: set-build-image-result

@@ -368,7 +369,7 @@ jobs:
dev-mode: false
upload-to-s3: true
update-version-info: true
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

publish-github-release:
name: Create GitHub release and upload artifacts

@@ -476,7 +477,7 @@ jobs:

bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:

@@ -497,7 +498,7 @@ jobs:

bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
.github/workflows/semantic-pull-request.yml (6 lines changed, vendored)

@@ -1,7 +1,7 @@
name: "Semantic Pull Request"

on:
pull_request:
pull_request_target:
types:
- opened
- reopened

@@ -12,9 +12,9 @@ concurrency:
cancel-in-progress: true

permissions:
issues: write
contents: write
contents: read
pull-requests: write
issues: write

jobs:
check:
.gitignore (3 lines changed, vendored)

@@ -52,6 +52,9 @@ venv/
tests-fuzz/artifacts/
tests-fuzz/corpus/

# cargo-udeps reports
udeps-report.json

# Nix
.direnv
.envrc
@@ -55,14 +55,18 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`).
- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
- If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
`[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
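For reference, the cargo-udeps ignore entry mentioned in the checklist above would sit in the affected crate's `Cargo.toml`. A minimal sketch, assuming a target-specific dev-dependency; the `rexpect` name comes from the checklist itself, while the `cfg(unix)` block and the version number are purely illustrative:

```toml
# A target-specific dev-dependency that cargo-udeps may flag on other platforms.
# (The cfg expression and version are illustrative, not taken from the repository.)
[target.'cfg(unix)'.dev-dependencies]
rexpect = "0.5"

# Tell cargo-udeps not to report it as unused.
[package.metadata.cargo-udeps.ignore]
development = ["rexpect"]
```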
#### `pre-commit` Hooks

You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.

1. Install `pre-commit`
1. Install `pre-commit`

pip install pre-commit

@@ -70,7 +74,7 @@ You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run

brew install pre-commit

2. Install the `pre-commit` hooks
2. Install the `pre-commit` hooks

$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
Cargo.lock (5368 lines changed, generated): file diff suppressed because it is too large.
Cargo.toml (67 lines changed)

@@ -73,7 +73,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.16.0"
version = "0.17.2"
edition = "2021"
license = "Apache-2.0"

@@ -98,11 +98,12 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.6"
arrow = { version = "54.2", features = ["prettyprint"] }
arrow-array = { version = "54.2", default-features = false, features = ["chrono-tz"] }
arrow-flight = "54.2"
arrow-ipc = { version = "54.2", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "54.2", features = ["serde"] }
arrow = { version = "56.0", features = ["prettyprint"] }
arrow-array = { version = "56.0", default-features = false, features = ["chrono-tz"] }
arrow-buffer = "56.0"
arrow-flight = "56.0"
arrow-ipc = { version = "56.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "56.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum

@@ -121,26 +122,30 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
dotenv = "0.15"
either = "1.15"
etcd-client = "0.14"
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
"tls",
"tls-roots",
] }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "fe8c13f5f3c1fbef63f57fbdd29f0490dfeb987b" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "66eb089afa6baaa3ddfafabd0a4abbe317d012c3" }
hex = "0.4"
http = "1"
humantime = "2.1"

@@ -151,7 +156,7 @@ itertools = "0.14"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.13"
moka = "0.12"

@@ -159,9 +164,9 @@ nalgebra = "0.33"
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
notify = "8.0"
num_cpus = "1.16"
object_store_opendal = "0.50"
object_store_opendal = "0.54"
once_cell = "1.18"
opentelemetry-proto = { version = "0.27", features = [
opentelemetry-proto = { version = "0.30", features = [
"gen-tonic",
"metrics",
"trace",

@@ -170,12 +175,14 @@ opentelemetry-proto = { version = "0.27", features = [
] }
ordered-float = { version = "4.3", features = ["serde"] }
parking_lot = "0.12"
parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.6", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
prost-types = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"

@@ -187,7 +194,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
"transport-tls",
] }
rstest = "0.25"

@@ -196,18 +203,18 @@ rust_decimal = "1.33"
rustc-hash = "2.0"
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
rustls = { version = "0.23.25", default-features = false }
sea-query = "0.32"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "1.1"
simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"
] } # branch = "v0.55.x"
sqlx = { version = "0.8", features = [
"runtime-tokio-rustls",
"mysql",

@@ -217,20 +224,20 @@ sqlx = { version = "0.8", features = [
strum = { version = "0.27", features = ["derive"] }
sysinfo = "0.33"
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio = { version = "1.47", features = ["full"] }
tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
vrl = "0.25"
zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

@@ -289,7 +296,7 @@ mito-codec = { path = "src/mito-codec" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
otel-arrow-rust = { git = "https://github.com/open-telemetry/otel-arrow", rev = "5d551412d2a12e689cde4d84c14ef29e36784e51", features = [
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
"server",
] }
partition = { path = "src/partition" }
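Because these entries live under `[workspace.dependencies]`, member crates pick up the bumped versions (for example arrow 56.0 and tokio 1.47) through workspace inheritance instead of repeating version numbers. A hedged sketch of what such a member crate's `Cargo.toml` could look like; the crate name and the particular dependencies chosen are illustrative, not copied from the repository:

```toml
[package]
name = "example-member-crate"   # illustrative name, not an actual workspace member
version.workspace = true        # inherits 0.17.2 from [workspace.package]
edition.workspace = true
license.workspace = true

[dependencies]
# Versions are resolved from the workspace's [workspace.dependencies] table above.
arrow.workspace = true
tokio.workspace = true
snafu.workspace = true
```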
Makefile (15 lines changed)

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-f55023f3-20250829091211
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu

@@ -22,7 +22,7 @@ SQLNESS_OPTS ?=
ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3
NEXTEST_OPTS := --retries ${RETRY_COUNT}
NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1

@@ -193,6 +193,17 @@ clippy: ## Check clippy rules.
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix

.PHONY: check-udeps
check-udeps: ## Check unused dependencies.
cargo udeps --workspace --all-targets

.PHONY: fix-udeps
fix-udeps: ## Remove unused dependencies automatically.
@echo "Running cargo-udeps to find unused dependencies..."
@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
@echo "Removing unused dependencies..."
@python3 scripts/fix-udeps.py udeps-report.json

.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
@@ -41,6 +41,7 @@
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |

@@ -147,6 +148,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -185,12 +187,13 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
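Putting the logging rows above together, the logging block of a node's TOML configuration would look roughly like the following. This is a sketch assembled from the documented defaults (note the new `/v1/traces` suffix on the OTLP endpoint), not a copy of a shipped config file:

```toml
[logging]
dir = "./greptimedb_data/logs"
level = "info"                                     # unset by default; shown here for illustration
enable_otlp_tracing = false
otlp_endpoint = "http://localhost:4318/v1/traces"  # new default endpoint path
otlp_export_protocol = "http"                      # or "grpc"
append_stdout = true
log_format = "text"                                # or "json"
max_log_files = 720

[logging.tracing_sample_ratio]
default_ratio = 1.0
```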
@@ -207,6 +210,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |

## Distributed Mode
@@ -240,11 +245,22 @@
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `internal_grpc` | -- | -- | The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend. |
| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `internal_grpc.tls` | -- | -- | internal gRPC server TLS options, see `mysql.tls` section. |
| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
| `internal_grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
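The new `internal_grpc` rows above correspond to a frontend TOML section roughly like this sketch. Values are the documented defaults; the TLS certificate and key paths stay unset unless TLS is actually enabled:

```toml
[internal_grpc]
bind_addr = "127.0.0.1:4010"
server_addr = "127.0.0.1:4010"
runtime_size = 8
flight_compression = "arrow_ipc"   # "none" | "transport" | "arrow_ipc" | "all"

[internal_grpc.tls]
mode = "disable"
watch = false
```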
@@ -290,19 +306,20 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
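For the frontend, the slow query rows above translate to a TOML block along these lines. This is a sketch built from the documented defaults, including the TTL that moves from `30d` to `90d`; it is not copied from an actual config file:

```toml
[slow_query]
enable = true
record_type = "system_table"   # or "log" to write greptimedb-slow-queries.* log files
threshold = "30s"
sample_ratio = 1.0
ttl = "90d"                    # TTL of the greptime_private.slow_queries system table
```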
@@ -311,6 +328,10 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |

### Metasrv
@@ -322,6 +343,7 @@
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
@@ -333,6 +355,12 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
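The new `backend_tls` options above map onto a metasrv TOML block like the following sketch. The file paths are the placeholder examples from the table itself, assuming a PostgreSQL or etcd metadata backend secured with a private CA:

```toml
[backend_tls]
mode = "require"                    # "disable" | "prefer" (default) | "require" | "verify_ca" | "verify_full"
cert_path = "/path/to/client.crt"   # client certificate, only needed for mutual TLS
key_path = "/path/to/client.key"
ca_cert_path = "/path/to/ca.crt"    # required for custom or self-signed CAs
watch = false                       # reload automatically when the files change
```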
@@ -360,26 +388,33 @@
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.auto_prune_interval` | String | `0s` | Interval of automatically WAL pruning.<br/>Set to `0s` to disable automatically WAL pruning which delete unused remote WAL entries periodically. |
|
||||
| `wal.trigger_flush_threshold` | Integer | `0` | The threshold to trigger a flush operation of a region in automatically WAL pruning.<br/>Metasrv will send a flush request to flush the region when:<br/>`trigger_flush_threshold` + `prunable_entry_id` < `max_prunable_entry_id`<br/>where:<br/>- `prunable_entry_id` is the maximum entry id that can be pruned of the region.<br/>- `max_prunable_entry_id` is the maximum prunable entry id among all regions in the same topic.<br/>Set to `0` to disable the flush operation. |
|
||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatically WAL pruning. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)`<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_prune_interval` | String | `30m` | Interval of automatically WAL pruning.<br/>Set to `0s` to disable automatically WAL pruning which delete unused remote WAL entries periodically.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatically WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
||||
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
||||
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
|
||||
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
|
||||
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`; any smaller value will be overridden to `10m`. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
@@ -389,6 +424,8 @@
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only take effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
|
||||
### Datanode
|
||||
@@ -484,6 +521,8 @@
|
||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in the manifest's `removed_files` field before also<br/>removing them from `removed_files`. Mostly for debugging purposes.<br/>If set to 0, only `keep_removed_file_ttl` decides when to remove files<br/>from the `removed_files` field. |
|
||||
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of the manifest<br/>after they are removed from the manifest.<br/>Files will only be removed from the `removed_files` field<br/>when both `keep_removed_file_count` and `keep_removed_file_ttl` are reached. |
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
@@ -501,6 +540,7 @@
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
@@ -539,12 +579,13 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
@@ -554,6 +595,8 @@
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only take effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
|
||||
### Flownode
|
||||
@@ -573,6 +616,12 @@
|
||||
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it will be removed from the list that the flownode uses to connect. |
|
||||
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
||||
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
||||
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
|
||||
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
|
||||
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for client. |
|
||||
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server Certificate file path. |
|
||||
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client Certificate file path. |
|
||||
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client Private key file path. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
@@ -600,14 +649,17 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only take effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `query` | -- | -- | -- |
|
||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1 so that it won't use too much CPU or memory. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
@@ -409,6 +409,19 @@ worker_request_batch_size = 64
|
||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||
manifest_checkpoint_distance = 10
|
||||
|
||||
|
||||
## Number of removed files to keep in manifest's `removed_files` field before also
|
||||
## remove them from `removed_files`. Mostly for debugging purpose.
|
||||
## If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files
|
||||
## from `removed_files` field.
|
||||
experimental_manifest_keep_removed_file_count = 256
|
||||
|
||||
## How long to keep removed files in the `removed_files` field of manifest
|
||||
## after they are removed from manifest.
|
||||
## files will only be removed from `removed_files` field
|
||||
## if both `keep_removed_file_count` and `keep_removed_file_ttl` is reached.
|
||||
experimental_manifest_keep_removed_file_ttl = "1h"
|
||||
|
||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
|
||||
@@ -474,6 +487,9 @@ sst_write_buffer_size = "8MB"
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 384
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
@@ -629,7 +645,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -643,6 +659,13 @@ max_log_files = 720
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -669,3 +692,11 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -30,6 +30,20 @@ node_id = 14
|
||||
#+experimental_max_filter_num_per_query=20
|
||||
## Time window merge distance
|
||||
#+experimental_time_window_merge_threshold=3
|
||||
## Read preference of the Frontend client.
|
||||
#+read_preference="Leader"
|
||||
[flow.batching_mode.frontend_tls]
|
||||
## Whether to enable TLS for client.
|
||||
#+enabled=false
|
||||
## Server Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
#+server_ca_cert_path=""
|
||||
## Client Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
#+client_cert_path=""
|
||||
## Client Private key file path.
|
||||
## @toml2docs:none-default
|
||||
#+client_key_path=""
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
@@ -106,7 +120,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -120,6 +134,13 @@ max_log_files = 720
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -136,3 +157,11 @@ default_ratio = 1.0
|
||||
## Parallelism of the query engine for query sent by flownode.
|
||||
## Default to 1, so it won't use too much cpu or memory
|
||||
parallelism = 1
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -79,6 +79,42 @@ key_path = ""
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend.
|
||||
[internal_grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:4010"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:4010"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## internal gRPC server TLS options, see `mysql.tls` section.
|
||||
[internal_grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -90,6 +126,8 @@ runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
|
||||
prepared_stmt_cache_size = 10000
|
||||
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
@@ -221,7 +259,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -235,6 +273,13 @@ max_log_files = 720
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -257,8 +302,8 @@ threshold = "30s"
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
|
||||
ttl = "30d"
|
||||
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
|
||||
ttl = "90d"
|
||||
|
||||
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
@@ -280,3 +325,16 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
## Configuration options for the event recorder.
|
||||
[event_recorder]
|
||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
||||
ttl = "90d"
|
||||
|
||||
@@ -23,6 +23,14 @@ backend = "etcd_store"
|
||||
## **Only used when backend is `postgres_store`.**
|
||||
meta_table_name = "greptime_metakv"
|
||||
|
||||
## Optional PostgreSQL schema for metadata table and election table name qualification.
|
||||
## When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),
|
||||
## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
|
||||
## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
|
||||
## **Only used when backend is `postgres_store`.**
|
||||
|
||||
meta_schema_name = "greptime_schema"
|
||||
|
||||
## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
|
||||
## Only used when backend is `postgres_store`.
|
||||
meta_election_lock_id = 1
|
||||
@@ -65,6 +73,34 @@ node_max_idle_time = "24hours"
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
|
||||
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
|
||||
[backend_tls]
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - "disable" - No TLS
|
||||
## - "prefer" (default) - Try TLS, fallback to plain
|
||||
## - "require" - Require TLS
|
||||
## - "verify_ca" - Require TLS and verify CA
|
||||
## - "verify_full" - Require TLS and verify hostname
|
||||
mode = "prefer"
|
||||
|
||||
## Path to client certificate file (for client authentication)
|
||||
## Like "/path/to/client.crt"
|
||||
cert_path = ""
|
||||
|
||||
## Path to client private key file (for client authentication)
|
||||
## Like "/path/to/client.key"
|
||||
key_path = ""
|
||||
|
||||
## Path to CA certificate file (for server certificate verification)
|
||||
## Required when using custom CAs or self-signed certificates
|
||||
## Leave empty to use system root certificates only
|
||||
## Like "/path/to/ca.crt"
|
||||
ca_cert_path = ""
|
||||
|
||||
## Watch for certificate file changes and auto reload
|
||||
watch = false
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -148,50 +184,69 @@ tcp_nodelay = true
|
||||
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
|
||||
provider = "raft_engine"
|
||||
|
||||
# Kafka wal config.
|
||||
|
||||
## The broker endpoints of the Kafka cluster.
|
||||
##
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
auto_create_topics = true
|
||||
|
||||
## Interval of automatically WAL pruning.
|
||||
## Set to `0s` to disable automatically WAL pruning which delete unused remote WAL entries periodically.
|
||||
auto_prune_interval = "0s"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
auto_prune_interval = "30m"
|
||||
|
||||
## The threshold to trigger a flush operation of a region in automatically WAL pruning.
|
||||
## Metasrv will send a flush request to flush the region when:
|
||||
## `trigger_flush_threshold` + `prunable_entry_id` < `max_prunable_entry_id`
|
||||
## where:
|
||||
## - `prunable_entry_id` is the maximum entry id that can be pruned of the region.
|
||||
## - `max_prunable_entry_id` is the maximum prunable entry id among all regions in the same topic.
|
||||
## Set to `0` to disable the flush operation.
|
||||
trigger_flush_threshold = 0
|
||||
|
||||
## Estimated size threshold to trigger a flush when using Kafka remote WAL.
|
||||
## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
|
||||
## (latest_entry_id - flushed_entry_id) * avg_record_size
|
||||
## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
|
||||
## - `latest_entry_id`: The latest entry ID in the topic.
|
||||
## - `flushed_entry_id`: The last flushed entry ID for the region.
|
||||
## Set to "0" to let the system decide the flush trigger size.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
flush_trigger_size = "512MB"
|
||||
|
||||
## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
|
||||
## The estimated size is calculated as:
|
||||
## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
|
||||
## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
|
||||
## Set to "0" to let the system decide the checkpoint trigger size.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
checkpoint_trigger_size = "128MB"
|
||||
|
||||
## Concurrent task limit for automatically WAL pruning.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
auto_prune_parallelism = 10
|
||||
|
||||
## Number of topics.
|
||||
## Number of topics used for remote WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
selector_type = "round_robin"
|
||||
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## Only accepts strings that match the following regular expression pattern:
|
||||
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
||||
## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
replication_factor = 1
|
||||
|
||||
## Above which a topic creation operation will be cancelled.
|
||||
## The timeout for creating a Kafka topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_topic_timeout = "30s"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
@@ -212,6 +267,23 @@ create_topic_timeout = "30s"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Configuration options for the event recorder.
|
||||
[event_recorder]
|
||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
||||
ttl = "90d"
|
||||
|
||||
## Configuration options for the stats persistence.
|
||||
[stats_persistence]
|
||||
## TTL for the stats table that will be used to store the stats.
|
||||
## Set to `0s` to disable stats persistence.
|
||||
## Default is `0s`.
|
||||
## If you want to enable stats persistence, set the TTL to a value greater than 0.
|
||||
## It is recommended to set a small value, e.g., `3h`.
|
||||
ttl = "0s"
|
||||
## The interval to persist the stats. Default is `10m`.
|
||||
## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
|
||||
interval = "10m"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
@@ -225,7 +297,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -239,6 +311,14 @@ max_log_files = 720
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -265,3 +345,11 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -85,7 +85,8 @@ runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
|
||||
prepared_stmt_cache_size= 10000
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
|
||||
@@ -565,6 +566,9 @@ sst_write_buffer_size = "8MB"
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 384
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
@@ -720,7 +724,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -734,6 +738,13 @@ max_log_files = 720
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -783,3 +794,11 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -47,4 +47,6 @@ WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -47,4 +47,6 @@ WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -15,4 +15,6 @@ ADD $TARGETARCH/greptime /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -18,4 +18,6 @@ ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
|
||||
|
||||
@@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
unzip \
|
||||
build-essential \
|
||||
pkg-config
|
||||
pkg-config \
|
||||
openssh-client
|
||||
|
||||
# Install protoc
|
||||
ARG PROTOBUF_VERSION=29.3
|
||||
|
||||
@@ -19,7 +19,7 @@ ARG PROTOBUF_VERSION=29.3
|
||||
|
||||
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
|
||||
|
||||
|
||||
RUN mv protoc3/bin/* /usr/local/bin/
|
||||
RUN mv protoc3/include/* /usr/local/include/
|
||||
|
||||
|
||||
@@ -34,6 +34,48 @@ services:
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
etcd-tls:
|
||||
<<: *etcd_common_settings
|
||||
container_name: etcd-tls
|
||||
ports:
|
||||
- 2378:2378
|
||||
- 2381:2381
|
||||
command:
|
||||
- --name=etcd-tls
|
||||
- --data-dir=/var/lib/etcd
|
||||
- --initial-advertise-peer-urls=https://etcd-tls:2381
|
||||
- --listen-peer-urls=https://0.0.0.0:2381
|
||||
- --listen-client-urls=https://0.0.0.0:2378
|
||||
- --advertise-client-urls=https://etcd-tls:2378
|
||||
- --heartbeat-interval=250
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd-tls=https://etcd-tls:2381
|
||||
- --initial-cluster-state=new
|
||||
- --initial-cluster-token=etcd-tls-cluster
|
||||
- --cert-file=/certs/server.crt
|
||||
- --key-file=/certs/server-key.pem
|
||||
- --peer-cert-file=/certs/server.crt
|
||||
- --peer-key-file=/certs/server-key.pem
|
||||
- --trusted-ca-file=/certs/ca.crt
|
||||
- --peer-trusted-ca-file=/certs/ca.crt
|
||||
- --client-cert-auth
|
||||
- --peer-client-cert-auth
|
||||
volumes:
|
||||
- ./greptimedb-cluster-docker-compose/etcd-tls:/var/lib/etcd
|
||||
- ./greptimedb-cluster-docker-compose/certs:/certs:ro
|
||||
environment:
|
||||
- ETCDCTL_API=3
|
||||
- ETCDCTL_CACERT=/certs/ca.crt
|
||||
- ETCDCTL_CERT=/certs/server.crt
|
||||
- ETCDCTL_KEY=/certs/server-key.pem
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=https://etcd-tls:2378", "--cacert=/certs/ca.crt", "--cert=/certs/server.crt", "--key=/certs/server-key.pem", "endpoint", "health" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: *greptimedb_image
|
||||
container_name: metasrv
|
||||
|
||||
@@ -30,6 +30,23 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
|
||||
|
||||
## Profiling
|
||||
|
||||
### Configuration
|
||||
|
||||
You can control heap profiling activation through configuration. Add the following to your configuration file:
|
||||
|
||||
```toml
|
||||
[memory]
|
||||
# Whether to enable heap profiling activation during startup.
|
||||
# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
# is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
# Default is true.
|
||||
enable_heap_profiling = true
|
||||
```
|
||||
|
||||
By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.
|
||||
|
||||
### Starting with environment variables
|
||||
|
||||
Start GreptimeDB instance with environment variables:
|
||||
|
||||
```bash
|
||||
@@ -40,6 +57,23 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
```
|
||||
|
||||
### Memory profiling control
|
||||
|
||||
You can control heap profiling activation using the new HTTP APIs:
|
||||
|
||||
```bash
|
||||
# Check current profiling status
|
||||
curl -X GET localhost:4000/debug/prof/mem/status
|
||||
|
||||
# Activate heap profiling (if not already active)
|
||||
curl -X POST localhost:4000/debug/prof/mem/activate
|
||||
|
||||
# Deactivate heap profiling
|
||||
curl -X POST localhost:4000/debug/prof/mem/deactivate
|
||||
```
|
||||
|
||||
### Dump memory profiling data
|
||||
|
||||
Dump memory profiling data through HTTP API:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
Currently, our query engine is based on DataFusion, so all aggregate functions are executed by DataFusion through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/datafusion/tree/main/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered on a struct called "Accumulator" that accumulates states along the way during aggregation.
|
||||
|
||||
However, DataFusion's UDAF implementation has a huge restriction: it requires the user to provide a concrete "Accumulator". Take the `Median` aggregate function for example: to aggregate a `u32` datatype column, you have to write a `MedianU32` and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate an `i32` datatype column. Alternatively, you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`) and `match` all the way up to do the aggregate calculations. It might work, though it is rather tedious. (But I think it's DataFusion's preferred way to write UDAFs.)
|
||||
|
||||
So is there a way we can make an aggregate function that automatically matches the input data's type? For example, a `Median` aggregator that can work on both `u32` and `i32` columns? The answer is yes, once we find a way to bypass DataFusion's restriction, namely that DataFusion simply doesn't pass the input data's type when creating an Accumulator.
|
||||
|
||||
> There's an example in `my_sum_udaf_example.rs`, take that as quick start.
|
||||
|
||||
# 1. Impl `AggregateFunctionCreator` trait for your accumulator creator.
|
||||
|
||||
You must first define a struct that will be used to create your accumulator. For example,
|
||||
|
||||
```Rust
|
||||
#[as_aggr_func_creator]
|
||||
#[derive(Debug, AggrFuncTypeStore)]
|
||||
struct MySumAccumulatorCreator {}
|
||||
```
|
||||
|
||||
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. They work together to provide storage for the aggregate function's input data types, which are needed to create a generic accumulator later.
|
||||
|
||||
> Note that the `as_aggr_func_creator` macro will add fields to the struct, so the struct cannot be defined as a fieldless struct like `struct Foo;`, nor as a newtype like `struct Foo(bar)`.
|
||||
|
||||
Then impl `AggregateFunctionCreator` trait on it. The definition of the trait is:
|
||||
|
||||
```Rust
|
||||
pub trait AggregateFunctionCreator: Send + Sync + Debug {
|
||||
fn creator(&self) -> AccumulatorCreatorFunction;
|
||||
fn output_type(&self) -> ConcreteDataType;
|
||||
fn state_types(&self) -> Vec<ConcreteDataType>;
|
||||
}
|
||||
```
|
||||
|
||||
You can use the input data's types in the methods that return the output type and state types (just invoke `input_types()`).
|
||||
|
||||
The output type is the aggregate function's output data type. For example, the `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are the accumulator's internal states' types. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for the sum) and `u64` (for the count).
|
||||
|
||||
The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's types as arguments. With the input datatype known, you can create the accumulator generically.
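
To make the idea concrete, here is a minimal, self-contained Rust sketch of that pattern. It does not use GreptimeDB's real types: `DataType`, `Acc`, `Median`, and `median_creator` are illustrative stand-ins only. The point it shows is that the input datatype is an argument to the creator, so one generic accumulator replaces a family of per-type structs.

```Rust
// A minimal sketch of "create the accumulator generically from the input type".
// All types here are simplified stand-ins, not GreptimeDB's real ones.
#[derive(Clone, Copy, Debug)]
enum DataType {
    UInt32,
    Int32,
}

// A trivial stand-in for the real `Accumulator` trait.
trait Acc: std::fmt::Debug {
    fn update(&mut self, raw: &[i64]);
    fn evaluate(&self) -> i64;
}

#[derive(Debug, Default)]
struct Median {
    values: Vec<i64>,
}

impl Acc for Median {
    fn update(&mut self, raw: &[i64]) {
        self.values.extend_from_slice(raw);
    }
    fn evaluate(&self) -> i64 {
        let mut v = self.values.clone();
        v.sort_unstable();
        v[v.len() / 2]
    }
}

// The "creator": given the input datatype (known only at planning time),
// build an accumulator specialized for it. One generic `Median` replaces
// hand-written `MedianU32`, `MedianI32`, and so on.
fn median_creator(input_type: DataType) -> Box<dyn Acc> {
    match input_type {
        // In a real implementation each arm would build a typed accumulator;
        // the point is that the type is an argument, not baked into a struct name.
        DataType::UInt32 | DataType::Int32 => Box::new(Median::default()),
    }
}

fn main() {
    let mut acc = median_creator(DataType::Int32);
    acc.update(&[3, 1, 2]);
    println!("median = {}", acc.evaluate()); // prints 2
}
```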
|
||||
|
||||
# 2. Impl `Accumulator` trait for your accumulator.
|
||||
|
||||
The accumulator is where you store the aggregate calculation states and evaluate a result. You must impl `Accumulator` trait for it. The trait's definition is:
|
||||
|
||||
```Rust
|
||||
pub trait Accumulator: Send + Sync + Debug {
|
||||
fn state(&self) -> Result<Vec<Value>>;
|
||||
fn update_batch(&mut self, values: &[VectorRef]) -> Result<()>;
|
||||
fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()>;
|
||||
fn evaluate(&self) -> Result<Value>;
|
||||
}
|
||||
```
|
||||
|
||||
DataFusion basically executes an aggregation like this:
|
||||
|
||||
1. Partition all input data for the aggregation. Create an accumulator for each partition.
|
||||
2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
|
||||
3. Call `state` to get each accumulator's internal state, the intermediate calculation result.
|
||||
4. Call `merge_batch` to merge all accumulators' internal states into one.
|
||||
5. Execute `evaluate` on the chosen one to get the final calculation result.
|
||||
|
||||
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
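
As a rough illustration of how those five steps fit together, here is a self-contained Rust sketch of a sum-like accumulator. It mirrors the shape of the `Accumulator` trait above but uses plain `i64` vectors instead of `Value`/`VectorRef`, and the driver loop in `main` mimics DataFusion's partition/update/state/merge/evaluate flow; none of this is the real engine code.

```Rust
// Simplified sum accumulator, mirroring the shape of the `Accumulator` trait
// but using plain Rust types instead of `Value`/`VectorRef`.
#[derive(Debug, Default)]
struct SumAcc {
    sum: i64,
}

impl SumAcc {
    // Step 2: fold a batch of partitioned input into the running state.
    fn update_batch(&mut self, values: &[i64]) {
        self.sum += values.iter().sum::<i64>();
    }
    // Step 3: expose the internal state (the intermediate result).
    fn state(&self) -> Vec<i64> {
        vec![self.sum]
    }
    // Step 4: merge another accumulator's state into this one.
    fn merge_batch(&mut self, states: &[i64]) {
        self.sum += states.iter().sum::<i64>();
    }
    // Step 5: produce the final result.
    fn evaluate(&self) -> i64 {
        self.sum
    }
}

fn main() {
    // Step 1: partition the input and create one accumulator per partition.
    let partitions = vec![vec![1, 2, 3], vec![10, 20]];
    let mut accs: Vec<SumAcc> = partitions.iter().map(|_| SumAcc::default()).collect();

    // Step 2: update each accumulator with its own partition.
    for (acc, part) in accs.iter_mut().zip(&partitions) {
        acc.update_batch(part);
    }

    // Steps 3-4: collect every accumulator's state and merge into one.
    let mut merged = SumAcc::default();
    for acc in &accs {
        merged.merge_batch(&acc.state());
    }

    // Step 5: evaluate the final result.
    assert_eq!(merged.evaluate(), 36);
}
```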
|
||||
|
||||
# 3. Register your aggregate function to our query engine.
|
||||
|
||||
You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields; the first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using a lowercase name. If you have to use an uppercase name, wrap your aggregate function in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
|
||||
|
||||
The second field is `arg_counts`, the count of the arguments. Take the `percentile` accumulator, which calculates the p-number of a column: we need to input both the column values and the value of p, so the count of the arguments is two.
|
||||
|
||||
The third field is a function describing how to create the accumulator creator that you defined in step 1 above. Creating a creator is a bit convoluted, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. To dig into the key details, start with the `get_aggregate_meta` method of our `DfContextProviderAdapter` struct.
|
||||
|
||||
# (Optional) 4. Make your aggregate function automatically registered.
|
||||
|
||||
If you've written a great aggregate function and want everyone to use it, you can make it register automatically with our query engine at startup. It's quick and simple; just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
|
||||
151
docs/rfcs/2025-07-04-compatibility-test-framework.md
Normal file
@@ -0,0 +1,151 @@
|
||||
---
|
||||
Feature Name: Compatibility Test Framework
|
||||
Tracking Issue: TBD
|
||||
Date: 2025-07-04
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
|
||||
This RFC proposes a compatibility test framework for GreptimeDB to ensure backward/forward compatibility for different versions of GreptimeDB.
|
||||
|
||||
# Motivation
|
||||
|
||||
In current practice, we don't have a systematic way to test and ensure the compatibility of different versions of GreptimeDB. Each time we release a new version, we need to manually test compatibility with ad-hoc cases. This is not only time-consuming, but also error-prone and unmaintainable, and it relies heavily on the release manager to ensure the compatibility of different versions of GreptimeDB.
|
||||
|
||||
We don't have a detailed guide in the release SOP on how to test and ensure the compatibility of a new version, and we have broken compatibility many times (`v0.14.1` and `v0.15.1` are two examples, both released right after a major release).
|
||||
|
||||
# Details
|
||||
|
||||
This RFC proposes a compatibility test framework that is easy to maintain, extend and run. It can tell the compatibility between any given two versions of GreptimeDB, both backward and forward. It's based on the Sqlness library but used in a different way.
|
||||
|
||||
Generally speaking, the framework is composed of two parts:
|
||||
|
||||
1. Test cases: A set of test cases that are maintained specifically for the compatibility test. Still in the `.sql` and `.result` format.
|
||||
2. Test framework: A new sqlness runner that is used to run the test cases, with some new features that are not required by the integration sqlness test.
|
||||
|
||||
## Test Cases
|
||||
|
||||
### Structure
|
||||
|
||||
The case set is organized in three parts:
|
||||
|
||||
- `1.feature`: Use a new feature
|
||||
- `2.verify`: Verify database behavior
|
||||
- `3.cleanup`: Paired with `1.feature`, cleanup the test environment.
|
||||
|
||||
These three parts are organized in a tree structure, and should be run in sequence:
|
||||
|
||||
```
|
||||
compatibility_test/
|
||||
├── 1.feature/
|
||||
│ ├── feature-a/
|
||||
│ ├── feature-b/
|
||||
│ └── feature-c/
|
||||
├── 2.verify/
|
||||
│ ├── verify-metadata/
|
||||
│ ├── verify-data/
|
||||
│ └── verify-schema/
|
||||
└── 3.cleanup/
|
||||
├── cleanup-a/
|
||||
├── cleanup-b/
|
||||
└── cleanup-c/
|
||||
```
|
||||
|
||||
### Example
|
||||
|
||||
For example, for a new feature like adding a new index option ([#6416](https://github.com/GreptimeTeam/greptimedb/pull/6416)), we (the feature implementers) create a new test case like this:
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/1.feature/index-option/granularity_and_false_positive_rate.sql
|
||||
|
||||
-- SQLNESS ARG since=0.15.0
|
||||
-- SQLNESS IGNORE_RESULT
|
||||
CREATE TABLE granularity_and_false_positive_rate (ts timestamp time index, val double) with ("index.granularity" = "8192", "index.false_positive_rate" = "0.01");
|
||||
```
|
||||
|
||||
And
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/3.cleanup/index-option/granularity_and_false_positive_rate.sql
|
||||
drop table granularity_and_false_positive_rate;
|
||||
```
|
||||
|
||||
Since this new feature doesn't require a special way to verify the database behavior, we can reuse existing test cases in `2.verify/`. For example, we can reuse the `verify-metadata` test case to verify the metadata of the table.
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/2.verify/verify-metadata/show-create-table.sql
|
||||
|
||||
-- SQLNESS TEMPLATE TABLE="SHOW TABLES";
|
||||
SHOW CREATE TABLE $TABLE;
|
||||
```
|
||||
|
||||
In this example, we use some new sqlness features that will be introduced in the next section (`since`, `IGNORE_RESULT`, `TEMPLATE`).
|
||||
|
||||
### Maintenance
|
||||
|
||||
Each time we implement a new feature that should be covered by the compatibility test, we should create new test cases in `1.feature/` and `3.cleanup/` for it, and check whether existing cases in `2.verify/` can be reused to verify the database behavior.
|
||||
|
||||
This simulates an enthusiastic user who adopts all the new features as soon as they land. The new maintenance burden falls on the feature implementer, who writes one more test case for the new feature to pin down its behavior. Once there is a breaking change in the future, it can be detected by the compatibility test framework automatically.
|
||||
|
||||
Another topic is deprecation. If a feature is deprecated, we should also mark it in the test case. Still using the above example, assume we deprecate the `index.granularity` and `index.false_positive_rate` index options in `v0.99.0`; we can mark them as:
|
||||
```sql
|
||||
-- SQLNESS ARG since=0.15.0 till=0.99.0
|
||||
...
|
||||
```
|
||||
|
||||
This tells the framework to ignore this feature in version `v0.99.0` and later. We currently have many experimental features that are scheduled to be broken in the future, and this is a good way to mark them.
|
||||
|
||||
## Test Framework
|
||||
|
||||
This section is about new sqlness features required by this framework.
|
||||
|
||||
### Since and Till
|
||||
|
||||
Following the `ARG` interceptor in sqlness, we can mark a feature as available between two given versions. Only `since` is required:
|
||||
|
||||
```sql
|
||||
-- SQLNESS ARG since=VERSION_STRING [till=VERSION_STRING]
|
||||
```
|
||||
|
||||
### IGNORE_RESULT
|
||||
|
||||
`IGNORE_RESULT` is a new interceptor; it tells the runner to ignore the result of the query and only check whether the query executes successfully.
|
||||
|
||||
This is useful for reducing the maintenance burden of the test cases: unlike the integration sqlness test, in most cases we don't care about the result of the query and only need to make sure it executes successfully.
|
||||
|
||||
### TEMPLATE
|
||||
|
||||
`TEMPLATE` is another new interceptor; it can generate queries from a template based on runtime data.
|
||||
|
||||
In the above example, we need to run the `SHOW CREATE TABLE` query for all existing tables, so we can use the `TEMPLATE` interceptor to generate the query from a dynamic table list.
|
||||
|
||||
### RUNNER
|
||||
|
||||
There are also some extra requirements for the runner itself:
|
||||
|
||||
- It should run the test cases in sequence, first `1.feature/`, then `2.verify/`, and finally `3.cleanup/`.
|
||||
- It should be able to fetch required version automatically to finish the test.
|
||||
- It should handle the `since` and `till` properly.
|
||||
|
||||
In the `1.feature` phase, the runner needs to identify, by version number, all features that need to be tested. It then restarts with a new version (the `to` version) to run the `2.verify/` and `3.cleanup/` phases.
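
A minimal sketch of how the runner might decide whether a case applies to the target version, assuming simple `major.minor.patch` version strings and the `[since, till)` semantics described above. The `parse_version` and `case_applies` names are illustrative only and not part of sqlness.

```rust
// Illustrative version-range check for the `since`/`till` ARG semantics:
// a case applies when `since <= target` and (if `till` is present) `target < till`.
fn parse_version(v: &str) -> Option<(u64, u64, u64)> {
    let mut parts = v.trim_start_matches('v').split('.');
    let major = parts.next()?.parse().ok()?;
    let minor = parts.next()?.parse().ok()?;
    let patch = parts.next().unwrap_or("0").parse().ok()?;
    Some((major, minor, patch))
}

fn case_applies(since: &str, till: Option<&str>, target: &str) -> bool {
    let (Some(since), Some(target)) = (parse_version(since), parse_version(target)) else {
        return false;
    };
    if target < since {
        return false;
    }
    match till.and_then(parse_version) {
        Some(till) => target < till,
        None => true,
    }
}

fn main() {
    // `-- SQLNESS ARG since=0.15.0 till=0.99.0` against targets 0.16.0 and 0.99.1:
    assert!(case_applies("0.15.0", Some("0.99.0"), "0.16.0"));
    assert!(!case_applies("0.15.0", Some("0.99.0"), "0.99.1"));
    // No `till`: the case applies to every version from 0.15.0 onwards.
    assert!(case_applies("0.15.0", None, "1.2.3"));
}
```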
|
||||
|
||||
## Test Report
|
||||
|
||||
Finally, we can run the compatibility test to verify the compatibility between any given two versions of GreptimeDB, for example:
|
||||
|
||||
```bash
|
||||
# check backward compatibility between v0.15.0 and v0.16.0 when releasing v0.16.0
|
||||
./sqlness run --from=0.15.0 --to=0.16.0
|
||||
|
||||
# check forward compatibility when downgrading from v0.15.0 to v0.13.0
|
||||
./sqlness run --from=0.15.0 --to=0.13.0
|
||||
```
|
||||
|
||||
We can also use a script to run the compatibility test for all the versions in a given range to give a quick report with all versions we need.
|
||||
|
||||
And we always bump the version in `Cargo.toml` to the next major release version, so that version can be used as the "latest" unpublished version for scenarios like local testing.
|
||||
|
||||
# Alternatives
|
||||
|
||||
There was a previous attempt to implement a compatibility test framework, but it was disabled for several reasons ([#3728](https://github.com/GreptimeTeam/greptimedb/issues/3728)).
|
||||
157
docs/rfcs/2025-07-23-global-gc-worker.md
Normal file
@@ -0,0 +1,157 @@
|
||||
---
|
||||
Feature Name: "global-gc-worker"
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6571
|
||||
Date: 2025-07-23
|
||||
Author: "discord9 <discord9@163.com>"
|
||||
---
|
||||
|
||||
# Global GC Worker
|
||||
|
||||
## Summary
|
||||
|
||||
This RFC proposes the integration of a garbage collection (GC) mechanism within the Compaction process. This mechanism aims to manage and remove stale files that are no longer actively used by any system component, thereby reclaiming storage space.
|
||||
|
||||
## Motivation
|
||||
|
||||
With the introduction of features such as table repartitioning, a substantial number of Parquet files can become obsolete. Furthermore, failures during manifest updates may result in orphaned files that are never referenced by the system. Therefore, a periodic garbage collection mechanism is essential to reclaim storage space by systematically removing these unused files.
|
||||
|
||||
## Details
|
||||
|
||||
### Overview
|
||||
|
||||
The garbage collection process will be integrated directly into the Compaction process. Upon the completion of a Compaction for a given region, the GC worker will be automatically triggered. Its primary function will be to identify and subsequently delete obsolete files that have persisted beyond their designated retention period. This integration ensures that garbage collection is performed in close conjunction with data lifecycle management, effectively leveraging the compaction process's inherent knowledge of file states.
|
||||
|
||||
This design prioritizes correctness and safety by explicitly linking GC execution to a well-defined operational boundary: the successful completion of a compaction cycle.
|
||||
|
||||
### Terminology
|
||||
|
||||
- **Unused File**: Refers to a file present in the storage directory that has never been formally recorded in any manifest. A common scenario for this includes cases where a new SST file is successfully written to storage, but the subsequent update to the manifest fails, leaving the file unreferenced.
|
||||
- **Obsolete File**: Denotes a file that was previously recorded in a manifest but has since been explicitly marked for removal. This typically occurs following operations such as data repartitioning or compaction.
|
||||
|
||||
### GC Worker Process
|
||||
|
||||
The GC worker operates as an integral part of the Compaction process. Once a Compaction for a specific region is completed, the GC worker is automatically triggered. Executing this process on a `datanode` is preferred, as it avoids the overhead of having to set object storage configurations in the `metasrv`.
|
||||
|
||||
The detailed process is as follows:
|
||||
|
||||
1. **Invocation**: Upon the successful completion of a Compaction for a region, the GC worker is invoked.
|
||||
2. **Manifest Reading**: The worker reads the region's primary manifest to obtain a comprehensive list of all files marked as obsolete. Concurrently, it reads any temporary manifests generated by long-running queries to identify files that are currently in active use, thereby preventing their premature deletion.
|
||||
3. **Lingering Time Check (Obsolete Files)**: For each identified obsolete file, the GC worker evaluates its "lingering time", that is, the time that has passed since the file was removed from the manifest.
|
||||
4. **Deletion Marking (Obsolete Files)**: Files that have exceeded their maximum configurable lingering time and are not referenced by any active temporary manifests are marked for deletion.
|
||||
5. **Lingering Time (Unused Files)**: Unused files (those never recorded in any manifest) are also subject to a configurable maximum lingering time before they are eligible for deletion.
|
||||
|
||||
The following flowchart illustrates the GC worker's process:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Compaction Completed] --> B[Trigger GC Worker]
|
||||
B --> C[Scan Region Manifest]
|
||||
C --> D[Identify File Types]
|
||||
D --> E[Unused Files<br/>Never recorded in manifest]
|
||||
D --> F[Obsolete Files<br/>Previously in manifest<br/>but marked for removal]
|
||||
E --> G[Check Lingering Time]
|
||||
F --> G
|
||||
G --> H{File exceeds<br/>configured lingering time?}
|
||||
H -->|No| I[Skip deletion]
|
||||
H -->|Yes| J[Check Temporary Manifest]
|
||||
J --> K{File in use by<br/>active queries?}
|
||||
K -->|Yes| L[Retain file<br/>Wait for next GC cycle]
|
||||
K -->|No| M[Safely delete file]
|
||||
I --> N[End GC cycle]
|
||||
L --> N
|
||||
M --> O[Update Manifest]
|
||||
O --> N
|
||||
N --> P[Wait for next Compaction]
|
||||
P --> A
|
||||
style A fill:#e1f5fe
|
||||
style B fill:#f3e5f5
|
||||
style M fill:#e8f5e8
|
||||
style L fill:#fff3e0
|
||||
```
|
||||
|
||||
#### Handling Obsolete Files
|
||||
|
||||
An obsolete file is permanently deleted only if two conditions are met (a minimal sketch of this check follows the list):
|
||||
1. The time elapsed since its removal from the manifest (its obsolescence timestamp) exceeds a configurable threshold.
|
||||
2. It is not currently referenced by any active temporary manifests.
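The decision can be summarized in a small helper. This is a sketch under assumed names (`ObsoleteFile`, `can_delete`, and a plain set of file ids collected from active temporary manifests); the actual implementation will work against the region's real manifest types.

```rust
use std::collections::HashSet;
use std::time::{Duration, SystemTime};

/// Hypothetical view of an obsolete file as seen by the GC worker: its id and the
/// time at which it was removed from the region manifest.
struct ObsoleteFile {
    file_id: String,
    removed_from_manifest_at: SystemTime,
}

/// Returns true only when both conditions hold: the file has lingered past the
/// configured threshold, and no active temporary manifest still references it.
fn can_delete(
    file: &ObsoleteFile,
    now: SystemTime,
    lingering_threshold: Duration,
    files_in_temp_manifests: &HashSet<String>,
) -> bool {
    let lingered = now
        .duration_since(file.removed_from_manifest_at)
        .unwrap_or(Duration::ZERO);
    lingered > lingering_threshold && !files_in_temp_manifests.contains(&file.file_id)
}
```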
|
||||
|
||||
|
||||
#### Handling Unused Files
|
||||
|
||||
With the integration of the GC worker into the Compaction process, the risk of accidentally deleting newly created SST files that have not yet been recorded in the manifest is significantly mitigated, so "Unused Files" are no longer a category that is especially prone to accidental deletion. Any files that are genuinely "unused" (i.e., never referenced by any manifest, including temporary ones) can be safely deleted after a configurable maximum lingering time.
|
||||
|
||||
For debugging and auditing purposes, a comprehensive list of recently deleted files can be maintained.
|
||||
|
||||
### Ensuring Read Consistency
|
||||
|
||||
To prevent the GC worker from inadvertently deleting files that are actively being utilized by long-running analytical queries, a robust protection mechanism is introduced. This mechanism relies on temporary manifests that are actively kept "alive" by the queries using them.
|
||||
|
||||
When a long-running query is detected (e.g., by a slow query recorder), it will write a temporary manifest to the region's manifest directory. This manifest lists all files required for the query. However, simply creating this file is not enough, as a query runner might crash, leaving the temporary manifest orphaned and preventing garbage collection indefinitely.
|
||||
|
||||
To address this, the following "heartbeat" mechanism is implemented:
|
||||
1. **Periodic Updates**: The process executing the long-running query is responsible for periodically updating the modification timestamp of its temporary manifest file (i.e., "touching" the file). This serves as a heartbeat, signaling that the query is still active.
|
||||
2. **GC Worker Verification**: When the GC worker runs, it scans for temporary manifests. For each one it finds, it checks the file's last modification time.
|
||||
3. **Stale File Handling**: If a temporary manifest's last modification time is older than a configurable threshold, the GC worker considers it stale (left over from a crashed or terminated query) and deletes it. Files that were protected only by this stale manifest are no longer shielded from garbage collection. A minimal sketch of this staleness check follows the list.
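The staleness check itself is simple. The sketch below uses a local filesystem path and `std::fs` metadata purely for illustration; in practice the temporary manifest lives in object storage and the last-modified time would come from the object store's metadata.

```rust
use std::fs;
use std::io;
use std::path::Path;
use std::time::{Duration, SystemTime};

/// A temporary manifest is stale when its owning query has not "touched" it within
/// `heartbeat_timeout`. Stale manifests are deleted and no longer protect the files
/// they list.
fn is_stale_temp_manifest(path: &Path, heartbeat_timeout: Duration) -> io::Result<bool> {
    let last_touched = fs::metadata(path)?.modified()?;
    let age = SystemTime::now()
        .duration_since(last_touched)
        .unwrap_or(Duration::ZERO);
    Ok(age > heartbeat_timeout)
}
```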
|
||||
|
||||
This approach ensures that only files for genuinely active queries are protected. The lifecycle of the temporary manifest is managed dynamically: it is created when a long query starts, kept alive through periodic updates, and is either deleted by the query upon normal completion or automatically cleaned up by the GC worker if the query terminates unexpectedly.
|
||||
|
||||
This mechanism may be too complex to implement at once. We can consider a two-phased approach:
|
||||
1. **Phase 1 (Simple Time-Based Deletion)**: Initially, implement a simpler GC strategy that deletes obsolete files based solely on a configurable lingering time. This provides a baseline for space reclamation without the complexity of temporary manifests.
|
||||
2. **Phase 2 (Consistency-Aware GC)**: Based on the practical effectiveness and observed issues from Phase 1, we can then decide whether to implement the full temporary manifest and heartbeat mechanism to handle long-running queries. This iterative approach allows for a quicker initial implementation while gathering real-world data to justify the need for a more complex solution.
|
||||
|
||||
## Drawbacks
|
||||
|
||||
- **Dependency on Compaction Frequency**: The integration of the GC worker with Compaction means that GC cycles are directly tied to the frequency of compactions. In environments with infrequent compaction operations, obsolete files may accumulate for extended periods before being reclaimed, potentially leading to increased storage consumption.
|
||||
- **Race Condition with Long-Running Queries**: A potential race condition exists if a long-running query starts but has not yet written its temporary manifest when a compaction process begins and marks files used by that query as obsolete. This could lead to the premature deletion of files still required by the active query. To mitigate this, the deadline for writing a temporary manifest should be significantly shorter than the lingering time configured for obsolete files, so that subsequent GC runs do not delete files that are by then referenced by the temporary manifest of a still-running query (this invariant is sketched after the list).
|
||||
In addition, a read replica should not lag behind the latest manifest version by more than the lingering time of obsolete files; otherwise it might reference files that the GC worker has already deleted.
|
||||
- **Temporary Manifest Upload Overhead**: The temporary manifest needs to be uploaded to object storage, which introduces additional complexity and potential performance overhead. However, since long-running queries are typically infrequent, the performance impact is expected to be minimal.
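The timing relationships above can be checked once at configuration time. The `GcConfig` struct and its field names below are assumptions used only to illustrate the required ordering between the values discussed in this section.

```rust
use std::time::Duration;

/// Hypothetical GC-related settings, named only for illustration.
struct GcConfig {
    /// How long a query may run before its temporary manifest must be uploaded.
    temp_manifest_write_deadline: Duration,
    /// How far behind the latest manifest version a read replica may lag.
    max_read_replica_lag: Duration,
    /// Lingering time before an obsolete file becomes eligible for deletion.
    obsolete_file_lingering: Duration,
}

fn validate(cfg: &GcConfig) {
    // Both values must stay well below the lingering time, otherwise the GC worker
    // may delete files that a query or a lagging read replica still needs.
    assert!(cfg.temp_manifest_write_deadline < cfg.obsolete_file_lingering);
    assert!(cfg.max_read_replica_lag < cfg.obsolete_file_lingering);
}
```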
|
||||
|
||||
|
||||
## Conclusion and Rationale
|
||||
|
||||
This section summarizes the key aspects and trade-offs of the proposed integrated GC worker, highlighting its advantages and potential challenges.
|
||||
|
||||
| Aspect | Current Proposal (Integrated GC) |
|
||||
| :--- | :--- |
|
||||
| **Implementation Complexity** | **Medium**. Requires careful integration with the compaction process and the slow query recorder for temporary manifest management. |
|
||||
| **Reliability** | **High**. Integration with compaction and leveraging temporary manifests from long-running queries significantly mitigates the risk of incorrect deletion. Accurate management of lingering times for obsolete files and prevention of accidental deletion of newly created SSTs enhance data safety. |
|
||||
| **Performance Overhead** | **Low to Medium**. The GC worker runs post-compaction, minimizing direct impact on write paths. Overhead from temporary manifest management by the slow query recorder is expected to be acceptable for long-running queries. |
|
||||
| **Impact on Other Components** | **Moderate**. Requires modifications to the compaction process to trigger GC and the slow query recorder to manage temporary manifests. This introduces some coupling but enhances overall data safety. |
|
||||
| **Deletion Strategy** | **State- and Time-Based**. Obsolete files are deleted based on a configurable lingering time, which is paused if the file is referenced by a temporary manifest. Unused files (never in a manifest) are also subject to a lingering time. |
|
||||
|
||||
## Unresolved Questions and Future Work
|
||||
|
||||
This section outlines key areas requiring further discussion and defines potential avenues for future development.
|
||||
|
||||
* **Slow Query Recorder Implementation**: Detailed specifications are needed for modifying the slow query recorder and for its precise interaction with temporary manifests.
|
||||
* **Configurable Lingering Times**: Establish and make configurable the specific lingering times for both obsolete and unused files to optimize storage reclamation and data availability.
|
||||
|
||||
## Alternatives
|
||||
|
||||
### 1. Standalone GC Service
|
||||
|
||||
Instead of integrating the GC worker directly into the Compaction process, a standalone GC service could be implemented. This service would operate independently, periodically scanning the storage for obsolete and unused files based on manifest information and predefined retention policies.
|
||||
|
||||
**Pros:**
|
||||
* **Decoupling**: Separates GC logic from compaction, allowing independent scaling and deployment.
|
||||
* **Flexibility**: Can be configured to run at different frequencies and with different strategies than compaction.
|
||||
|
||||
**Cons:**
|
||||
* **Increased Complexity**: Requires a separate service to manage, monitor, and coordinate with other components.
|
||||
* **Potential for Redundancy**: May duplicate some file scanning logic already present in compaction.
|
||||
* **Consistency Challenges**: Ensuring read consistency would require more complex coordination mechanisms between the standalone GC service and active queries, potentially involving a distributed lock manager or a more sophisticated temporary manifest system.
|
||||
|
||||
This alternative could be implemented in the future if the integrated GC worker proves insufficient or if there is a need for more advanced GC strategies.
|
||||
|
||||
### 2. Manifest-Driven Deletion (No Lingering Time)
|
||||
|
||||
This alternative would involve immediate deletion of files once they are removed from the manifest, without a lingering time.
|
||||
|
||||
**Pros:**
|
||||
* **Simplicity**: Simplifies the GC logic by removing the need for lingering time management.
|
||||
* **Immediate Space Reclamation**: Storage space is reclaimed as soon as files are marked for deletion.
|
||||
|
||||
**Cons:**
|
||||
* **Increased Risk of Data Loss**: Higher risk of deleting files still in use by long-running queries or other processes if not perfectly synchronized.
|
||||
* **Complex Read Consistency**: Requires extremely robust and immediate mechanisms to ensure that no active queries are referencing files marked for deletion, potentially leading to performance bottlenecks or complex error handling.
|
||||
* **Debugging Challenges**: Difficult to debug issues related to premature file deletion due to the immediate nature of the operation.
|
||||
docs/rfcs/2025-08-16-async-index-build.md
@@ -0,0 +1,112 @@
|
||||
---
|
||||
Feature Name: Async Index Build
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6756
|
||||
Date: 2025-08-16
|
||||
Author: "SNC123 <sinhco@outlook.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
This RFC proposes an asynchronous index build mechanism in the database, with a configuration option to choose between synchronous and asynchronous modes, aiming to improve flexibility and adapt to different workload requirements.
|
||||
|
||||
# Motivation
|
||||
Currently, index creation is performed synchronously, which may lead to prolonged write suspension and impact business continuity. As data volume grows, the time required for index building increases significantly. An asynchronous solution is urgently needed to enhance user experience and system throughput.
|
||||
|
||||
# Details
|
||||
|
||||
## Overview
|
||||
|
||||
The following table highlights the differences between the async and sync index approaches:
|
||||
|
||||
| Approach | Trigger | Data Source | Additional Index Metadata Installation | Fine-grained `FileMeta` Index |
|
||||
| :--- | :--- | :--- | :--- | :--- |
|
||||
| Sync Index | On `write_sst` | Memory (on flush) / Disk (on compact) | Not required (already installed synchronously) | Not required |
|
||||
| Async Index | 4 trigger types | Disk | Required | Required |
|
||||
|
||||
The index build mode (synchronous or asynchronous) can be selected via configuration file.
|
||||
|
||||
### Four Trigger Types
|
||||
|
||||
This RFC introduces four `IndexBuildType`s to trigger index building:
|
||||
|
||||
- **Manual Rebuild**: Triggered by the user via `ADMIN build_index("table_name")`, for scenarios like recovering from failed builds or migrating data. SST files whose `ColumnIndexMetadata` (see below) is already consistent with the `RegionMetadata` will be skipped; a sketch of this skip check follows the list.
|
||||
- **Schema Change**: Automatically triggered when the schema of an indexed column is altered.
|
||||
- **Flush**: Automatically builds indexes for new SST files created by a flush.
|
||||
- **Compact**: Automatically builds indexes for new SST files created by a compaction.
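To illustrate the skip check for manual rebuilds, here is a minimal sketch. The `ColumnId`, `IndexTypes`, and `ColumnIndexMetadata` shapes are simplified stand-ins for the types introduced later in this RFC, and `expected` stands in for whatever per-column index expectation the region derives from its `RegionMetadata`; the exact comparison in the implementation may differ.

```rust
use std::collections::HashMap;

type ColumnId = u32;
/// Simplified stand-in for the RFC's `IndexTypes` set.
type IndexTypes = Vec<String>;

struct ColumnIndexMetadata {
    column_id: ColumnId,
    created_indexes: IndexTypes,
}

/// A file can be skipped when every column already carries exactly the indexes
/// that the current region metadata expects.
fn can_skip_rebuild(
    file_indexes: &[ColumnIndexMetadata],
    expected: &HashMap<ColumnId, IndexTypes>,
) -> bool {
    file_indexes.len() == expected.len()
        && file_indexes.iter().all(|meta| {
            expected
                .get(&meta.column_id)
                .map_or(false, |want| want == &meta.created_indexes)
        })
}
```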
|
||||
|
||||
### Additional Index Metadata Installation
|
||||
|
||||
Previously, index information in the in-memory `FileMeta` was updated synchronously. The async approach requires an explicit installation step.
|
||||
|
||||
A race condition can occur when compaction and index building run concurrently, leading to:
|
||||
1. Building an index for a file that is about to be deleted by compaction.
|
||||
2. Creating an unnecessary index file and an incorrect manifest record.
|
||||
3. On restart, replaying the manifest could load metadata for a non-existent file.
|
||||
|
||||
To prevent this, the system checks if a file's `FileMeta` is in a `compacting` state before updating the manifest. If it is, the installation is aborted.
|
||||
|
||||
### Fine-grained `FileMeta` Index
|
||||
|
||||
The original `FileMeta` only stored file-level index information. However, manual rebuilds require column-level details to identify files inconsistent with the current DDL. Therefore, the `indexes` field in `FileMeta` is updated as follows:
|
||||
```rust
|
||||
struct FileMeta {
|
||||
...
|
||||
// From file-level:
|
||||
// available_indexes: SmallVec<[IndexType; 4]>
|
||||
// To column-level:
|
||||
indexes: Vec<ColumnIndexMetadata>,
|
||||
...
|
||||
}
|
||||
pub struct ColumnIndexMetadata {
|
||||
pub column_id: ColumnId,
|
||||
pub created_indexes: IndexTypes,
|
||||
}
|
||||
```
|
||||
|
||||
## Process
|
||||
|
||||
The index building process is similar to a flush and is illustrated below:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
Region0->>Region0: Triggered by one of 4 conditions, targets specific files
|
||||
loop For each target file
|
||||
Region0->>IndexBuildScheduler: Submits an index build task
|
||||
end
|
||||
IndexBuildScheduler->>IndexBuildTask: Executes the task
|
||||
IndexBuildTask->>Storage Interfaces: Reads SST data from disk
|
||||
IndexBuildTask->>IndexBuildTask: Builds the index file
|
||||
alt Index file size > 0
|
||||
IndexBuildTask->>Region0: Sends IndexBuildFinished notification
|
||||
end
|
||||
alt File exists in Version and is not compacting
|
||||
Region0->>Storage Interfaces: Updates manifest and Version
|
||||
end
|
||||
```
|
||||
|
||||
### Task Triggering and Scheduling
|
||||
|
||||
The process starts with one of the four `IndexBuildType` triggers. In `handle_rebuild_index`, the `RegionWorkerLoop` identifies target SSTs from the request or the current region version. It then creates an `IndexBuildTask` for each file and submits it to the `index_build_scheduler`.
|
||||
|
||||
Similar to Flush and Compact operations, index build tasks are ultimately dispatched to the LocalScheduler, and their resource usage can be adjusted via the configuration file. Since asynchronous index tasks are both memory-intensive and IO-intensive but have a lower priority, it is recommended to allocate fewer resources to them than to compaction and flush tasks, for example limiting them to 1/8 of the CPU cores.
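As a rough illustration of that sizing rule, the helper below derives a permit count from the available cores, keeping at least one permit so the scheduler can always make progress. The function name and the mapping from permits to scheduler slots are assumptions for illustration.

```rust
use std::thread;

/// Give async index build tasks roughly 1/8 of the cores, but never zero.
fn index_build_permits(total_cores: usize) -> usize {
    (total_cores / 8).max(1)
}

fn main() {
    let cores = thread::available_parallelism().map(|n| n.get()).unwrap_or(1);
    println!("index build permits: {}", index_build_permits(cores));
}
```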
|
||||
|
||||
### Index Building and Notification
|
||||
|
||||
The scheduled `IndexBuildTask` executes its `index_build` method. It uses an `indexer_builder` to create an `Indexer` that reads SST data and builds the index. If a new index file is created (`IndexOutput.file_size > 0`), the task sends an `IndexBuildFinished` notification back to the `RegionWorkerLoop`.
|
||||
|
||||
### Index Metadata Installation
|
||||
|
||||
Upon receiving the `IndexBuildFinished` notification in `handle_index_build_finished`, the `RegionWorkerLoop` verifies that the file still exists in the current `version` and is not being compacted. If the check passes, it calls `manifest_ctx.update_manifest` to apply a `RegionEdit` with the new index information, completing the installation.
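The verification described above can be summarized as a small predicate. `VersionView`, `SstEntry`, and the `compacting` flag below are simplified, hypothetical stand-ins for the region's in-memory state; the real check runs against the current `version` inside `handle_index_build_finished` before `manifest_ctx.update_manifest` is called.

```rust
use std::collections::HashMap;

type FileId = u64;

/// Simplified per-file state: whether the file is currently being compacted.
struct SstEntry {
    compacting: bool,
}

/// Simplified snapshot of the region's current version.
struct VersionView {
    ssts: HashMap<FileId, SstEntry>,
}

/// Install the new index metadata only when the file is still part of the current
/// version and is not being compacted; otherwise the notification is dropped.
fn should_install_index(version: &VersionView, file_id: FileId) -> bool {
    version
        .ssts
        .get(&file_id)
        .map_or(false, |entry| !entry.compacting)
}
```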
|
||||
|
||||
# Drawbacks
|
||||
|
||||
Asynchronous index building may consume extra system resources, potentially affecting overall performance during peak periods.
|
||||
|
||||
There may be a delay before the new index becomes available for queries, which could impact certain use cases.
|
||||
|
||||
# Unresolved Questions and Future Work
|
||||
|
||||
**Resource Management and Throttling**: The resource consumption (CPU, I/O) of background index building can be managed and limited to some extent by configuring a dedicated background thread pool. However, this approach cannot fully eliminate resource contention, especially under heavy workloads or when I/O is highly competitive. Additional throttling mechanisms or dynamic prioritization may still be necessary to avoid impacting foreground operations.
|
||||
|
||||
# Alternatives
|
||||
|
||||
Instead of being triggered by events like Flush or Compact, index building could be performed in batches during scheduled maintenance windows. This offers predictable resource usage but delays index availability.
|
||||
@@ -15,8 +15,6 @@
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
buildInputs = with pkgs; [
|
||||
libgit2
|
||||
libz
|
||||
];
|
||||
lib = nixpkgs.lib;
|
||||
rustToolchain = fenix.packages.${system}.fromToolchainName {
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -72,6 +72,7 @@
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series count and field builder count in memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -86,6 +87,13 @@
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
# Remote WAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
|
||||
| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
|
||||
| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
|
||||
| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |
|
||||
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -102,6 +110,8 @@
|
||||
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
|
||||
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
|
||||
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
|
||||
| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
|
||||
| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed of Reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |
|
||||
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|
||||
@@ -180,13 +180,18 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -197,16 +202,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -217,16 +232,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -237,16 +262,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -257,6 +292,11 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -642,6 +682,15 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: The local cache miss of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -753,6 +802,48 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
|
||||
- title: Remote WAL
|
||||
panels:
|
||||
- title: Triggered region flush total
|
||||
type: timeseries
|
||||
description: Triggered region flush total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_flush_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Triggered region checkpoint total
|
||||
type: timeseries
|
||||
description: Triggered region checkpoint total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_checkpoint_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Topic estimated replay size
|
||||
type: timeseries
|
||||
description: Topic estimated max replay size
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: meta_topic_estimated_replay_size
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Kafka logstore's bytes traffic
|
||||
type: timeseries
|
||||
description: Kafka logstore's bytes traffic
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{logstore}}'
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
@@ -899,6 +990,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
- title: Reconciliation stats
|
||||
type: timeseries
|
||||
description: Reconciliation stats
|
||||
unit: s
|
||||
queries:
|
||||
- expr: greptime_meta_reconciliation_stats
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{table_type}}-{{type}}'
|
||||
- title: Reconciliation steps
|
||||
type: timeseries
|
||||
description: 'Elapsed of Reconciliation steps '
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{procedure_name}}-{{step}}-P90'
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -72,6 +72,7 @@
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series count and field builder count in memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -86,6 +87,13 @@
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
# Remote WAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
|
||||
| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
|
||||
| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
|
||||
| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |
|
||||
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -102,6 +110,8 @@
|
||||
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
|
||||
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
|
||||
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
|
||||
| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
|
||||
| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed of Reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |
|
||||
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|
||||
@@ -180,13 +180,18 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -197,16 +202,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -217,16 +232,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -237,16 +262,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: decbytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -257,6 +292,11 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -642,6 +682,15 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: The local cache miss of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -753,6 +802,48 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
|
||||
- title: Remote WAL
|
||||
panels:
|
||||
- title: Triggered region flush total
|
||||
type: timeseries
|
||||
description: Triggered region flush total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_flush_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Triggered region checkpoint total
|
||||
type: timeseries
|
||||
description: Triggered region checkpoint total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_checkpoint_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Topic estimated replay size
|
||||
type: timeseries
|
||||
description: Topic estimated max replay size
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: meta_topic_estimated_replay_size
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Kafka logstore's bytes traffic
|
||||
type: timeseries
|
||||
description: Kafka logstore's bytes traffic
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{logstore}}'
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
@@ -899,6 +990,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
- title: Reconciliation stats
|
||||
type: timeseries
|
||||
description: Reconciliation stats
|
||||
unit: s
|
||||
queries:
|
||||
- expr: greptime_meta_reconciliation_stats
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{table_type}}-{{type}}'
|
||||
- title: Reconciliation steps
|
||||
type: timeseries
|
||||
description: 'Elapsed of Reconciliation steps '
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{procedure_name}}-{{step}}-P90'
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
|
||||
@@ -26,7 +26,7 @@ check_dashboards_generation() {
|
||||
./grafana/scripts/gen-dashboards.sh
|
||||
|
||||
if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
|
||||
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
|
||||
echo "Error: The dashboards are not generated correctly. You should execute the 'make dashboards' command."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
scripts/fix-udeps.py (executable)
@@ -0,0 +1,265 @@
|
||||
# Copyright 2023 Greptime Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
def load_udeps_report(report_path):
|
||||
try:
|
||||
with open(report_path, "r") as f:
|
||||
return json.load(f)
|
||||
except FileNotFoundError:
|
||||
print(f"Error: Report file '{report_path}' not found.")
|
||||
return None
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error: Invalid JSON in report file: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def extract_unused_dependencies(report):
|
||||
"""
|
||||
Extract and organize unused dependencies from the cargo-udeps JSON report.
|
||||
|
||||
The cargo-udeps report has this structure:
|
||||
{
|
||||
"unused_deps": {
|
||||
"package_name v0.1.0 (/path/to/package)": {
|
||||
"normal": ["dep1", "dep2"],
|
||||
"development": ["dev_dep1"],
|
||||
"build": ["build_dep1"],
|
||||
"manifest_path": "/path/to/Cargo.toml"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Args:
|
||||
report (dict): The parsed JSON report from cargo-udeps
|
||||
|
||||
Returns:
|
||||
dict: Organized unused dependencies by package name:
|
||||
{
|
||||
"package_name": {
|
||||
"dependencies": [("dep1", "normal"), ("dev_dep1", "dev")],
|
||||
"manifest_path": "/path/to/Cargo.toml"
|
||||
}
|
||||
}
|
||||
"""
|
||||
if not report or "unused_deps" not in report:
|
||||
return {}
|
||||
|
||||
unused_deps = {}
|
||||
for package_full_name, deps_info in report["unused_deps"].items():
|
||||
package_name = package_full_name.split(" ")[0]
|
||||
|
||||
all_unused = []
|
||||
if deps_info.get("normal"):
|
||||
all_unused.extend([(dep, "normal") for dep in deps_info["normal"]])
|
||||
if deps_info.get("development"):
|
||||
all_unused.extend([(dep, "dev") for dep in deps_info["development"]])
|
||||
if deps_info.get("build"):
|
||||
all_unused.extend([(dep, "build") for dep in deps_info["build"]])
|
||||
|
||||
if all_unused:
|
||||
unused_deps[package_name] = {
|
||||
"dependencies": all_unused,
|
||||
"manifest_path": deps_info.get("manifest_path", "unknown"),
|
||||
}
|
||||
|
||||
return unused_deps
|
||||
|
||||
|
||||
def get_section_pattern(dep_type):
|
||||
"""
|
||||
Get regex patterns to identify different dependency sections in Cargo.toml.
|
||||
|
||||
Args:
|
||||
dep_type (str): Type of dependency ("normal", "dev", or "build")
|
||||
|
||||
Returns:
|
||||
list: List of regex patterns to match the appropriate section headers
|
||||
|
||||
"""
|
||||
patterns = {
|
||||
"normal": [r"\[dependencies\]", r"\[dependencies\..*?\]"],
|
||||
"dev": [r"\[dev-dependencies\]", r"\[dev-dependencies\..*?\]"],
|
||||
"build": [r"\[build-dependencies\]", r"\[build-dependencies\..*?\]"],
|
||||
}
|
||||
return patterns.get(dep_type, [])
|
||||
|
||||
|
||||
def remove_dependency_line(content, dep_name, section_start, section_end):
|
||||
"""
|
||||
Remove a dependency line from a specific section of a Cargo.toml file.
|
||||
|
||||
Args:
|
||||
content (str): The entire content of the Cargo.toml file
|
||||
dep_name (str): Name of the dependency to remove (e.g., "serde", "tokio")
|
||||
section_start (int): Starting position of the section in the content
|
||||
section_end (int): Ending position of the section in the content
|
||||
|
||||
Returns:
|
||||
tuple: (new_content, removed) where:
|
||||
- new_content (str): The modified content with dependency removed
|
||||
- removed (bool): True if dependency was found and removed, False otherwise
|
||||
|
||||
Example input content format:
|
||||
content = '''
|
||||
[package]
|
||||
name = "my-crate"
|
||||
version = "0.1.0"
|
||||
|
||||
[dependencies]
|
||||
serde = "1.0"
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
serde_json.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.0"
|
||||
'''
|
||||
|
||||
# If dep_name = "serde", section_start = start of [dependencies],
|
||||
# section_end = start of [dev-dependencies], this function will:
|
||||
# 1. Extract the section: "serde = "1.0"\ntokio = { version = "1.0", features = ["full"] }\nserde_json.workspace = true\n"
|
||||
# 2. Find and remove the line: "serde = "1.0""
|
||||
# 3. Return the modified content with that line removed
|
||||
"""
|
||||
section_content = content[section_start:section_end]
|
||||
|
||||
dep_patterns = [
|
||||
rf"^{re.escape(dep_name)}\s*=.*$", # e.g., "serde = "1.0""
|
||||
rf"^{re.escape(dep_name)}\.workspace\s*=.*$", # e.g., "serde_json.workspace = true"
|
||||
]
|
||||
|
||||
for pattern in dep_patterns:
|
||||
match = re.search(pattern, section_content, re.MULTILINE)
|
||||
if match:
|
||||
line_start = section_start + match.start() # Start of the matched line
|
||||
line_end = section_start + match.end() # End of the matched line
|
||||
|
||||
if line_end < len(content) and content[line_end] == "\n":
|
||||
line_end += 1
|
||||
|
||||
return content[:line_start] + content[line_end:], True
|
||||
|
||||
return content, False
|
||||
|
||||
|
||||
def remove_dependency_from_toml(file_path, dep_name, dep_type):
|
||||
"""
|
||||
Remove a specific dependency from a Cargo.toml file.
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the Cargo.toml file
|
||||
dep_name (str): Name of the dependency to remove
|
||||
dep_type (str): Type of dependency ("normal", "dev", or "build")
|
||||
|
||||
Returns:
|
||||
bool: True if dependency was successfully removed, False otherwise
|
||||
"""
|
||||
try:
|
||||
with open(file_path, "r") as f:
|
||||
content = f.read()
|
||||
|
||||
section_patterns = get_section_pattern(dep_type)
|
||||
if not section_patterns:
|
||||
return False
|
||||
|
||||
for pattern in section_patterns:
|
||||
section_match = re.search(pattern, content, re.IGNORECASE)
|
||||
if not section_match:
|
||||
continue
|
||||
|
||||
section_start = section_match.end()
|
||||
next_section = re.search(r"\n\s*\[", content[section_start:])
|
||||
section_end = (
|
||||
section_start + next_section.start() if next_section else len(content)
|
||||
)
|
||||
|
||||
new_content, removed = remove_dependency_line(
|
||||
content, dep_name, section_start, section_end
|
||||
)
|
||||
if removed:
|
||||
with open(file_path, "w") as f:
|
||||
f.write(new_content)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing {file_path}: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def process_unused_dependencies(unused_deps):
|
||||
"""
|
||||
Process and remove all unused dependencies from their respective Cargo.toml files.
|
||||
|
||||
Args:
|
||||
unused_deps (dict): Dictionary of unused dependencies organized by package:
|
||||
{
|
||||
"package_name": {
|
||||
"dependencies": [("dep1", "normal"), ("dev_dep1", "dev")],
|
||||
"manifest_path": "/path/to/Cargo.toml"
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
if not unused_deps:
|
||||
print("No unused dependencies found.")
|
||||
return
|
||||
|
||||
total_removed = 0
|
||||
total_failed = 0
|
||||
|
||||
for package, info in unused_deps.items():
|
||||
deps = info["dependencies"]
|
||||
manifest_path = info["manifest_path"]
|
||||
|
||||
if not os.path.exists(manifest_path):
|
||||
print(f"Manifest file not found: {manifest_path}")
|
||||
total_failed += len(deps)
|
||||
continue
|
||||
|
||||
for dep, dep_type in deps:
|
||||
if remove_dependency_from_toml(manifest_path, dep, dep_type):
|
||||
print(f"Removed {dep} from {package}")
|
||||
total_removed += 1
|
||||
else:
|
||||
print(f"Failed to remove {dep} from {package}")
|
||||
total_failed += 1
|
||||
|
||||
print(f"Removed {total_removed} dependencies")
|
||||
if total_failed > 0:
|
||||
print(f"Failed to remove {total_failed} dependencies")
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) > 1:
|
||||
report_path = sys.argv[1]
|
||||
else:
|
||||
report_path = "udeps-report.json"
|
||||
|
||||
report = load_udeps_report(report_path)
|
||||
if report is None:
|
||||
sys.exit(1)
|
||||
|
||||
unused_deps = extract_unused_dependencies(report)
|
||||
process_unused_dependencies(unused_deps)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
71
scripts/generate-etcd-tls-certs.sh
Executable file
@@ -0,0 +1,71 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Generate TLS certificates for etcd testing
|
||||
# This script creates certificates for TLS-enabled etcd in testing environments
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
CERT_DIR="${1:-$(dirname "$0")/../tests-integration/fixtures/etcd-tls-certs}"
|
||||
DAYS="${2:-365}"
|
||||
|
||||
echo "Generating TLS certificates for etcd in ${CERT_DIR}..."
|
||||
|
||||
mkdir -p "${CERT_DIR}"
|
||||
cd "${CERT_DIR}"
|
||||
|
||||
echo "Generating CA private key..."
|
||||
openssl genrsa -out ca-key.pem 2048
|
||||
|
||||
echo "Generating CA certificate..."
|
||||
openssl req -new -x509 -key ca-key.pem -out ca.crt -days "${DAYS}" \
|
||||
-subj "/C=US/ST=CA/L=SF/O=Greptime/CN=etcd-ca"
|
||||
|
||||
# Create server certificate config with Subject Alternative Names
|
||||
echo "Creating server certificate configuration..."
|
||||
cat > server.conf << 'EOF'
|
||||
[req]
|
||||
distinguished_name = req
|
||||
[v3_req]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = localhost
|
||||
DNS.2 = etcd-tls
|
||||
DNS.3 = 127.0.0.1
|
||||
IP.1 = 127.0.0.1
|
||||
IP.2 = ::1
|
||||
EOF
|
||||
|
||||
echo "Generating server private key..."
|
||||
openssl genrsa -out server-key.pem 2048
|
||||
|
||||
echo "Generating server certificate signing request..."
|
||||
openssl req -new -key server-key.pem -out server.csr \
|
||||
-subj "/CN=etcd-tls"
|
||||
|
||||
echo "Generating server certificate..."
|
||||
openssl x509 -req -in server.csr -CA ca.crt \
|
||||
-CAkey ca-key.pem -CAcreateserial -out server.crt \
|
||||
-days "${DAYS}" -extensions v3_req -extfile server.conf
|
||||
|
||||
echo "Generating client private key..."
|
||||
openssl genrsa -out client-key.pem 2048
|
||||
|
||||
echo "Generating client certificate signing request..."
|
||||
openssl req -new -key client-key.pem -out client.csr \
|
||||
-subj "/CN=etcd-client"
|
||||
|
||||
echo "Generating client certificate..."
|
||||
openssl x509 -req -in client.csr -CA ca.crt \
|
||||
-CAkey ca-key.pem -CAcreateserial -out client.crt \
|
||||
-days "${DAYS}"
|
||||
|
||||
echo "Setting proper file permissions..."
|
||||
chmod 644 ca.crt server.crt client.crt
|
||||
chmod 600 ca-key.pem server-key.pem client-key.pem
|
||||
|
||||
# Clean up intermediate files
|
||||
rm -f server.csr client.csr server.conf
|
||||
|
||||
echo "TLS certificates generated successfully in ${CERT_DIR}"
|
||||
@@ -19,6 +19,3 @@ paste.workspace = true
|
||||
prost.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.11"
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
@@ -66,12 +67,28 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid time unit: {time_unit}"))]
|
||||
InvalidTimeUnit {
|
||||
time_unit: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Inconsistent time unit: {:?}", units))]
|
||||
InconsistentTimeUnit {
|
||||
units: Vec<TimeUnit>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||
Error::UnknownColumnDataType { .. }
|
||||
| Error::InvalidTimeUnit { .. }
|
||||
| Error::InconsistentTimeUnit { .. } => StatusCode::InvalidArguments,
|
||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::BitVec;
|
||||
@@ -46,7 +47,7 @@ use greptime_proto::v1::{
|
||||
use paste::paste;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, InconsistentTimeUnitSnafu, InvalidTimeUnitSnafu, Result};
|
||||
use crate::v1::column::Values;
|
||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||
|
||||
@@ -1079,6 +1080,89 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_pb_time_unit(unit: v1::TimeUnit) -> TimeUnit {
|
||||
match unit {
|
||||
v1::TimeUnit::Second => TimeUnit::Second,
|
||||
v1::TimeUnit::Millisecond => TimeUnit::Millisecond,
|
||||
v1::TimeUnit::Microsecond => TimeUnit::Microsecond,
|
||||
v1::TimeUnit::Nanosecond => TimeUnit::Nanosecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_pb_time_unit(unit: TimeUnit) -> v1::TimeUnit {
|
||||
match unit {
|
||||
TimeUnit::Second => v1::TimeUnit::Second,
|
||||
TimeUnit::Millisecond => v1::TimeUnit::Millisecond,
|
||||
TimeUnit::Microsecond => v1::TimeUnit::Microsecond,
|
||||
TimeUnit::Nanosecond => v1::TimeUnit::Nanosecond,
|
||||
}
|
||||
}
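// A small illustrative sketch (not part of this change): the two conversions above
// are inverses of each other, since both enums carry the same four units. Assumes
// prost's derived `PartialEq`/`Debug` on `v1::TimeUnit`.
#[test]
fn time_unit_mapping_sketch() {
    assert_eq!(
        to_pb_time_unit(from_pb_time_unit(v1::TimeUnit::Microsecond)),
        v1::TimeUnit::Microsecond
    );
    assert_eq!(
        from_pb_time_unit(to_pb_time_unit(TimeUnit::Second)),
        TimeUnit::Second
    );
}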
|
||||
|
||||
pub fn from_pb_time_ranges(time_ranges: v1::TimeRanges) -> Result<Vec<(Timestamp, Timestamp)>> {
|
||||
if time_ranges.time_ranges.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
let proto_time_unit = v1::TimeUnit::try_from(time_ranges.time_unit).map_err(|_| {
|
||||
InvalidTimeUnitSnafu {
|
||||
time_unit: time_ranges.time_unit,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let time_unit = from_pb_time_unit(proto_time_unit);
|
||||
Ok(time_ranges
|
||||
.time_ranges
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
(
|
||||
Timestamp::new(r.start, time_unit),
|
||||
Timestamp::new(r.end, time_unit),
|
||||
)
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// All time_ranges must be of the same time unit.
|
||||
///
|
||||
/// If the input `time_ranges` is empty, it returns a default `TimeRanges` with `Millisecond` as the time unit.
|
||||
pub fn to_pb_time_ranges(time_ranges: &[(Timestamp, Timestamp)]) -> Result<v1::TimeRanges> {
|
||||
let is_same_time_unit = time_ranges.windows(2).all(|x| {
|
||||
x[0].0.unit() == x[1].0.unit()
|
||||
&& x[0].1.unit() == x[1].1.unit()
|
||||
&& x[0].0.unit() == x[0].1.unit()
|
||||
});
|
||||
|
||||
if !is_same_time_unit {
|
||||
let all_time_units: Vec<_> = time_ranges
|
||||
.iter()
|
||||
.map(|(s, e)| [s.unit(), e.unit()])
|
||||
.flatten()
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect();
|
||||
InconsistentTimeUnitSnafu {
|
||||
units: all_time_units,
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
|
||||
let mut pb_time_ranges = v1::TimeRanges {
|
||||
// default time unit is Millisecond
|
||||
time_unit: v1::TimeUnit::Millisecond as i32,
|
||||
time_ranges: Vec::with_capacity(time_ranges.len()),
|
||||
};
|
||||
if let Some((start, _end)) = time_ranges.first() {
|
||||
pb_time_ranges.time_unit = to_pb_time_unit(start.unit()) as i32;
|
||||
}
|
||||
for (start, end) in time_ranges {
|
||||
pb_time_ranges.time_ranges.push(v1::TimeRange {
|
||||
start: start.value(),
|
||||
end: end.value(),
|
||||
});
|
||||
}
|
||||
Ok(pb_time_ranges)
|
||||
}
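// A hedged usage sketch (not part of the diff): round-tripping millisecond ranges
// through the new conversions. `Timestamp` and `TimeUnit` come from
// `common_time::timestamp`, which this module already uses.
#[test]
fn pb_time_ranges_round_trip_sketch() {
    let ranges = vec![
        (
            Timestamp::new(0, TimeUnit::Millisecond),
            Timestamp::new(1_000, TimeUnit::Millisecond),
        ),
        (
            Timestamp::new(2_000, TimeUnit::Millisecond),
            Timestamp::new(3_000, TimeUnit::Millisecond),
        ),
    ];
    let pb = to_pb_time_ranges(&ranges).unwrap();
    assert_eq!(pb.time_unit, v1::TimeUnit::Millisecond as i32);

    let decoded = from_pb_time_ranges(pb).unwrap();
    assert_eq!(decoded.len(), ranges.len());
    assert_eq!(decoded[0].1.value(), 1_000);
}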
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
|
||||
pub mod column_def;
|
||||
|
||||
pub mod helper;
|
||||
|
||||
pub mod meta {
|
||||
pub use greptime_proto::v1::meta::*;
|
||||
}
|
||||
|
||||
65
src/api/src/v1/helper.rs
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use greptime_proto::v1::value::ValueData;
|
||||
use greptime_proto::v1::{ColumnDataType, ColumnSchema, Row, SemanticType, Value};
|
||||
|
||||
/// Create a time index [ColumnSchema] with column's name and datatype.
|
||||
/// Other fields are left default.
|
||||
/// Useful when you just want to create a simple [ColumnSchema] without filling in every struct field.
|
||||
pub fn time_index_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||
ColumnSchema {
|
||||
column_name: name.to_string(),
|
||||
datatype: datatype as i32,
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a tag [ColumnSchema] with column's name and datatype.
|
||||
/// Other fields are left default.
|
||||
/// Useful when you just want to create a simple [ColumnSchema] without filling in every struct field.
|
||||
pub fn tag_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||
ColumnSchema {
|
||||
column_name: name.to_string(),
|
||||
datatype: datatype as i32,
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a field [ColumnSchema] with column's name and datatype.
|
||||
/// Other fields are left default.
|
||||
/// Useful when you just want to create a simple [ColumnSchema] without filling in every struct field.
|
||||
pub fn field_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||
ColumnSchema {
|
||||
column_name: name.to_string(),
|
||||
datatype: datatype as i32,
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a [Row] from [ValueData]s.
|
||||
/// Useful when you want to avoid writing verbose code.
|
||||
pub fn row(values: Vec<ValueData>) -> Row {
|
||||
Row {
|
||||
values: values
|
||||
.into_iter()
|
||||
.map(|x| Value {
|
||||
value_data: Some(x),
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
}
|
||||
}
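// A short usage sketch (not part of the new file): the helpers compose into a
// schema and a matching row; the ColumnDataType and ValueData variants used here
// are the standard greptime_proto ones.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn build_schema_and_row() {
        let schema = vec![
            time_index_column_schema("ts", ColumnDataType::TimestampMillisecond),
            tag_column_schema("host", ColumnDataType::String),
            field_column_schema("cpu", ColumnDataType::Float64),
        ];
        let r = row(vec![
            ValueData::TimestampMillisecondValue(1_000),
            ValueData::StringValue("host-1".to_string()),
            ValueData::F64Value(0.5),
        ]);
        assert_eq!(r.values.len(), schema.len());
        assert_eq!(schema[0].semantic_type, SemanticType::Timestamp as i32);
    }
}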
|
||||
@@ -32,6 +32,7 @@ pub enum PermissionReq<'a> {
|
||||
PromStoreRead,
|
||||
Otlp,
|
||||
LogWrite,
|
||||
BulkInsert,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -21,6 +21,7 @@ bytes.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-event-recorder.workspace = true
|
||||
common-frontend.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
@@ -44,6 +45,8 @@ moka = { workspace = true, features = ["future", "sync"] }
|
||||
partition.workspace = true
|
||||
paste.workspace = true
|
||||
prometheus.workspace = true
|
||||
promql-parser.workspace = true
|
||||
rand.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::rpc::procedure;
|
||||
use common_procedure::{ProcedureInfo, ProcedureState};
|
||||
use meta_client::MetaClientRef;
|
||||
|
||||
@@ -23,7 +23,8 @@ use common_catalog::consts::{
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{
|
||||
LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
|
||||
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
|
||||
ViewInfoCacheRef,
|
||||
};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
@@ -41,8 +42,9 @@ use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table::PartitionRules;
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -131,6 +133,8 @@ impl KvBackendCatalogManager {
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
|
||||
let mut phy_part_cols_not_in_logical_table = vec![];
|
||||
|
||||
// Remap partition key indices from physical table to logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
@@ -147,15 +151,30 @@ impl KvBackendCatalogManager {
|
||||
.get(physical_index)
|
||||
.and_then(|physical_column| {
|
||||
// Find the corresponding index in the logical table schema
|
||||
new_table_info
|
||||
let idx = new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_index_by_name(physical_column.name.as_str())
|
||||
.column_index_by_name(physical_column.name.as_str());
|
||||
if idx.is_none() {
|
||||
// this physical partition column is not present in the logical table
|
||||
phy_part_cols_not_in_logical_table
|
||||
.push(physical_column.name.clone());
|
||||
}
|
||||
|
||||
idx
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
let partition_rules = if !phy_part_cols_not_in_logical_table.is_empty() {
|
||||
Some(PartitionRules {
|
||||
extra_phy_cols_not_in_logical_table: phy_part_cols_not_in_logical_table,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let new_table = DistTable::table_partitioned(Arc::new(new_table_info), partition_rules);
|
||||
|
||||
return Ok(new_table);
|
||||
}
|
||||
@@ -325,6 +344,63 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, schema_name, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
|
||||
let table_cache: TableNameCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_name_cache",
|
||||
})?;
|
||||
|
||||
let table = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
if let Some(table) = table {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
// fall back to pg_catalog
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
let table_info_cache: TableInfoCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_info_cache",
|
||||
})?;
|
||||
table_info_cache
|
||||
.get_by_ref(&table_id)
|
||||
.await
|
||||
.context(GetTableCacheSnafu)
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
|
||||
@@ -25,7 +25,7 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::TableId;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -89,6 +89,23 @@ pub trait CatalogManager: Send + Sync {
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
/// Returns the table id of provided table ident.
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
Ok(self
|
||||
.table(catalog, schema, table_name, query_ctx)
|
||||
.await?
|
||||
.map(|t| t.table_info().ident.table_id))
|
||||
}
|
||||
|
||||
/// Returns the table of provided id.
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>>;
|
||||
|
||||
/// Returns the tables by table ids.
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
@@ -38,7 +38,7 @@ use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, Regis
|
||||
|
||||
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
|
||||
|
||||
/// Simple in-memory list of catalogs
|
||||
/// Simple in-memory list of catalogs used for tests.
|
||||
#[derive(Clone)]
|
||||
pub struct MemoryCatalogManager {
|
||||
/// Collection of catalogs containing schemas and ultimately Tables
|
||||
@@ -144,6 +144,18 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.flat_map(|(_, schema_entries)| schema_entries.values())
|
||||
.flat_map(|tables| tables.values())
|
||||
.find(|t| t.table_info().ident.table_id == table_id)
|
||||
.map(|t| t.table_info()))
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
|
||||
@@ -14,17 +14,24 @@
|
||||
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant, UNIX_EPOCH};
|
||||
|
||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||
use common_base::cancellation::CancellationHandle;
|
||||
use common_event_recorder::EventRecorderRef;
|
||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||
use common_telemetry::{debug, info, warn};
|
||||
use common_frontend::slow_query_event::SlowQueryEvent;
|
||||
use common_telemetry::logging::SlowQueriesRecordType;
|
||||
use common_telemetry::{debug, info, slow, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use rand::random;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error;
|
||||
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
||||
@@ -44,6 +51,23 @@ pub struct ProcessManager {
|
||||
frontend_selector: Option<MetaClientSelector>,
|
||||
}
|
||||
|
||||
/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
|
||||
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryStatement {
|
||||
Sql(Statement),
|
||||
Promql(EvalStmt),
|
||||
}
|
||||
|
||||
impl Display for QueryStatement {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
|
||||
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessManager {
|
||||
/// Create a [ProcessManager] instance with server address and kv client.
|
||||
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
|
||||
@@ -67,6 +91,7 @@ impl ProcessManager {
|
||||
query: String,
|
||||
client: String,
|
||||
query_id: Option<ProcessId>,
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
) -> Ticket {
|
||||
let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
|
||||
let process = ProcessInfo {
|
||||
@@ -93,6 +118,7 @@ impl ProcessManager {
|
||||
manager: self.clone(),
|
||||
id,
|
||||
cancellation_handle,
|
||||
_slow_query_timer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,6 +249,9 @@ pub struct Ticket {
|
||||
pub(crate) manager: ProcessManagerRef,
|
||||
pub(crate) id: ProcessId,
|
||||
pub cancellation_handle: Arc<CancellationHandle>,
|
||||
|
||||
// Keep the handle of the slow query timer to ensure it will trigger the event recording when dropped.
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
}
|
||||
|
||||
impl Drop for Ticket {
|
||||
@@ -263,6 +292,114 @@ impl Debug for CancellableProcess {
|
||||
}
|
||||
}
|
||||
|
||||
/// SlowQueryTimer is used to log a slow query when it's dropped.
|
||||
/// In drop(), it will check if the query is slow and send the slow query event to the handler.
|
||||
pub struct SlowQueryTimer {
|
||||
start: Instant,
|
||||
stmt: QueryStatement,
|
||||
threshold: Duration,
|
||||
sample_ratio: f64,
|
||||
record_type: SlowQueriesRecordType,
|
||||
recorder: EventRecorderRef,
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
pub fn new(
|
||||
stmt: QueryStatement,
|
||||
threshold: Duration,
|
||||
sample_ratio: f64,
|
||||
record_type: SlowQueriesRecordType,
|
||||
recorder: EventRecorderRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
stmt,
|
||||
threshold,
|
||||
sample_ratio,
|
||||
record_type,
|
||||
recorder,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
fn send_slow_query_event(&self, elapsed: Duration) {
|
||||
let mut slow_query_event = SlowQueryEvent {
|
||||
cost: elapsed.as_millis() as u64,
|
||||
threshold: self.threshold.as_millis() as u64,
|
||||
query: "".to_string(),
|
||||
|
||||
// The following fields are only used for PromQL queries.
|
||||
is_promql: false,
|
||||
promql_range: None,
|
||||
promql_step: None,
|
||||
promql_start: None,
|
||||
promql_end: None,
|
||||
};
|
||||
|
||||
match &self.stmt {
|
||||
QueryStatement::Promql(stmt) => {
|
||||
slow_query_event.is_promql = true;
|
||||
slow_query_event.query = stmt.expr.to_string();
|
||||
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
|
||||
|
||||
let start = stmt
|
||||
.start
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
let end = stmt
|
||||
.end
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
slow_query_event.promql_range = Some((end - start) as u64);
|
||||
slow_query_event.promql_start = Some(start);
|
||||
slow_query_event.promql_end = Some(end);
|
||||
}
|
||||
QueryStatement::Sql(stmt) => {
|
||||
slow_query_event.query = stmt.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
match self.record_type {
|
||||
// Send the slow query event to the event recorder to persist it as the system table.
|
||||
SlowQueriesRecordType::SystemTable => {
|
||||
self.recorder.record(Box::new(slow_query_event));
|
||||
}
|
||||
// Record the slow query in a specific logs file.
|
||||
SlowQueriesRecordType::Log => {
|
||||
slow!(
|
||||
cost = slow_query_event.cost,
|
||||
threshold = slow_query_event.threshold,
|
||||
query = slow_query_event.query,
|
||||
is_promql = slow_query_event.is_promql,
|
||||
promql_range = slow_query_event.promql_range,
|
||||
promql_step = slow_query_event.promql_step,
|
||||
promql_start = slow_query_event.promql_start,
|
||||
promql_end = slow_query_event.promql_end,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SlowQueryTimer {
|
||||
fn drop(&mut self) {
|
||||
// Calculate the elapsed duration since the timer was created.
|
||||
let elapsed = self.start.elapsed();
|
||||
if elapsed > self.threshold {
|
||||
// Only capture a portion of slow queries based on sample_ratio.
|
||||
// Generate a random number in [0, 1) and compare it with sample_ratio.
|
||||
if self.sample_ratio >= 1.0 || random::<f64>() <= self.sample_ratio {
|
||||
self.send_slow_query_event(elapsed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
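// A hedged construction sketch (not part of the change): attaching a timer to a
// SQL statement so that anything slower than five seconds is sampled at 10% and
// written to the slow-query log when the timer is dropped. `recorder` is assumed
// to be an EventRecorderRef obtained from the frontend's event recorder setup.
#[allow(dead_code)]
fn slow_query_timer_sketch(stmt: Statement, recorder: EventRecorderRef) -> SlowQueryTimer {
    SlowQueryTimer::new(
        QueryStatement::Sql(stmt),
        Duration::from_secs(5),
        0.1,
        SlowQueriesRecordType::Log,
        recorder,
    )
}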
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -278,6 +415,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(None).unwrap();
|
||||
@@ -301,6 +439,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
Some(custom_id),
|
||||
None,
|
||||
);
|
||||
|
||||
assert_eq!(ticket.id, custom_id);
|
||||
@@ -321,6 +460,7 @@ mod tests {
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let ticket2 = process_manager.clone().register_query(
|
||||
@@ -329,6 +469,7 @@ mod tests {
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(Some("public")).unwrap();
|
||||
@@ -350,6 +491,7 @@ mod tests {
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let _ticket2 = process_manager.clone().register_query(
|
||||
@@ -358,6 +500,7 @@ mod tests {
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Test listing processes for specific catalog
|
||||
@@ -384,6 +527,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
|
||||
process_manager.deregister_query("public".to_string(), ticket.id);
|
||||
@@ -400,6 +544,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
@@ -417,6 +562,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
let killed = process_manager
|
||||
@@ -462,6 +608,7 @@ mod tests {
|
||||
"SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
|
||||
"test_client".to_string(),
|
||||
Some(42),
|
||||
None,
|
||||
);
|
||||
|
||||
let processes = process_manager.local_processes(None).unwrap();
|
||||
@@ -488,6 +635,7 @@ mod tests {
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Process should be registered
|
||||
|
||||
@@ -26,12 +26,11 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::timestamp::TimestampMicrosecond;
|
||||
use datatypes::timestamp::TimestampSecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
|
||||
StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
|
||||
UInt64VectorBuilder,
|
||||
StringVectorBuilder, TimestampSecondVector, TimestampSecondVectorBuilder, UInt64VectorBuilder,
|
||||
};
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use partition::manager::PartitionInfo;
|
||||
@@ -129,17 +128,17 @@ impl InformationSchemaPartitions {
|
||||
ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"create_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"update_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"check_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
|
||||
@@ -212,7 +211,7 @@ struct InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder,
|
||||
partition_ordinal_positions: Int64VectorBuilder,
|
||||
partition_expressions: StringVectorBuilder,
|
||||
create_times: TimestampMicrosecondVectorBuilder,
|
||||
create_times: TimestampSecondVectorBuilder,
|
||||
partition_ids: UInt64VectorBuilder,
|
||||
}
|
||||
|
||||
@@ -232,7 +231,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
@@ -331,8 +330,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
.push(Some((index + 1) as i64));
|
||||
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_expressions.push(expression.as_deref());
|
||||
self.create_times.push(Some(TimestampMicrosecond::from(
|
||||
table_info.meta.created_on.timestamp_millis(),
|
||||
self.create_times.push(Some(TimestampSecond::from(
|
||||
table_info.meta.created_on.timestamp(),
|
||||
)));
|
||||
self.partition_ids.push(Some(partition.id.as_u64()));
|
||||
}
|
||||
@@ -349,8 +348,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
Arc::new(Int64Vector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampMicrosecondVector::from(vec![None])),
|
||||
let null_timestamp_second_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampSecondVector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let partition_methods = Arc::new(ConstantVector::new(
|
||||
@@ -380,8 +379,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
null_i64_vector.clone(),
|
||||
Arc::new(self.create_times.finish()),
|
||||
// TODO(dennis): supports update_time
|
||||
null_timestampmicrosecond_vector.clone(),
|
||||
null_timestampmicrosecond_vector,
|
||||
null_timestamp_second_vector.clone(),
|
||||
null_timestamp_second_vector,
|
||||
null_i64_vector,
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
|
||||
@@ -30,8 +30,7 @@ use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
StringVectorBuilder, TimestampMicrosecondVectorBuilder, UInt32VectorBuilder,
|
||||
UInt64VectorBuilder,
|
||||
StringVectorBuilder, TimestampSecondVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -107,17 +106,17 @@ impl InformationSchemaTables {
|
||||
ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
CREATE_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
UPDATE_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
CHECK_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
|
||||
@@ -194,9 +193,9 @@ struct InformationSchemaTablesBuilder {
|
||||
max_index_length: UInt64VectorBuilder,
|
||||
data_free: UInt64VectorBuilder,
|
||||
auto_increment: UInt64VectorBuilder,
|
||||
create_time: TimestampMicrosecondVectorBuilder,
|
||||
update_time: TimestampMicrosecondVectorBuilder,
|
||||
check_time: TimestampMicrosecondVectorBuilder,
|
||||
create_time: TimestampSecondVectorBuilder,
|
||||
update_time: TimestampSecondVectorBuilder,
|
||||
check_time: TimestampSecondVectorBuilder,
|
||||
table_collation: StringVectorBuilder,
|
||||
checksum: UInt64VectorBuilder,
|
||||
create_options: StringVectorBuilder,
|
||||
@@ -231,9 +230,9 @@ impl InformationSchemaTablesBuilder {
|
||||
max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
update_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_time: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
update_time: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_time: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -380,7 +379,7 @@ impl InformationSchemaTablesBuilder {
|
||||
self.create_options
|
||||
.push(Some(table_info.meta.options.to_string().as_ref()));
|
||||
self.create_time
|
||||
.push(Some(table_info.meta.created_on.timestamp_millis().into()));
|
||||
.push(Some(table_info.meta.created_on.timestamp().into()));
|
||||
|
||||
self.temporary
|
||||
.push(if matches!(table_type, TableType::Temporary) {
|
||||
|
||||
@@ -133,7 +133,7 @@ impl Predicate {
|
||||
let Expr::Column(c) = *expr else {
|
||||
unreachable!();
|
||||
};
|
||||
let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
|
||||
let Expr::Literal(ScalarValue::Utf8(Some(pattern)), _) = *pattern else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -148,8 +148,8 @@ impl Predicate {
|
||||
// left OP right
|
||||
Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
|
||||
// left == right
|
||||
(Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar, _), Operator::Eq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar, _)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -157,8 +157,8 @@ impl Predicate {
|
||||
Some(Predicate::Eq(c.name, v))
|
||||
}
|
||||
// left != right
|
||||
(Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar, _), Operator::NotEq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar, _)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -189,7 +189,7 @@ impl Predicate {
|
||||
let mut values = Vec::with_capacity(list.len());
|
||||
for scalar in list {
|
||||
// Safety: checked by `is_all_scalars`
|
||||
let Expr::Literal(scalar) = scalar else {
|
||||
let Expr::Literal(scalar, _) = scalar else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -237,7 +237,7 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
|
||||
}
|
||||
|
||||
fn is_string_literal(expr: &Expr) -> bool {
|
||||
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
|
||||
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_)), _))
|
||||
}
|
||||
|
||||
fn is_column(expr: &Expr) -> bool {
|
||||
@@ -286,14 +286,14 @@ impl Predicates {
|
||||
|
||||
/// Returns true when the values are all [`DfExpr::Literal`].
|
||||
fn is_all_scalars(list: &[Expr]) -> bool {
|
||||
list.iter().all(|v| matches!(v, Expr::Literal(_)))
|
||||
list.iter().all(|v| matches!(v, Expr::Literal(_, _)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datafusion::common::{Column, ScalarValue};
|
||||
use datafusion::common::Column;
|
||||
use datafusion::logical_expr::expr::InList;
|
||||
use datafusion::logical_expr::BinaryExpr;
|
||||
use datafusion::logical_expr::{BinaryExpr, Literal};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -378,7 +378,7 @@ mod tests {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
pattern: Box::new("%abc".lit()),
|
||||
case_insensitive: true,
|
||||
escape_char: None,
|
||||
});
|
||||
@@ -405,7 +405,7 @@ mod tests {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
pattern: Box::new("%abc".lit()),
|
||||
case_insensitive: false,
|
||||
escape_char: None,
|
||||
});
|
||||
@@ -425,7 +425,7 @@ mod tests {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: true,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
pattern: Box::new("%abc".lit()),
|
||||
case_insensitive: true,
|
||||
escape_char: None,
|
||||
});
|
||||
@@ -440,10 +440,6 @@ mod tests {
|
||||
Expr::Column(Column::from_name(name))
|
||||
}
|
||||
|
||||
fn string_literal(v: &str) -> Expr {
|
||||
Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||
}
|
||||
|
||||
fn match_string_value(v: &Value, expected: &str) -> bool {
|
||||
matches!(v, Value::String(bs) if bs.as_utf8() == expected)
|
||||
}
|
||||
@@ -463,13 +459,13 @@ mod tests {
|
||||
let expr1 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("a")),
|
||||
op: Operator::Eq,
|
||||
right: Box::new(string_literal("a_value")),
|
||||
right: Box::new("a_value".lit()),
|
||||
});
|
||||
|
||||
let expr2 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("b")),
|
||||
op: Operator::NotEq,
|
||||
right: Box::new(string_literal("b_value")),
|
||||
right: Box::new("b_value".lit()),
|
||||
});
|
||||
|
||||
(expr1, expr2)
|
||||
@@ -508,7 +504,7 @@ mod tests {
|
||||
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
list: vec!["a1".lit(), "a2".lit()],
|
||||
negated: false,
|
||||
});
|
||||
|
||||
@@ -518,7 +514,7 @@ mod tests {
|
||||
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
list: vec!["a1".lit(), "a2".lit()],
|
||||
negated: true,
|
||||
});
|
||||
let inlist_p = Predicate::from_expr(inlist_expr).unwrap();
|
||||
|
||||
@@ -32,7 +32,7 @@ use dummy_catalog::DummyCatalogList;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
|
||||
CastManagerSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
|
||||
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
ViewPlanColumnsChangedSnafu,
|
||||
};
|
||||
@@ -199,10 +199,10 @@ impl DfTableSourceProvider {
|
||||
logical_plan
|
||||
};
|
||||
|
||||
Ok(Arc::new(
|
||||
ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
|
||||
.context(DatafusionSnafu)?,
|
||||
))
|
||||
Ok(Arc::new(ViewTable::new(
|
||||
logical_plan,
|
||||
Some(view_info.definition.to_string()),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -66,6 +66,9 @@ pub struct BenchTableMetadataCommand {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
postgres_schema: Option<String>,
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
|
||||
@@ -19,8 +19,9 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use meta_srv::bootstrap::create_etcd_client;
|
||||
use meta_srv::bootstrap::create_etcd_client_with_tls;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
|
||||
use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
|
||||
|
||||
@@ -55,9 +56,47 @@ pub(crate) struct StoreConfig {
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
#[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
|
||||
meta_table_name: String,
|
||||
|
||||
/// Optional PostgreSQL schema for metadata table (defaults to current search_path if unset).
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
meta_schema_name: Option<String>,
|
||||
/// TLS mode for backend store connections (etcd, PostgreSQL, MySQL)
|
||||
#[clap(long = "backend-tls-mode", value_enum, default_value = "disable")]
|
||||
backend_tls_mode: TlsMode,
|
||||
|
||||
/// Path to TLS certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-cert-path", default_value = "")]
|
||||
backend_tls_cert_path: String,
|
||||
|
||||
/// Path to TLS private key file for backend store connections
|
||||
#[clap(long = "backend-tls-key-path", default_value = "")]
|
||||
backend_tls_key_path: String,
|
||||
|
||||
/// Path to TLS CA certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-ca-cert-path", default_value = "")]
|
||||
backend_tls_ca_cert_path: String,
|
||||
|
||||
/// Enable watching TLS certificate files for changes
|
||||
#[clap(long = "backend-tls-watch")]
|
||||
backend_tls_watch: bool,
|
||||
}
|
||||
|
||||
impl StoreConfig {
|
||||
pub fn tls_config(&self) -> Option<TlsOption> {
|
||||
if self.backend_tls_mode != TlsMode::Disable {
|
||||
Some(TlsOption {
|
||||
mode: self.backend_tls_mode.clone(),
|
||||
cert_path: self.backend_tls_cert_path.clone(),
|
||||
key_path: self.backend_tls_key_path.clone(),
|
||||
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
|
||||
watch: self.backend_tls_watch,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a [`KvBackendRef`] from the store configuration.
|
||||
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
|
||||
let max_txn_ops = self.max_txn_ops;
|
||||
@@ -67,7 +106,8 @@ impl StoreConfig {
|
||||
} else {
|
||||
let kvbackend = match self.backend {
|
||||
BackendImpl::EtcdStore => {
|
||||
let etcd_client = create_etcd_client(store_addrs)
|
||||
let tls_config = self.tls_config();
|
||||
let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
|
||||
@@ -75,11 +115,14 @@ impl StoreConfig {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
BackendImpl::PostgresStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
|
||||
let tls_config = self.tls_config();
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, tls_config)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let schema_name = self.meta_schema_name.as_deref();
|
||||
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
|
||||
pool,
|
||||
schema_name,
|
||||
table_name,
|
||||
max_txn_ops,
|
||||
)
|
||||
|
||||
@@ -74,11 +74,19 @@ pub fn make_create_region_request_for_peer(
|
||||
let catalog = &create_table_expr.catalog_name;
|
||||
let schema = &create_table_expr.schema_name;
|
||||
let storage_path = region_storage_path(catalog, schema);
|
||||
let partition_exprs = region_routes
|
||||
.iter()
|
||||
.map(|r| (r.region.id.region_number(), r.region.partition_expr()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for region_number in ®ions_on_this_peer {
|
||||
let region_id = RegionId::new(logical_table_id, *region_number);
|
||||
let region_request =
|
||||
request_builder.build_one(region_id, storage_path.clone(), &HashMap::new());
|
||||
let region_request = request_builder.build_one(
|
||||
region_id,
|
||||
storage_path.clone(),
|
||||
&HashMap::new(),
|
||||
&partition_exprs,
|
||||
);
|
||||
requests.push(region_request);
|
||||
}
|
||||
|
||||
|
||||
@@ -29,6 +29,7 @@ datatypes.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
humantime.workspace = true
|
||||
lazy_static.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot.workspace = true
|
||||
@@ -38,6 +39,7 @@ query.workspace = true
|
||||
rand.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
substrait.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-stream = { workspace = true, features = ["net"] }
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
|
||||
use common_meta::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, FlownodeRef};
|
||||
use common_meta::peer::Peer;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
|
||||
@@ -45,7 +45,7 @@ impl Debug for NodeClients {
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NodeManager for NodeClients {
|
||||
impl DatanodeManager for NodeClients {
|
||||
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
|
||||
let client = self.get_client(datanode).await;
|
||||
|
||||
@@ -60,7 +60,10 @@ impl NodeManager for NodeClients {
|
||||
*accept_compression,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl FlownodeManager for NodeClients {
|
||||
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {
|
||||
let client = self.get_client(flownode).await;
|
||||
|
||||
|
||||
@@ -75,12 +75,24 @@ pub struct Database {
}

pub struct DatabaseClient {
pub addr: String,
pub inner: GreptimeDatabaseClient<Channel>,
}

impl DatabaseClient {
/// Returns a closure that logs the error when the request fails.
pub fn inspect_err<'a>(&'a self, context: &'a str) -> impl Fn(&tonic::Status) + 'a {
let addr = &self.addr;
move |status| {
error!("Failed to {context} request, peer: {addr}, status: {status:?}");
}
}
}

fn make_database_client(client: &Client) -> Result<DatabaseClient> {
let (_, channel) = client.find_channel()?;
let (addr, channel) = client.find_channel()?;
Ok(DatabaseClient {
addr,
inner: GreptimeDatabaseClient::new(channel)
.max_decoding_message_size(client.max_grpc_recv_message_size())
.max_encoding_message_size(client.max_grpc_send_message_size()),
@@ -167,14 +179,19 @@ impl Database {
requests: InsertRequests,
hints: &[(&str, &str)],
) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut client = make_database_client(&self.client)?;
let request = self.to_rpc_request(Request::Inserts(requests));

let mut request = tonic::Request::new(request);
let metadata = request.metadata_mut();
Self::put_hints(metadata, hints)?;

let response = client.handle(request).await?.into_inner();
let response = client
.inner
.handle(request)
.await
.inspect_err(client.inspect_err("insert_with_hints"))?
.into_inner();
from_grpc_response(response)
}

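Note (not part of the diff): `DatabaseClient::inspect_err` leans on `Result::inspect_err`, so call sites keep their `?` flow while still logging which peer failed. A minimal sketch of the same pattern with plain std types; `Peer` and `do_request` are invented stand-ins:

// Sketch only: illustrates the logging-closure pattern, not the actual client types.
struct Peer {
    addr: String,
}

impl Peer {
    /// Returns a closure that logs the failing peer, mirroring DatabaseClient::inspect_err.
    fn inspect_err<'a>(&'a self, context: &'a str) -> impl Fn(&std::io::Error) + 'a {
        move |err| eprintln!("Failed to {context} request, peer: {}, error: {err}", self.addr)
    }
}

fn do_request() -> std::io::Result<u32> {
    Ok(42)
}

fn call(peer: &Peer) -> std::io::Result<u32> {
    // Result::inspect_err runs the closure on Err and returns the Result unchanged,
    // so `?` still propagates the original error after the log line is emitted.
    do_request().inspect_err(peer.inspect_err("handle"))
}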
@@ -189,14 +206,19 @@ impl Database {
requests: RowInsertRequests,
hints: &[(&str, &str)],
) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut client = make_database_client(&self.client)?;
let request = self.to_rpc_request(Request::RowInserts(requests));

let mut request = tonic::Request::new(request);
let metadata = request.metadata_mut();
Self::put_hints(metadata, hints)?;

let response = client.handle(request).await?.into_inner();
let response = client
.inner
.handle(request)
.await
.inspect_err(client.inspect_err("row_inserts_with_hints"))?
.into_inner();
from_grpc_response(response)
}

@@ -217,9 +239,14 @@ impl Database {

/// Make a request to the database.
pub async fn handle(&self, request: Request) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut client = make_database_client(&self.client)?;
let request = self.to_rpc_request(request);
let response = client.handle(request).await?.into_inner();
let response = client
.inner
.handle(request)
.await
.inspect_err(client.inspect_err("handle"))?
.into_inner();
from_grpc_response(response)
}

@@ -231,7 +258,7 @@ impl Database {
max_retries: u32,
hints: &[(&str, &str)],
) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut client = make_database_client(&self.client)?;
let mut retries = 0;

let request = self.to_rpc_request(request);
@@ -240,7 +267,11 @@ impl Database {
let mut tonic_request = tonic::Request::new(request.clone());
let metadata = tonic_request.metadata_mut();
Self::put_hints(metadata, hints)?;
let raw_response = client.handle(tonic_request).await;
let raw_response = client
.inner
.handle(tonic_request)
.await
.inspect_err(client.inspect_err("handle"));
match (raw_response, retries < max_retries) {
(Ok(resp), _) => return from_grpc_response(resp.into_inner()),
(Err(err), true) => {
@@ -442,8 +473,8 @@ impl Database {
}) = &self.ctx.auth_header
{
let encoded = BASE64_STANDARD.encode(format!("{username}:{password}"));
let value =
MetadataValue::from_str(&encoded).context(InvalidTonicMetadataValueSnafu)?;
let value = MetadataValue::from_str(&format!("Basic {encoded}"))
.context(InvalidTonicMetadataValueSnafu)?;
request.metadata_mut().insert("x-greptime-auth", value);
}

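Note (not part of the diff): after this change the client sends a standard HTTP Basic credential in the `x-greptime-auth` metadata instead of the bare base64 string. A small sketch of the value being built; `basic_auth_value` is a hypothetical helper, only the format mirrors the diff:

use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use base64::Engine;

// For ("u", "p") this returns "Basic dTpw" ("dTpw" is base64 of "u:p").
fn basic_auth_value(username: &str, password: &str) -> String {
    let encoded = BASE64_STANDARD.encode(format!("{username}:{password}"));
    format!("Basic {encoded}")
}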
@@ -133,6 +133,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("External error"))]
External {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -154,6 +161,7 @@ impl ErrorExt for Error {
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::InvalidTonicMetadataValue { .. } => StatusCode::InvalidArguments,
Error::ConvertSchema { source, .. } => source.status_code(),
Error::External { source, .. } => source.status_code(),
}
}

src/client/src/inserter.rs (new file, 68 lines)
@@ -0,0 +1,68 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use api::v1::RowInsertRequests;
use humantime::format_duration;
use store_api::mito_engine_options::{APPEND_MODE_KEY, TTL_KEY, TWCS_TIME_WINDOW};

use crate::error::Result;

/// Context holds the catalog and schema information.
pub struct Context<'a> {
/// The catalog name.
pub catalog: &'a str,
/// The schema name.
pub schema: &'a str,
}

/// Options for insert operations.
#[derive(Debug, Clone, Copy)]
pub struct InsertOptions {
/// Time-to-live for the inserted data.
pub ttl: Duration,
/// Whether to use append mode for the insert.
pub append_mode: bool,
/// Time window for twcs compaction.
pub twcs_compaction_time_window: Option<Duration>,
}

impl InsertOptions {
/// Converts the insert options to a list of key-value string hints.
pub fn to_hints(&self) -> Vec<(&'static str, String)> {
let mut hints = vec![
(TTL_KEY, format_duration(self.ttl).to_string()),
(APPEND_MODE_KEY, self.append_mode.to_string()),
];

if let Some(time_window) = self.twcs_compaction_time_window {
hints.push((TWCS_TIME_WINDOW, format_duration(time_window).to_string()));
}

hints
}
}

/// [`Inserter`] allows different components to share a unified mechanism for inserting data.
///
/// An implementation may perform the insert locally (e.g., via a direct procedure call) or
/// delegate/forward it to another node for processing (e.g., MetaSrv forwarding to an
/// available Frontend).
#[async_trait::async_trait]
pub trait Inserter: Send + Sync {
async fn insert_rows(&self, context: &Context<'_>, requests: RowInsertRequests) -> Result<()>;

fn set_options(&mut self, options: &InsertOptions);
}

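Note (not part of the diff): a rough usage sketch of the new `Inserter` trait and `InsertOptions`. `NoopInserter` is made up, and the `client::...` paths are assumed from the file location above:

use std::time::Duration;

use api::v1::RowInsertRequests;
use client::error::Result;
use client::inserter::{Context, InsertOptions, Inserter};

/// A do-nothing Inserter, just to show the shape of an implementation.
#[derive(Default)]
struct NoopInserter {
    options: Option<InsertOptions>,
}

#[async_trait::async_trait]
impl Inserter for NoopInserter {
    async fn insert_rows(&self, context: &Context<'_>, requests: RowInsertRequests) -> Result<()> {
        // A real implementation would forward `requests` to a Frontend or write locally.
        let _ = (context.catalog, context.schema, requests.inserts.len());
        Ok(())
    }

    fn set_options(&mut self, options: &InsertOptions) {
        self.options = Some(*options);
    }
}

/// The hints come back as TTL_KEY/APPEND_MODE_KEY pairs, plus TWCS_TIME_WINDOW when a
/// compaction window is set, with the durations rendered by humantime.
fn example_hints() -> Vec<(&'static str, String)> {
    let options = InsertOptions {
        ttl: Duration::from_secs(60 * 60 * 24 * 90),
        append_mode: true,
        twcs_compaction_time_window: Some(Duration::from_secs(60 * 60 * 24)),
    };
    options.to_hints()
}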
@@ -19,6 +19,7 @@ pub mod client_manager;
pub mod database;
pub mod error;
pub mod flow;
pub mod inserter;
pub mod load_balance;
mod metrics;
pub mod region;

@@ -38,6 +38,7 @@ common-config.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-mem-prof.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-procedure.workspace = true
@@ -102,3 +103,6 @@ tempfile.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[package.metadata.cargo-udeps.ignore]
development = ["rexpect"]

@@ -28,7 +28,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::{create_resource_limit_metrics, log_versions};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

/// Builder for Datanode instance.
pub struct InstanceBuilder {
@@ -68,6 +68,7 @@ impl InstanceBuilder {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&dn_opts.memory);
create_resource_limit_metrics(APP_NAME);

plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)

@@ -46,7 +46,7 @@ use crate::error::{
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

pub const APP_NAME: &str = "greptime-flownode";

@@ -280,6 +280,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Flownode start command: {:#?}", self);
@@ -375,7 +376,8 @@ impl StartCommand {
flow_auth_header,
opts.query.clone(),
opts.flow.batching_mode.clone(),
);
)
.context(StartFlownodeSnafu)?;
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),

@@ -41,13 +41,14 @@ use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::addrs;
use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{self, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;

@@ -144,6 +145,14 @@ pub struct StartCommand {
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
#[clap(long, alias = "rpc-hostname")]
rpc_server_addr: Option<String>,
/// The address to bind the internal gRPC server.
#[clap(long, alias = "internal-rpc-addr")]
internal_rpc_bind_addr: Option<String>,
/// The address advertised to the metasrv, and used for connections from outside the host.
/// If left empty or unset, the server will automatically use the IP address of the first network interface
/// on the host, with the same port number as the one specified in `internal_rpc_bind_addr`.
#[clap(long, alias = "internal-rpc-hostname")]
internal_rpc_server_addr: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
@@ -241,6 +250,31 @@ impl StartCommand {
opts.grpc.server_addr.clone_from(addr);
}

if let Some(addr) = &self.internal_rpc_bind_addr {
if let Some(internal_grpc) = &mut opts.internal_grpc {
internal_grpc.bind_addr = addr.to_string();
} else {
let grpc_options = GrpcOptions {
bind_addr: addr.to_string(),
..Default::default()
};

opts.internal_grpc = Some(grpc_options);
}
}

if let Some(addr) = &self.internal_rpc_server_addr {
if let Some(internal_grpc) = &mut opts.internal_grpc {
internal_grpc.server_addr = addr.to_string();
} else {
let grpc_options = GrpcOptions {
server_addr: addr.to_string(),
..Default::default()
};
opts.internal_grpc = Some(grpc_options);
}
}

if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr);
@@ -279,10 +313,11 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.clone(),
opts.component.slow_query.as_ref(),
Some(&opts.component.slow_query),
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Frontend start command: {:#?}", self);
@@ -447,6 +482,8 @@ mod tests {
http_addr: Some("127.0.0.1:1234".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
internal_rpc_bind_addr: Some("127.0.0.1:4010".to_string()),
internal_rpc_server_addr: Some("10.0.0.24:4010".to_string()),
influxdb_enable: Some(false),
disable_dashboard: Some(false),
..Default::default()
@@ -459,6 +496,10 @@ mod tests {
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");

let internal_grpc = opts.internal_grpc.as_ref().unwrap();
assert_eq!(internal_grpc.bind_addr, "127.0.0.1:4010");
assert_eq!(internal_grpc.server_addr, "10.0.0.24:4010");

let default_opts = FrontendOptions::default().component;

assert_eq!(opts.grpc.bind_addr, default_opts.grpc.bind_addr);

@@ -15,7 +15,10 @@
#![feature(assert_matches, let_chains)]

use async_trait::async_trait;
use common_telemetry::{error, info};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_mem_prof::activate_heap_profile;
use common_telemetry::{error, info, warn};
use stat::{get_cpu_limit, get_memory_limit};

use crate::error::Result;
@@ -145,3 +148,20 @@ fn log_env_flags() {
info!("argument: {}", argument);
}
}

pub fn maybe_activate_heap_profile(memory_options: &common_options::memory::MemoryOptions) {
if memory_options.enable_heap_profiling {
match activate_heap_profile() {
Ok(()) => {
info!("Heap profile is active");
}
Err(err) => {
if err.status_code() == StatusCode::Unsupported {
info!("Heap profile is not supported");
} else {
warn!(err; "Failed to activate heap profile");
}
}
}
}
}

@@ -30,7 +30,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;

@@ -325,6 +325,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Metasrv start command: {:#?}", self);

@@ -34,17 +34,19 @@ use common_meta::cluster::{NodeInfo, NodeStatus};
use common_meta::datanode::RegionStat;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::procedure_executor::LocalProcedureExecutor;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::region_registry::LeaderRegionRegistry;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_options::memory::MemoryOptions;
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info;
use common_telemetry::logging::{
@@ -83,7 +85,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, error, log_versions, App};
use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App};

pub const APP_NAME: &str = "greptime-standalone";

@@ -155,8 +157,9 @@ pub struct StandaloneOptions {
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
pub max_in_flight_write_bytes: Option<ReadableSize>,
pub slow_query: Option<SlowQueryOptions>,
pub slow_query: SlowQueryOptions,
pub query: QueryOptions,
pub memory: MemoryOptions,
}

impl Default for StandaloneOptions {
@@ -188,8 +191,9 @@ impl Default for StandaloneOptions {
init_regions_in_background: false,
init_regions_parallelism: 16,
max_in_flight_write_bytes: None,
slow_query: Some(SlowQueryOptions::default()),
slow_query: SlowQueryOptions::default(),
query: QueryOptions::default(),
memory: MemoryOptions::default(),
}
}
}
@@ -482,10 +486,11 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
None,
opts.component.slow_query.as_ref(),
Some(&opts.component.slow_query),
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Standalone start command: {:#?}", self);
@@ -636,9 +641,8 @@ impl StartCommand {
flow_metadata_allocator: flow_metadata_allocator.clone(),
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
};
let procedure_manager_c = procedure_manager.clone();

let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
#[cfg(feature = "enterprise")]
let ddl_manager = {
@@ -646,7 +650,11 @@ impl StartCommand {
plugins.get();
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
};
let ddl_task_executor: ProcedureExecutorRef = Arc::new(ddl_manager);

let procedure_executor = Arc::new(LocalProcedureExecutor::new(
Arc::new(ddl_manager),
procedure_manager.clone(),
));

let fe_instance = FrontendBuilder::new(
fe_opts.clone(),
@@ -654,7 +662,7 @@ impl StartCommand {
layered_cache_registry.clone(),
catalog_manager.clone(),
node_manager.clone(),
ddl_task_executor.clone(),
procedure_executor.clone(),
process_manager,
)
.with_plugin(plugins.clone())
@@ -679,7 +687,7 @@ impl StartCommand {
catalog_manager.clone(),
kv_backend.clone(),
layered_cache_registry.clone(),
ddl_task_executor.clone(),
procedure_executor,
node_manager,
)
.await
@@ -826,6 +834,7 @@ impl InformationExtension for StandaloneInformationExtension {
region_manifest: region_stat.manifest.into(),
data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
metadata_topic_latest_entry_id: region_stat.metadata_topic_latest_entry_id,
write_bytes: 0,
}
})
.collect::<Vec<_>>();

@@ -34,6 +34,7 @@ use query::options::QueryOptions;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use store_api::path_utils::WAL_DIR;

#[allow(deprecated)]
@@ -145,6 +146,7 @@ fn test_load_frontend_example_config() {
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:4001")
.with_server_addr("127.0.0.1:4001"),
internal_grpc: Some(GrpcOptions::internal_default()),
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
@@ -190,6 +192,14 @@ fn test_load_metasrv_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
key_path: String::new(),
ca_cert_path: String::new(),
watch: false,
}),
meta_schema_name: Some("greptime_schema".to_string()),
..Default::default()
},
..Default::default()
@@ -245,6 +255,7 @@ fn test_load_flownode_example_config() {
..Default::default()
},
user_provider: None,
memory: Default::default(),
},
..Default::default()
};
@@ -298,6 +309,7 @@ fn test_load_standalone_example_config() {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},

..Default::default()
},
..Default::default()

@@ -20,6 +20,7 @@ pub mod range_read;
#[allow(clippy::all)]
pub mod readable_size;
pub mod secrets;
pub mod serde;

pub type AffectedRows = usize;

@@ -12,16 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Format to store in parquet.
//!
//! We store two additional internal columns at last:
//! - `__sequence`, the sequence number of a row. Type: uint64
//! - `__op_type`, the op type of the row. Type: uint8
//!
//! We store other columns in the same order as [RegionMetadata::field_columns()](store_api::metadata::RegionMetadata::field_columns()).
//!
use serde::{Deserialize, Deserializer};

/// Number of columns that have fixed positions.
///
/// Contains all internal columns.
pub(crate) const PLAIN_FIXED_POS_COLUMN_NUM: usize = 2;
/// Deserialize an empty string as the default value.
pub fn empty_string_as_default<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: Default + Deserialize<'de>,
{
let s = String::deserialize(deserializer)?;

if s.is_empty() {
Ok(T::default())
} else {
T::deserialize(serde::de::value::StringDeserializer::<D::Error>::new(s))
.map_err(serde::de::Error::custom)
}
}

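Note (not part of the diff): `empty_string_as_default` is meant to be wired in through serde's `deserialize_with`. A self-contained sketch of that usage; the `Mode` and `Options` types are invented for illustration:

use serde::Deserialize;

#[derive(Debug, Default, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Mode {
    #[default]
    Auto,
    Manual,
}

#[derive(Debug, Deserialize, PartialEq)]
struct Options {
    // Reuses the helper defined in the same module above.
    #[serde(deserialize_with = "empty_string_as_default")]
    mode: Mode,
}

fn demo() {
    // An empty string falls back to Mode::default() instead of failing to parse.
    let opts: Options = serde_json::from_str(r#"{"mode": ""}"#).unwrap();
    assert_eq!(opts, Options { mode: Mode::Auto });

    // A non-empty string is re-deserialized into the target type as usual.
    let opts: Options = serde_json::from_str(r#"{"mode": "manual"}"#).unwrap();
    assert_eq!(opts, Options { mode: Mode::Manual });
}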
@@ -25,19 +25,17 @@ common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
datafusion.workspace = true
datafusion-orc.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
object_store_opendal.workspace = true
orc-rust = { git = "https://github.com/datafusion-contrib/orc-rust", rev = "3134cab581a8e91b942d6a23aca2916ea965f6bb", default-features = false, features = [
"async",
] }
orc-rust = { version = "0.6.3", default-features = false, features = ["async"] }
parquet.workspace = true
paste.workspace = true
rand.workspace = true
regex = "1.7"
serde.workspace = true
snafu.workspace = true
@@ -47,6 +45,4 @@ tokio-util.workspace = true
url = "2.3"

[dev-dependencies]
common-telemetry.workspace = true
common-test-util.workspace = true
uuid.workspace = true

@@ -12,16 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use arrow_schema::{ArrowError, Schema, SchemaRef};
use arrow_schema::Schema;
use async_trait::async_trait;
use bytes::Bytes;
use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::{DataFusionError, Result as DfResult};
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt, TryStreamExt};
use futures::FutureExt;
use object_store::ObjectStore;
use orc_rust::arrow_reader::ArrowReaderBuilder;
use orc_rust::async_arrow_reader::ArrowStreamReader;
@@ -97,67 +92,6 @@ impl FileFormat for OrcFormat {
}
}

#[derive(Debug, Clone)]
pub struct OrcOpener {
object_store: Arc<ObjectStore>,
output_schema: SchemaRef,
projection: Option<Vec<usize>>,
}

impl OrcOpener {
pub fn new(
object_store: ObjectStore,
output_schema: SchemaRef,
projection: Option<Vec<usize>>,
) -> Self {
Self {
object_store: Arc::from(object_store),
output_schema,
projection,
}
}
}

impl FileOpener for OrcOpener {
fn open(&self, meta: FileMeta) -> DfResult<FileOpenFuture> {
let object_store = self.object_store.clone();
let projected_schema = if let Some(projection) = &self.projection {
let projected_schema = self
.output_schema
.project(projection)
.map_err(|e| DataFusionError::External(Box::new(e)))?;
Arc::new(projected_schema)
} else {
self.output_schema.clone()
};
let projection = self.projection.clone();
Ok(Box::pin(async move {
let path = meta.location().to_string();

let meta = object_store
.stat(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let reader = object_store
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let stream_reader =
new_orc_stream_reader(ReaderAdapter::new(reader, meta.content_length()))
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let stream =
RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);

let adopted = stream.map_err(|e| ArrowError::ExternalError(Box::new(e)));
Ok(adopted.boxed())
}))
}
}

#[cfg(test)]
mod tests {
use common_test_util::find_workspace_path;

@@ -31,6 +31,7 @@ use datatypes::schema::SchemaRef;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{FuturesAsyncReader, ObjectStore};
use parquet::arrow::arrow_reader::ArrowReaderOptions;
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, Encoding, ZstdLevel};
use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
@@ -65,7 +66,7 @@ impl FileFormat for ParquetFormat {
.compat();

let metadata = reader
.get_metadata()
.get_metadata(None)
.await
.context(error::ReadParquetSnafuSnafu)?;

@@ -146,7 +147,7 @@ impl LazyParquetFileReader {
impl AsyncFileReader for LazyParquetFileReader {
fn get_bytes(
&mut self,
range: std::ops::Range<usize>,
range: std::ops::Range<u64>,
) -> BoxFuture<'_, ParquetResult<bytes::Bytes>> {
Box::pin(async move {
self.maybe_initialize()
@@ -157,13 +158,16 @@ impl AsyncFileReader for LazyParquetFileReader {
})
}

fn get_metadata(&mut self) -> BoxFuture<'_, ParquetResult<Arc<ParquetMetaData>>> {
fn get_metadata<'a>(
&'a mut self,
options: Option<&'a ArrowReaderOptions>,
) -> BoxFuture<'a, parquet::errors::Result<Arc<ParquetMetaData>>> {
Box::pin(async move {
self.maybe_initialize()
.await
.map_err(|e| ParquetError::External(Box::new(e)))?;
// Safety: Must initialized
self.reader.as_mut().unwrap().get_metadata().await
self.reader.as_mut().unwrap().get_metadata(options).await
})
}
}
@@ -192,7 +196,10 @@ pub async fn stream_to_parquet(
concurrency: usize,
) -> Result<usize> {
let write_props = column_wise_config(
WriterProperties::builder().set_compression(Compression::ZSTD(ZstdLevel::default())),
WriterProperties::builder()
.set_compression(Compression::ZSTD(ZstdLevel::default()))
.set_statistics_truncate_length(None)
.set_column_index_truncate_length(None),
schema,
)
.build();

@@ -19,35 +19,39 @@ use std::vec;

use common_test_util::find_workspace_path;
use datafusion::assert_batches_eq;
use datafusion::datasource::file_format::file_compression_type::FileCompressionType;
use datafusion::datasource::physical_plan::{
CsvConfig, CsvOpener, FileOpener, FileScanConfig, FileStream, JsonOpener, ParquetExec,
CsvSource, FileScanConfig, FileSource, FileStream, JsonSource, ParquetSource,
};
use datafusion::datasource::source::DataSourceExec;
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::SessionContext;
use datafusion_orc::OrcSource;
use futures::StreamExt;
use object_store::ObjectStore;

use super::FORMAT_TYPE;
use crate::file_format::orc::{OrcFormat, OrcOpener};
use crate::file_format::parquet::DefaultParquetFileReaderFactory;
use crate::file_format::{FileFormat, Format};
use crate::file_format::{FileFormat, Format, OrcFormat};
use crate::test_util::{scan_config, test_basic_schema, test_store};
use crate::{error, test_util};

struct Test<'a, T: FileOpener> {
struct Test<'a> {
config: FileScanConfig,
opener: T,
file_source: Arc<dyn FileSource>,
expected: Vec<&'a str>,
}

impl<T: FileOpener> Test<'_, T> {
pub async fn run(self) {
impl Test<'_> {
async fn run(self, store: &ObjectStore) {
let store = Arc::new(object_store_opendal::OpendalStore::new(store.clone()));
let file_opener = self.file_source.create_file_opener(store, &self.config, 0);

let result = FileStream::new(
&self.config,
0,
self.opener,
file_opener,
&ExecutionPlanMetricsSet::new(),
)
.unwrap()
@@ -62,26 +66,16 @@ impl<T: FileOpener> Test<'_, T> {
#[tokio::test]
async fn test_json_opener() {
let store = test_store("/");
let store = Arc::new(object_store_opendal::OpendalStore::new(store));

let schema = test_basic_schema();

let json_opener = || {
JsonOpener::new(
test_util::TEST_BATCH_SIZE,
schema.clone(),
FileCompressionType::UNCOMPRESSED,
store.clone(),
)
};
let file_source = Arc::new(JsonSource::new()).with_batch_size(test_util::TEST_BATCH_SIZE);

let path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
.display()
.to_string();
let tests = [
Test {
config: scan_config(schema.clone(), None, path),
opener: json_opener(),
config: scan_config(schema.clone(), None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+-----+-------+",
"| num | str |",
@@ -93,8 +87,8 @@ async fn test_json_opener() {
],
},
Test {
config: scan_config(schema.clone(), Some(1), path),
opener: json_opener(),
config: scan_config(schema, Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+-----+------+",
"| num | str |",
@@ -106,37 +100,26 @@ async fn test_json_opener() {
];

for test in tests {
test.run().await;
test.run(&store).await;
}
}

#[tokio::test]
async fn test_csv_opener() {
let store = test_store("/");
let store = Arc::new(object_store_opendal::OpendalStore::new(store));

let schema = test_basic_schema();
let path = &find_workspace_path("/src/common/datasource/tests/csv/basic.csv")
.display()
.to_string();
let csv_config = Arc::new(CsvConfig::new(
test_util::TEST_BATCH_SIZE,
schema.clone(),
None,
true,
b',',
b'"',
None,
store,
None,
));

let csv_opener = || CsvOpener::new(csv_config.clone(), FileCompressionType::UNCOMPRESSED);
let file_source = CsvSource::new(true, b',', b'"')
.with_batch_size(test_util::TEST_BATCH_SIZE)
.with_schema(schema.clone());

let tests = [
Test {
config: scan_config(schema.clone(), None, path),
opener: csv_opener(),
config: scan_config(schema.clone(), None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+-----+-------+",
"| num | str |",
@@ -148,8 +131,8 @@ async fn test_csv_opener() {
],
},
Test {
config: scan_config(schema.clone(), Some(1), path),
opener: csv_opener(),
config: scan_config(schema, Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+-----+------+",
"| num | str |",
@@ -161,7 +144,7 @@ async fn test_csv_opener() {
];

for test in tests {
test.run().await;
test.run(&store).await;
}
}

@@ -174,12 +157,12 @@ async fn test_parquet_exec() {
let path = &find_workspace_path("/src/common/datasource/tests/parquet/basic.parquet")
.display()
.to_string();
let base_config = scan_config(schema.clone(), None, path);

let exec = ParquetExec::builder(base_config)
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)))
.build();
let parquet_source = ParquetSource::default()
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));

let config = scan_config(schema, None, path, Arc::new(parquet_source));
let exec = DataSourceExec::from_data_source(config);
let ctx = SessionContext::new();

let context = Arc::new(TaskContext::from(&ctx));
@@ -208,20 +191,18 @@ async fn test_parquet_exec() {

#[tokio::test]
async fn test_orc_opener() {
let root = find_workspace_path("/src/common/datasource/tests/orc")
let path = &find_workspace_path("/src/common/datasource/tests/orc/test.orc")
.display()
.to_string();
let store = test_store(&root);
let schema = OrcFormat.infer_schema(&store, "test.orc").await.unwrap();
let schema = Arc::new(schema);

let orc_opener = OrcOpener::new(store.clone(), schema.clone(), None);
let path = "test.orc";
let store = test_store("/");
let schema = Arc::new(OrcFormat.infer_schema(&store, path).await.unwrap());
let file_source = Arc::new(OrcSource::default());

let tests = [
Test {
config: scan_config(schema.clone(), None, path),
opener: orc_opener.clone(),
config: scan_config(schema.clone(), None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
"| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
@@ -235,8 +216,8 @@ async fn test_orc_opener() {
],
},
Test {
config: scan_config(schema.clone(), Some(1), path),
opener: orc_opener.clone(),
config: scan_config(schema.clone(), Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+----------+-----+------+------------+---+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+-------------------------+-------------+",
"| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
@@ -248,7 +229,7 @@ async fn test_orc_opener() {
];

for test in tests {
test.run().await;
test.run(&store).await;
}
}

@@ -16,12 +16,12 @@ use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema, SchemaRef};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datafusion::common::{Constraints, Statistics};
use datafusion::datasource::file_format::file_compression_type::FileCompressionType;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{
CsvConfig, CsvOpener, FileScanConfig, FileStream, JsonOpener,
CsvSource, FileGroup, FileScanConfig, FileScanConfigBuilder, FileSource, FileStream,
JsonOpener, JsonSource,
};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use object_store::services::Fs;
@@ -68,21 +68,20 @@ pub fn test_basic_schema() -> SchemaRef {
Arc::new(schema)
}

pub fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
pub(crate) fn scan_config(
file_schema: SchemaRef,
limit: Option<usize>,
filename: &str,
file_source: Arc<dyn FileSource>,
) -> FileScanConfig {
// object_store only recognize the Unix style path, so make it happy.
let filename = &filename.replace('\\', "/");
let statistics = Statistics::new_unknown(file_schema.as_ref());
FileScanConfig {
object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
file_schema,
file_groups: vec![vec![PartitionedFile::new(filename.to_string(), 10)]],
constraints: Constraints::empty(),
statistics,
projection: None,
limit,
table_partition_cols: vec![],
output_ordering: vec![],
}
let file_group = FileGroup::new(vec![PartitionedFile::new(filename.to_string(), 4096)]);

FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_schema, file_source)
.with_file_group(file_group)
.with_limit(limit)
.build()
}

pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -99,9 +98,14 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi

let size = store.read(origin_path).await.unwrap().len();

let config = scan_config(schema.clone(), None, origin_path);

let stream = FileStream::new(&config, 0, json_opener, &ExecutionPlanMetricsSet::new()).unwrap();
let config = scan_config(schema, None, origin_path, Arc::new(JsonSource::new()));
let stream = FileStream::new(
&config,
0,
Arc::new(json_opener),
&ExecutionPlanMetricsSet::new(),
)
.unwrap();

let (tmp_store, dir) = test_tmp_store("test_stream_to_json");

@@ -127,24 +131,17 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz

let schema = test_basic_schema();

let csv_config = Arc::new(CsvConfig::new(
TEST_BATCH_SIZE,
schema.clone(),
None,
true,
b',',
b'"',
None,
Arc::new(object_store_opendal::OpendalStore::new(store.clone())),
None,
));

let csv_opener = CsvOpener::new(csv_config, FileCompressionType::UNCOMPRESSED);

let csv_source = CsvSource::new(true, b',', b'"')
.with_schema(schema.clone())
.with_batch_size(TEST_BATCH_SIZE);
let config = scan_config(schema, None, origin_path, csv_source.clone());
let size = store.read(origin_path).await.unwrap().len();

let config = scan_config(schema.clone(), None, origin_path);

let csv_opener = csv_source.create_file_opener(
Arc::new(object_store_opendal::OpendalStore::new(store.clone())),
&config,
0,
);
let stream = FileStream::new(&config, 0, csv_opener, &ExecutionPlanMetricsSet::new()).unwrap();

let (tmp_store, dir) = test_tmp_store("test_stream_to_csv");

@@ -251,7 +251,6 @@ macro_rules! define_from_tonic_status {
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = metadata_value(&e, $crate::GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|s| {
if let Ok(code) = s.parse::<u32>() {
@@ -290,6 +289,8 @@ macro_rules! define_into_tonic_status {
use tonic::metadata::MetadataMap;
use $crate::GREPTIME_DB_HEADER_ERROR_CODE;

common_telemetry::error!(err; "Failed to handle request");

let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);

// If either of the status_code or error msg cannot convert to valid HTTP header value

@@ -8,12 +8,13 @@ license.workspace = true
api.workspace = true
async-trait.workspace = true
backon.workspace = true
client.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
itertools.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true

@@ -13,7 +13,7 @@
// limitations under the License.

use api::v1::ColumnSchema;
use common_error::ext::ErrorExt;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
@@ -22,12 +22,6 @@ use snafu::{Location, Snafu};
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("No available frontend"))]
NoAvailableFrontend {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Mismatched schema, expected: {:?}, actual: {:?}", expected, actual))]
MismatchedSchema {
#[snafu(implicit)]
@@ -35,6 +29,30 @@ pub enum Error {
expected: Vec<ColumnSchema>,
actual: Vec<ColumnSchema>,
},

#[snafu(display("Failed to serialize event"))]
SerializeEvent {
#[snafu(source)]
error: serde_json::error::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to insert events"))]
InsertEvents {
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `client::error::Error`.
source: BoxedError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Keyvalue backend error"))]
KvBackend {
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `common_meta::error::Error`.
source: BoxedError,
#[snafu(implicit)]
location: Location,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -42,8 +60,10 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::MismatchedSchema { .. } => StatusCode::InvalidArguments,
Error::NoAvailableFrontend { .. } => StatusCode::Internal,
Error::MismatchedSchema { .. } | Error::SerializeEvent { .. } => {
StatusCode::InvalidArguments
}
Error::InsertEvents { .. } | Error::KvBackend { .. } => StatusCode::Internal,
}
}

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(duration_constructors)]

pub mod error;
pub mod recorder;

@@ -15,7 +15,7 @@
use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::{Arc, OnceLock};
use std::sync::Arc;
use std::time::Duration;

use api::v1::column_data_type_extension::TypeExt;
@@ -28,6 +28,8 @@ use async_trait::async_trait;
use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::{debug, error, info, warn};
use common_time::timestamp::{TimeUnit, Timestamp};
use humantime::format_duration;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use store_api::mito_engine_options::{APPEND_MODE_KEY, TTL_KEY};
use tokio::sync::mpsc::{channel, Receiver, Sender};
@@ -50,12 +52,12 @@ pub const EVENTS_TABLE_TIMESTAMP_COLUMN_NAME: &str = "timestamp";
/// EventRecorderRef is the reference to the event recorder.
pub type EventRecorderRef = Arc<dyn EventRecorder>;

static EVENTS_TABLE_TTL: OnceLock<String> = OnceLock::new();

/// The time interval for flushing batched events to the event handler.
pub const DEFAULT_FLUSH_INTERVAL_SECONDS: Duration = Duration::from_secs(5);
// The default TTL for the events table.
const DEFAULT_EVENTS_TABLE_TTL: &str = "30d";
/// The default TTL(90 days) for the events table.
const DEFAULT_EVENTS_TABLE_TTL: Duration = Duration::from_days(90);
/// The default compaction time window for the events table.
pub const DEFAULT_COMPACTION_TIME_WINDOW: Duration = Duration::from_days(1);
// The capacity of the tokio channel for transmitting events to background processor.
const DEFAULT_CHANNEL_SIZE: usize = 2048;
// The size of the buffer for batching events before flushing to event handler.
@@ -72,6 +74,11 @@ const DEFAULT_MAX_RETRY_TIMES: u64 = 3;
///
/// The event can also add the extra schema and row to the event by overriding the `extra_schema` and `extra_row` methods.
pub trait Event: Send + Sync + Debug {
/// Returns the table name of the event.
fn table_name(&self) -> &str {
DEFAULT_EVENTS_TABLE_NAME
}

/// Returns the type of the event.
fn event_type(&self) -> &str;

@@ -81,7 +88,9 @@ pub trait Event: Send + Sync + Debug {
}

/// Returns the JSON bytes of the event as the payload. It will use JSON type to store the payload.
fn json_payload(&self) -> Result<String>;
fn json_payload(&self) -> Result<String> {
Ok("".to_string())
}

/// Add the extra schema to the event with the default schema.
fn extra_schema(&self) -> Vec<ColumnSchema> {
@@ -97,88 +106,76 @@ pub trait Event: Send + Sync + Debug {
fn as_any(&self) -> &dyn Any;
}

/// Returns the hints for the insert operation.
pub fn insert_hints() -> Vec<(&'static str, &'static str)> {
vec![
(
TTL_KEY,
EVENTS_TABLE_TTL
.get()
.map(|s| s.as_str())
.unwrap_or(DEFAULT_EVENTS_TABLE_TTL),
),
(APPEND_MODE_KEY, "true"),
]
/// Eventable trait defines the interface for objects that can be converted to [Event].
pub trait Eventable: Send + Sync + Debug {
/// Converts the object to an [Event].
fn to_event(&self) -> Option<Box<dyn Event>> {
None
}
}

/// Builds the row inserts request for the events that will be persisted to the events table.
pub fn build_row_inserts_request(events: &[Box<dyn Event>]) -> Result<RowInsertRequests> {
// Aggregate the events by the event type.
let mut event_groups: HashMap<&str, Vec<&Box<dyn Event>>> = HashMap::new();
/// Groups events by its `event_type`.
#[allow(clippy::borrowed_box)]
pub fn group_events_by_type(events: &[Box<dyn Event>]) -> HashMap<&str, Vec<&Box<dyn Event>>> {
events
.iter()
.into_grouping_map_by(|event| event.event_type())
.collect()
}

/// Builds the row inserts request for the events that will be persisted to the events table. The `events` should have the same event type, or it will return an error.
#[allow(clippy::borrowed_box)]
pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsertRequests> {
// Ensure all the events are the same type.
validate_events(events)?;

// We already validated the events, so it's safe to get the first event to build the schema for the RowInsertRequest.
let event = &events[0];
let mut schema: Vec<ColumnSchema> = Vec::with_capacity(3 + event.extra_schema().len());
schema.extend(vec![
ColumnSchema {
column_name: EVENTS_TABLE_TYPE_COLUMN_NAME.to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Tag.into(),
..Default::default()
},
ColumnSchema {
column_name: EVENTS_TABLE_PAYLOAD_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Binary as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
..Default::default()
},
ColumnSchema {
column_name: EVENTS_TABLE_TIMESTAMP_COLUMN_NAME.to_string(),
datatype: ColumnDataType::TimestampNanosecond.into(),
semantic_type: SemanticType::Timestamp.into(),
..Default::default()
},
]);
schema.extend(event.extra_schema());

let mut rows: Vec<Row> = Vec::with_capacity(events.len());
for event in events {
event_groups
.entry(event.event_type())
.or_default()
.push(event);
let extra_row = event.extra_row()?;
let mut values = Vec::with_capacity(3 + extra_row.values.len());
values.extend([
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.into_bytes()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
]);
values.extend(extra_row.values);
rows.push(Row { values });
}

let mut row_insert_requests = RowInsertRequests {
inserts: Vec::with_capacity(event_groups.len()),
};

for (_, events) in event_groups {
validate_events(&events)?;

// We already validated the events, so it's safe to get the first event to build the schema for the RowInsertRequest.
let event = &events[0];
let mut schema = vec![
ColumnSchema {
column_name: EVENTS_TABLE_TYPE_COLUMN_NAME.to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Tag.into(),
..Default::default()
},
ColumnSchema {
column_name: EVENTS_TABLE_PAYLOAD_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Binary as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
..Default::default()
},
ColumnSchema {
column_name: EVENTS_TABLE_TIMESTAMP_COLUMN_NAME.to_string(),
datatype: ColumnDataType::TimestampNanosecond.into(),
semantic_type: SemanticType::Timestamp.into(),
..Default::default()
},
];
schema.extend(event.extra_schema());

let rows = events
.iter()
.map(|event| {
let mut row = Row {
values: vec![
ValueData::StringValue(event.event_type().to_string()).into(),
ValueData::BinaryValue(event.json_payload()?.as_bytes().to_vec()).into(),
ValueData::TimestampNanosecondValue(event.timestamp().value()).into(),
],
};
row.values.extend(event.extra_row()?.values);
Ok(row)
})
.collect::<Result<Vec<_>>>()?;

row_insert_requests.inserts.push(RowInsertRequest {
table_name: DEFAULT_EVENTS_TABLE_NAME.to_string(),
Ok(RowInsertRequests {
inserts: vec![RowInsertRequest {
table_name: event.table_name().to_string(),
rows: Some(Rows { schema, rows }),
});
}

Ok(row_insert_requests)
}],
})
}

// Ensure the events with the same event type have the same extra schema.
@@ -199,7 +196,7 @@ fn validate_events(events: &[&Box<dyn Event>]) -> Result<()> {
}

/// EventRecorder trait defines the interface for recording events.
pub trait EventRecorder: Send + Sync + 'static {
pub trait EventRecorder: Send + Sync + Debug + 'static {
/// Records an event for persistence and processing by [EventHandler].
fn record(&self, event: Box<dyn Event>);

@@ -207,6 +204,34 @@ pub trait EventRecorder: Send + Sync + 'static {
fn close(&self);
}

/// EventHandlerOptions is the options for the event handler.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct EventHandlerOptions {
/// TTL for the events table that will be used to store the events.
pub ttl: Duration,
/// Append mode for the events table that will be used to store the events.
pub append_mode: bool,
}

impl Default for EventHandlerOptions {
fn default() -> Self {
Self {
ttl: DEFAULT_EVENTS_TABLE_TTL,
append_mode: true,
}
}
}

impl EventHandlerOptions {
/// Converts the options to the hints for the insert operation.
pub fn to_hints(&self) -> Vec<(&str, String)> {
vec![
(TTL_KEY, format_duration(self.ttl).to_string()),
(APPEND_MODE_KEY, self.append_mode.to_string()),
]
}
}

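Note (not part of the diff): taken together, a handler would typically group incoming events, build one insert request per group, and attach the hints above. A rough sketch under those assumptions; `sink` stands in for whatever the concrete handler writes to:

// Sketch only; relies on the Event/EventHandlerOptions items introduced in this change.
fn persist_events(
    events: &[Box<dyn Event>],
    opts: &EventHandlerOptions,
    sink: &dyn Fn(RowInsertRequests, &[(&str, String)]) -> Result<()>,
) -> Result<()> {
    let hints = opts.to_hints();
    // Each group shares an event type, so it maps onto a single table and schema.
    for (_event_type, group) in group_events_by_type(events) {
        let requests = build_row_inserts_request(&group)?;
        sink(requests, &hints)?;
    }
    Ok(())
}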
/// EventHandler trait defines the interface for how to handle the event.
|
||||
#[async_trait]
|
||||
pub trait EventHandler: Send + Sync + 'static {
|
||||
@@ -219,18 +244,20 @@ pub trait EventHandler: Send + Sync + 'static {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct EventRecorderOptions {
|
||||
/// TTL for the events table that will be used to store the events.
|
||||
pub ttl: String,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub ttl: Duration,
|
||||
}
|
||||
|
||||
impl Default for EventRecorderOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
ttl: DEFAULT_EVENTS_TABLE_TTL.to_string(),
|
||||
ttl: DEFAULT_EVENTS_TABLE_TTL,
|
||||
}
|
||||
}
|
||||
}
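A rough sketch of what the humantime_serde change enables (the TOML source below is an assumption for illustration; only the field name comes from the struct above): a human-readable duration string now deserializes straight into a Duration.

// Hypothetical deserialization via the toml crate; "90d" is parsed by humantime.
let opts: EventRecorderOptions = toml::from_str(r#"ttl = "90d""#).unwrap();
assert_eq!(opts.ttl, std::time::Duration::from_secs(90 * 24 * 60 * 60));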

/// Implementation of [EventRecorder] that records the events and processes them in the background by the [EventHandler].
#[derive(Debug)]
pub struct EventRecorderImpl {
// The channel to send the events to the background processor.
tx: Sender<Box<dyn Event>>,
@@ -241,9 +268,7 @@ pub struct EventRecorderImpl {
}

impl EventRecorderImpl {
pub fn new(event_handler: Box<dyn EventHandler>, opts: EventRecorderOptions) -> Self {
info!("Creating event recorder with options: {:?}", opts);

pub fn new(event_handler: Box<dyn EventHandler>) -> Self {
let (tx, rx) = channel(DEFAULT_CHANNEL_SIZE);
let cancel_token = CancellationToken::new();

@@ -268,14 +293,6 @@ impl EventRecorderImpl {

recorder.handle = Some(handle);

// It only sets the ttl once, so it's safe to skip the error.
if EVENTS_TABLE_TTL.set(opts.ttl.clone()).is_err() {
info!(
"Events table ttl already set to {}, skip setting it",
opts.ttl
);
}

recorder
}
}
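A minimal construction sketch mirroring the updated signature (the handler and event types here are hypothetical stand-ins; the pattern follows the tests below):

// Sketch: the recorder no longer takes EventRecorderOptions at construction time.
let mut recorder = EventRecorderImpl::new(Box::new(MyEventHandler {})); // MyEventHandler: EventHandler (hypothetical)
recorder.record(Box::new(MyEvent {})); // MyEvent: Event (hypothetical)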
@@ -460,10 +477,7 @@ mod tests {

#[tokio::test]
async fn test_event_recorder() {
let mut event_recorder = EventRecorderImpl::new(
Box::new(TestEventHandlerImpl {}),
EventRecorderOptions::default(),
);
let mut event_recorder = EventRecorderImpl::new(Box::new(TestEventHandlerImpl {}));
event_recorder.record(Box::new(TestEvent {}));

// Sleep for a while to let the event be sent to the event handler.
@@ -504,10 +518,8 @@ mod tests {

#[tokio::test]
async fn test_event_recorder_should_panic() {
let mut event_recorder = EventRecorderImpl::new(
Box::new(TestEventHandlerImplShouldPanic {}),
EventRecorderOptions::default(),
);
let mut event_recorder =
EventRecorderImpl::new(Box::new(TestEventHandlerImplShouldPanic {}));

event_recorder.record(Box::new(TestEvent {}));

@@ -524,4 +536,135 @@ mod tests {
assert!(handle.await.unwrap_err().is_panic());
}
}

#[derive(Debug)]
struct TestEventA {}

impl Event for TestEventA {
fn event_type(&self) -> &str {
"A"
}

fn as_any(&self) -> &dyn Any {
self
}
}

#[derive(Debug)]
struct TestEventB {}

impl Event for TestEventB {
fn table_name(&self) -> &str {
"table_B"
}

fn event_type(&self) -> &str {
"B"
}

fn as_any(&self) -> &dyn Any {
self
}
}

#[derive(Debug)]
struct TestEventC {}

impl Event for TestEventC {
fn table_name(&self) -> &str {
"table_C"
}

fn event_type(&self) -> &str {
"C"
}

fn as_any(&self) -> &dyn Any {
self
}
}

#[test]
fn test_group_events_by_type() {
let events: Vec<Box<dyn Event>> = vec![
Box::new(TestEventA {}),
Box::new(TestEventB {}),
Box::new(TestEventA {}),
Box::new(TestEventC {}),
Box::new(TestEventB {}),
Box::new(TestEventC {}),
Box::new(TestEventA {}),
];

let event_groups = group_events_by_type(&events);
assert_eq!(event_groups.len(), 3);
assert_eq!(event_groups.get("A").unwrap().len(), 3);
assert_eq!(event_groups.get("B").unwrap().len(), 2);
assert_eq!(event_groups.get("C").unwrap().len(), 2);
}

#[test]
fn test_build_row_inserts_request() {
let events: Vec<Box<dyn Event>> = vec![
Box::new(TestEventA {}),
Box::new(TestEventB {}),
Box::new(TestEventA {}),
Box::new(TestEventC {}),
Box::new(TestEventB {}),
Box::new(TestEventC {}),
Box::new(TestEventA {}),
];

let event_groups = group_events_by_type(&events);
assert_eq!(event_groups.len(), 3);
assert_eq!(event_groups.get("A").unwrap().len(), 3);
assert_eq!(event_groups.get("B").unwrap().len(), 2);
assert_eq!(event_groups.get("C").unwrap().len(), 2);

for (event_type, events) in event_groups {
let row_inserts_request = build_row_inserts_request(&events).unwrap();
if event_type == "A" {
assert_eq!(row_inserts_request.inserts.len(), 1);
assert_eq!(
row_inserts_request.inserts[0].table_name,
DEFAULT_EVENTS_TABLE_NAME
);
assert_eq!(
row_inserts_request.inserts[0]
.rows
.as_ref()
.unwrap()
.rows
.len(),
3
);
} else if event_type == "B" {
assert_eq!(row_inserts_request.inserts.len(), 1);
assert_eq!(row_inserts_request.inserts[0].table_name, "table_B");
assert_eq!(
row_inserts_request.inserts[0]
.rows
.as_ref()
.unwrap()
.rows
.len(),
2
);
} else if event_type == "C" {
assert_eq!(row_inserts_request.inserts.len(), 1);
assert_eq!(row_inserts_request.inserts[0].table_name, "table_C");
assert_eq!(
row_inserts_request.inserts[0]
.rows
.as_ref()
.unwrap()
.rows
.len(),
2
);
} else {
panic!("Unexpected event type: {}", event_type);
}
}
}
}

@@ -5,13 +5,19 @@ edition.workspace = true
license.workspace = true

[dependencies]
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-event-recorder.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-time.workspace = true
greptime-proto.workspace = true
humantime.workspace = true
meta-client.workspace = true
serde.workspace = true
session.workspace = true
snafu.workspace = true
tonic.workspace = true

@@ -19,6 +19,7 @@ use snafu::OptionExt;

pub mod error;
pub mod selector;
pub mod slow_query_event;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DisplayProcessId {
src/common/frontend/src/slow_query_event.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType};
use common_event_recorder::error::Result;
use common_event_recorder::Event;
use serde::Serialize;

pub const SLOW_QUERY_TABLE_NAME: &str = "slow_queries";
pub const SLOW_QUERY_TABLE_COST_COLUMN_NAME: &str = "cost";
pub const SLOW_QUERY_TABLE_THRESHOLD_COLUMN_NAME: &str = "threshold";
pub const SLOW_QUERY_TABLE_QUERY_COLUMN_NAME: &str = "query";
pub const SLOW_QUERY_TABLE_TIMESTAMP_COLUMN_NAME: &str = "timestamp";
pub const SLOW_QUERY_TABLE_IS_PROMQL_COLUMN_NAME: &str = "is_promql";
pub const SLOW_QUERY_TABLE_PROMQL_START_COLUMN_NAME: &str = "promql_start";
pub const SLOW_QUERY_TABLE_PROMQL_END_COLUMN_NAME: &str = "promql_end";
pub const SLOW_QUERY_TABLE_PROMQL_RANGE_COLUMN_NAME: &str = "promql_range";
pub const SLOW_QUERY_TABLE_PROMQL_STEP_COLUMN_NAME: &str = "promql_step";
pub const SLOW_QUERY_EVENT_TYPE: &str = "slow_query";

/// SlowQueryEvent is the event of slow query.
#[derive(Debug, Serialize)]
pub struct SlowQueryEvent {
pub cost: u64,
pub threshold: u64,
pub query: String,
pub is_promql: bool,
pub promql_range: Option<u64>,
pub promql_step: Option<u64>,
pub promql_start: Option<i64>,
pub promql_end: Option<i64>,
}

impl Event for SlowQueryEvent {
fn table_name(&self) -> &str {
SLOW_QUERY_TABLE_NAME
}

fn event_type(&self) -> &str {
SLOW_QUERY_EVENT_TYPE
}

fn extra_schema(&self) -> Vec<ColumnSchema> {
vec![
ColumnSchema {
column_name: SLOW_QUERY_TABLE_COST_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Uint64.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_THRESHOLD_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Uint64.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_QUERY_COLUMN_NAME.to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_IS_PROMQL_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Boolean.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_PROMQL_RANGE_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Uint64.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_PROMQL_STEP_COLUMN_NAME.to_string(),
datatype: ColumnDataType::Uint64.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_PROMQL_START_COLUMN_NAME.to_string(),
datatype: ColumnDataType::TimestampMillisecond.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
ColumnSchema {
column_name: SLOW_QUERY_TABLE_PROMQL_END_COLUMN_NAME.to_string(),
datatype: ColumnDataType::TimestampMillisecond.into(),
semantic_type: SemanticType::Field.into(),
..Default::default()
},
]
}

fn extra_row(&self) -> Result<Row> {
Ok(Row {
values: vec![
ValueData::U64Value(self.cost).into(),
ValueData::U64Value(self.threshold).into(),
ValueData::StringValue(self.query.to_string()).into(),
ValueData::BoolValue(self.is_promql).into(),
ValueData::U64Value(self.promql_range.unwrap_or(0)).into(),
ValueData::U64Value(self.promql_step.unwrap_or(0)).into(),
ValueData::TimestampMillisecondValue(self.promql_start.unwrap_or(0)).into(),
ValueData::TimestampMillisecondValue(self.promql_end.unwrap_or(0)).into(),
],
})
}

fn json_payload(&self) -> Result<String> {
Ok("".to_string())
}

fn as_any(&self) -> &dyn Any {
self
}
}
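For illustration, a hedged sketch of how the new event might be populated and recorded (field values are made up; the recorder call follows the EventRecorder pattern shown earlier in this diff):

// Sketch: a 2s query that exceeded a 1s slow-query threshold.
let event = SlowQueryEvent {
    cost: 2_000,
    threshold: 1_000,
    query: "SELECT * FROM monitor".to_string(),
    is_promql: false,
    promql_range: None,
    promql_step: None,
    promql_start: None,
    promql_end: None,
};
// recorder.record(Box::new(event)); // `recorder` is any EventRecorder implementation (assumption)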
@@ -57,7 +57,6 @@ serde_json.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
uddsketch = { git = "https://github.com/GreptimeTeam/timescaledb-toolkit.git", rev = "84828fe8fb494a6a61412a3da96517fc80f7bb20" }
@@ -66,6 +65,6 @@ wkt = { version = "0.11", optional = true }
[dev-dependencies]
approx = "0.5"
futures.workspace = true
pretty_assertions = "1.4.0"
pretty_assertions.workspace = true
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true

@@ -16,32 +16,39 @@ mod add_region_follower;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
mod reconcile_catalog;
mod reconcile_database;
mod reconcile_table;
mod remove_region_follower;

use std::sync::Arc;

use add_region_follower::AddRegionFollowerFunction;
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;
use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;
use remove_region_follower::RemoveRegionFollowerFunction;

use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;

/// Table functions
/// Administration functions
pub(crate) struct AdminFunction;

impl AdminFunction {
/// Register all table functions to [`FunctionRegistry`].
/// Register all admin functions to [`FunctionRegistry`].
pub fn register(registry: &FunctionRegistry) {
registry.register_async(Arc::new(MigrateRegionFunction));
registry.register_async(Arc::new(AddRegionFollowerFunction));
registry.register_async(Arc::new(RemoveRegionFollowerFunction));
registry.register_async(Arc::new(FlushRegionFunction));
registry.register_async(Arc::new(CompactRegionFunction));
registry.register_async(Arc::new(FlushTableFunction));
registry.register_async(Arc::new(CompactTableFunction));
registry.register_async(Arc::new(FlushFlowFunction));
registry.register(MigrateRegionFunction::factory());
registry.register(AddRegionFollowerFunction::factory());
registry.register(RemoveRegionFollowerFunction::factory());
registry.register(FlushRegionFunction::factory());
registry.register(CompactRegionFunction::factory());
registry.register(FlushTableFunction::factory());
registry.register(CompactTableFunction::factory());
registry.register(FlushFlowFunction::factory());
registry.register(ReconcileCatalogFunction::factory());
registry.register(ReconcileDatabaseFunction::factory());
registry.register(ReconcileTableFunction::factory());
}
}

@@ -18,7 +18,8 @@ use common_query::error::{
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datafusion_expr::{Signature, TypeSignature, Volatility};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
@@ -82,7 +83,13 @@ fn signature() -> Signature {
Signature::one_of(
vec![
// add_region_follower(region_id, peer)
TypeSignature::Uniform(2, ConcreteDataType::numerics()),
TypeSignature::Uniform(
2,
ConcreteDataType::numerics()
.into_iter()
.map(|dt| dt.as_arrow_type())
.collect(),
),
],
Volatility::Immutable,
)
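The same ConcreteDataType-to-Arrow conversion recurs in the signature changes below; as a standalone sketch using only names that appear in this diff:

// Sketch: map GreptimeDB's numeric ConcreteDataTypes to their Arrow equivalents so the
// DataFusion Signature keeps accepting the same argument types.
use datatypes::data_type::DataType as _; // brings as_arrow_type into scope, as in the diff
let arrow_numerics: Vec<arrow::datatypes::DataType> = ConcreteDataType::numerics()
    .into_iter()
    .map(|dt| dt.as_arrow_type())
    .collect();
let _sig = datafusion_expr::Signature::uniform(2, arrow_numerics, datafusion_expr::Volatility::Immutable);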
@@ -92,38 +99,57 @@ fn signature() -> Signature {
mod tests {
use std::sync::Arc;

use common_query::prelude::TypeSignature;
use datatypes::vectors::{UInt64Vector, VectorRef};
use arrow::array::UInt64Array;
use arrow::datatypes::{DataType, Field};
use datafusion_expr::ColumnarValue;

use super::*;
use crate::function::{AsyncFunction, FunctionContext};
use crate::function::FunctionContext;
use crate::function_factory::ScalarFunctionFactory;

#[test]
fn test_add_region_follower_misc() {
let f = AddRegionFollowerFunction;
let factory: ScalarFunctionFactory = AddRegionFollowerFunction::factory().into();
let f = factory.provide(FunctionContext::mock());
assert_eq!("add_region_follower", f.name());
assert_eq!(
ConcreteDataType::uint64_datatype(),
f.return_type(&[]).unwrap()
);
assert_eq!(DataType::UInt64, f.return_type(&[]).unwrap());
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::OneOf(sigs),
volatility: datafusion_expr::Volatility::Immutable
} if sigs.len() == 1));
}

#[tokio::test]
async fn test_add_region_follower() {
let f = AddRegionFollowerFunction;
let args = vec![1, 1];
let args = args
.into_iter()
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();
let factory: ScalarFunctionFactory = AddRegionFollowerFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([0u64]));
assert_eq!(result, expect);
let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![2]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::UInt64, false)),
Arc::new(Field::new("arg_1", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::UInt64, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};

let result = f.invoke_async_with_args(func_args).await.unwrap();

match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<UInt64Array>().unwrap();
assert_eq!(result_array.value(0), 0u64);
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(scalar, datafusion_common::ScalarValue::UInt64(Some(0)));
}
}
}
}

@@ -16,7 +16,8 @@ use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datafusion_expr::{Signature, Volatility};
use datatypes::data_type::DataType;
use datatypes::prelude::*;
use session::context::QueryContextRef;
use snafu::ensure;
@@ -66,71 +67,99 @@ define_region_function!(FlushRegionFunction, flush_region, flush_region);
define_region_function!(CompactRegionFunction, compact_region, compact_region);

fn signature() -> Signature {
Signature::uniform(1, ConcreteDataType::numerics(), Volatility::Immutable)
Signature::uniform(
1,
ConcreteDataType::numerics()
.into_iter()
.map(|dt| dt.as_arrow_type())
.collect(),
Volatility::Immutable,
)
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

use common_query::prelude::TypeSignature;
use datatypes::vectors::UInt64Vector;
use arrow::array::UInt64Array;
use arrow::datatypes::{DataType, Field};
use datafusion_expr::ColumnarValue;

use super::*;
use crate::function::{AsyncFunction, FunctionContext};
use crate::function::FunctionContext;
use crate::function_factory::ScalarFunctionFactory;

macro_rules! define_region_function_test {
($name: ident, $func: ident) => {
paste::paste! {
#[test]
fn [<test_ $name _misc>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let f = factory.provide(FunctionContext::mock());
assert_eq!(stringify!($name), f.name());
assert_eq!(
ConcreteDataType::uint64_datatype(),
DataType::UInt64,
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == ConcreteDataType::numerics()));
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::Uniform(1, valid_types),
volatility: datafusion_expr::Volatility::Immutable
} if valid_types == &ConcreteDataType::numerics().into_iter().map(|dt| { use datatypes::data_type::DataType; dt.as_arrow_type() }).collect::<Vec<_>>()));
}

#[tokio::test]
async fn [<test_ $name _missing_table_mutation>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let provider = factory.provide(FunctionContext::default());
let f = provider.as_async().unwrap();

let args = vec![99];

let args = args
.into_iter()
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![99]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::UInt64, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap_err();
assert_eq!(
"Missing TableMutationHandler, not expected",
"Execution error: Handler error: Missing TableMutationHandler, not expected",
result.to_string()
);
}

#[tokio::test]
async fn [<test_ $name>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![99]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::UInt64, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap();

let args = vec![99];

let args = args
.into_iter()
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::mock(), &args).await.unwrap();

let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
assert_eq!(expect, result);
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<UInt64Array>().unwrap();
assert_eq!(result_array.value(0), 42u64);
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(scalar, datafusion_common::ScalarValue::UInt64(Some(42)));
}
}
}
}
};

@@ -15,14 +15,15 @@
use std::str::FromStr;

use api::v1::region::{compact_request, StrictWindow};
use arrow::datatypes::DataType as ArrowDataType;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use common_telemetry::info;
use datafusion_expr::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
@@ -105,18 +106,11 @@ pub(crate) async fn compact_table(
}

fn flush_signature() -> Signature {
Signature::uniform(
1,
vec![ConcreteDataType::string_datatype()],
Volatility::Immutable,
)
Signature::uniform(1, vec![ArrowDataType::Utf8], Volatility::Immutable)
}

fn compact_signature() -> Signature {
Signature::variadic(
vec![ConcreteDataType::string_datatype()],
Volatility::Immutable,
)
Signature::variadic(vec![ArrowDataType::Utf8], Volatility::Immutable)
}

/// Parses `compact_table` UDF parameters. This function accepts following combinations:
@@ -204,66 +198,87 @@ mod tests {
use std::sync::Arc;

use api::v1::region::compact_request::Options;
use arrow::array::StringArray;
use arrow::datatypes::{DataType, Field};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::prelude::TypeSignature;
use datatypes::vectors::{StringVector, UInt64Vector};
use datafusion_expr::ColumnarValue;
use session::context::QueryContext;

use super::*;
use crate::function::{AsyncFunction, FunctionContext};
use crate::function::FunctionContext;
use crate::function_factory::ScalarFunctionFactory;

macro_rules! define_table_function_test {
($name: ident, $func: ident) => {
paste::paste!{
#[test]
fn [<test_ $name _misc>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let f = factory.provide(FunctionContext::mock());
assert_eq!(stringify!($name), f.name());
assert_eq!(
ConcreteDataType::uint64_datatype(),
DataType::UInt64,
f.return_type(&[]).unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::string_datatype()]));
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::Uniform(1, valid_types),
volatility: datafusion_expr::Volatility::Immutable
} if valid_types == &vec![ArrowDataType::Utf8]));
}

#[tokio::test]
async fn [<test_ $name _missing_table_mutation>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let provider = factory.provide(FunctionContext::default());
let f = provider.as_async().unwrap();

let args = vec!["test"];

let args = args
.into_iter()
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(StringArray::from(vec!["test"]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::Utf8, false)),
],
return_field: Arc::new(Field::new("result", DataType::UInt64, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap_err();
assert_eq!(
"Missing TableMutationHandler, not expected",
"Execution error: Handler error: Missing TableMutationHandler, not expected",
result.to_string()
);
}

#[tokio::test]
async fn [<test_ $name>]() {
let f = $func;
let factory: ScalarFunctionFactory = $func::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(StringArray::from(vec!["test"]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::Utf8, false)),
],
return_field: Arc::new(Field::new("result", DataType::UInt64, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap();

let args = vec!["test"];

let args = args
.into_iter()
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::mock(), &args).await.unwrap();

let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
assert_eq!(expect, result);
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<arrow::array::UInt64Array>().unwrap();
assert_eq!(result_array.value(0), 42u64);
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(scalar, datafusion_common::ScalarValue::UInt64(Some(42)));
}
}
}
}
}

@@ -17,7 +17,8 @@ use std::time::Duration;
use common_macro::admin_fn;
use common_meta::rpc::procedure::MigrateRegionRequest;
use common_query::error::{InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datafusion_expr::{Signature, TypeSignature, Volatility};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
@@ -103,9 +104,21 @@ fn signature() -> Signature {
Signature::one_of(
vec![
// migrate_region(region_id, from_peer, to_peer)
TypeSignature::Uniform(3, ConcreteDataType::numerics()),
TypeSignature::Uniform(
3,
ConcreteDataType::numerics()
.into_iter()
.map(|dt| dt.as_arrow_type())
.collect(),
),
// migrate_region(region_id, from_peer, to_peer, timeout(secs))
TypeSignature::Uniform(4, ConcreteDataType::numerics()),
TypeSignature::Uniform(
4,
ConcreteDataType::numerics()
.into_iter()
.map(|dt| dt.as_arrow_type())
.collect(),
),
],
Volatility::Immutable,
)
@@ -115,59 +128,89 @@ fn signature() -> Signature {
mod tests {
use std::sync::Arc;

use common_query::prelude::TypeSignature;
use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
use arrow::array::{StringArray, UInt64Array};
use arrow::datatypes::{DataType, Field};
use datafusion_expr::ColumnarValue;

use super::*;
use crate::function::{AsyncFunction, FunctionContext};
use crate::function::FunctionContext;
use crate::function_factory::ScalarFunctionFactory;

#[test]
fn test_migrate_region_misc() {
let f = MigrateRegionFunction;
let factory: ScalarFunctionFactory = MigrateRegionFunction::factory().into();
let f = factory.provide(FunctionContext::mock());
assert_eq!("migrate_region", f.name());
assert_eq!(
ConcreteDataType::string_datatype(),
f.return_type(&[]).unwrap()
);
assert_eq!(DataType::Utf8, f.return_type(&[]).unwrap());
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::OneOf(sigs),
volatility: datafusion_expr::Volatility::Immutable
} if sigs.len() == 2));
}

#[tokio::test]
async fn test_missing_procedure_service() {
let f = MigrateRegionFunction;
let factory: ScalarFunctionFactory = MigrateRegionFunction::factory().into();
let provider = factory.provide(FunctionContext::default());
let f = provider.as_async().unwrap();

let args = vec![1, 1, 1];

let args = args
.into_iter()
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::UInt64, false)),
Arc::new(Field::new("arg_1", DataType::UInt64, false)),
Arc::new(Field::new("arg_2", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap_err();
assert_eq!(
"Missing ProcedureServiceHandler, not expected",
"Execution error: Handler error: Missing ProcedureServiceHandler, not expected",
result.to_string()
);
}

#[tokio::test]
async fn test_migrate_region() {
let f = MigrateRegionFunction;
let factory: ScalarFunctionFactory = MigrateRegionFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let args = vec![1, 1, 1];
let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![1]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::UInt64, false)),
Arc::new(Field::new("arg_1", DataType::UInt64, false)),
Arc::new(Field::new("arg_2", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap();

let args = args
.into_iter()
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
.collect::<Vec<_>>();

let result = f.eval(FunctionContext::mock(), &args).await.unwrap();

let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
assert_eq!(expect, result);
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(result_array.value(0), "test_pid");
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(
scalar,
datafusion_common::ScalarValue::Utf8(Some("test_pid".to_string()))
);
}
}
}
}
src/common/function/src/admin/reconcile_catalog.rs (new file, 270 lines)
@@ -0,0 +1,270 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::reconcile_request::Target;
use api::v1::meta::{ReconcileCatalog, ReconcileRequest};
use arrow::datatypes::DataType as ArrowDataType;
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
UnsupportedInputDataTypeSnafu,
};
use common_telemetry::info;
use datafusion_expr::{Signature, TypeSignature, Volatility};
use datatypes::data_type::DataType;
use datatypes::prelude::*;
use session::context::QueryContextRef;

use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::{
cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
parse_resolve_strategy,
};

const FN_NAME: &str = "reconcile_catalog";

/// A function to reconcile a catalog.
/// Returns the procedure id if success.
///
/// - `reconcile_catalog(resolve_strategy)`.
/// - `reconcile_catalog(resolve_strategy, parallelism)`.
///
/// - `reconcile_catalog()`.
#[admin_fn(
name = ReconcileCatalogFunction,
display_name = reconcile_catalog,
sig_fn = signature,
ret = string
)]
pub(crate) async fn reconcile_catalog(
procedure_service_handler: &ProcedureServiceHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
let (resolve_strategy, parallelism) = match params.len() {
0 => (default_resolve_strategy(), default_parallelism()),
1 => (
parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
default_parallelism(),
),
2 => {
let Some(parallelism) = cast_u32(&params[1])? else {
return UnsupportedInputDataTypeSnafu {
function: FN_NAME,
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
};
(
parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
parallelism,
)
}
size => {
return InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 0, 1 or 2, have: {}",
size
),
}
.fail();
}
};
info!(
"Reconciling catalog with resolve_strategy: {:?}, parallelism: {}",
resolve_strategy, parallelism
);
let pid = procedure_service_handler
.reconcile(ReconcileRequest {
target: Some(Target::ReconcileCatalog(ReconcileCatalog {
catalog_name: query_ctx.current_catalog().to_string(),
parallelism,
resolve_strategy: resolve_strategy as i32,
})),
..Default::default()
})
.await?;
match pid {
Some(pid) => Ok(Value::from(pid)),
None => Ok(Value::Null),
}
}

fn signature() -> Signature {
let nums = ConcreteDataType::numerics();
let mut signs = Vec::with_capacity(2 + nums.len());
signs.extend([
// reconcile_catalog()
TypeSignature::Nullary,
// reconcile_catalog(resolve_strategy)
TypeSignature::Exact(vec![ArrowDataType::Utf8]),
]);
for sign in nums {
// reconcile_catalog(resolve_strategy, parallelism)
signs.push(TypeSignature::Exact(vec![
ArrowDataType::Utf8,
sign.as_arrow_type(),
]));
}
Signature::one_of(signs, Volatility::Immutable)
}
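Put differently, the call shapes accepted by the signature built above are (a summary of what the loop produces, nothing new):

// reconcile_catalog()                    -> TypeSignature::Nullary
// reconcile_catalog(resolve_strategy)    -> TypeSignature::Exact([Utf8])
// reconcile_catalog(resolve_strategy, p) -> TypeSignature::Exact([Utf8, <numeric>]), one entry per numeric type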

#[cfg(test)]
mod tests {
use std::sync::Arc;

use arrow::array::{StringArray, UInt64Array};
use arrow::datatypes::{DataType, Field};
use datafusion_expr::ColumnarValue;

use crate::admin::reconcile_catalog::ReconcileCatalogFunction;
use crate::function::FunctionContext;
use crate::function_factory::ScalarFunctionFactory;

#[tokio::test]
async fn test_reconcile_catalog() {
common_telemetry::init_default_ut_logging();

// reconcile_catalog()
let factory: ScalarFunctionFactory = ReconcileCatalogFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![],
arg_fields: vec![],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};

let result = f.invoke_async_with_args(func_args).await.unwrap();
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(result_array.value(0), "test_pid");
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(
scalar,
datafusion_common::ScalarValue::Utf8(Some("test_pid".to_string()))
);
}
}

// reconcile_catalog(resolve_strategy)
let factory: ScalarFunctionFactory = ReconcileCatalogFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![ColumnarValue::Array(Arc::new(StringArray::from(vec![
"UseMetasrv",
])))],
arg_fields: vec![Arc::new(Field::new("arg_0", DataType::Utf8, false))],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap();
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(result_array.value(0), "test_pid");
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(
scalar,
datafusion_common::ScalarValue::Utf8(Some("test_pid".to_string()))
);
}
}

// reconcile_catalog(resolve_strategy, parallelism)
let factory: ScalarFunctionFactory = ReconcileCatalogFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(StringArray::from(vec!["UseLatest"]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![10]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::Utf8, false)),
Arc::new(Field::new("arg_1", DataType::UInt64, false)),
],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let result = f.invoke_async_with_args(func_args).await.unwrap();
match result {
ColumnarValue::Array(array) => {
let result_array = array.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(result_array.value(0), "test_pid");
}
ColumnarValue::Scalar(scalar) => {
assert_eq!(
scalar,
datafusion_common::ScalarValue::Utf8(Some("test_pid".to_string()))
);
}
}

// unsupported input data type
let factory: ScalarFunctionFactory = ReconcileCatalogFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(StringArray::from(vec!["UseLatest"]))),
ColumnarValue::Array(Arc::new(StringArray::from(vec!["test"]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::Utf8, false)),
Arc::new(Field::new("arg_1", DataType::Utf8, false)),
],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let _err = f.invoke_async_with_args(func_args).await.unwrap_err();
// Note: Error type is DataFusionError at this level, not common_query::Error

// invalid function args
let factory: ScalarFunctionFactory = ReconcileCatalogFunction::factory().into();
let provider = factory.provide(FunctionContext::mock());
let f = provider.as_async().unwrap();

let func_args = datafusion::logical_expr::ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(StringArray::from(vec!["UseLatest"]))),
ColumnarValue::Array(Arc::new(UInt64Array::from(vec![10]))),
ColumnarValue::Array(Arc::new(StringArray::from(vec!["10"]))),
],
arg_fields: vec![
Arc::new(Field::new("arg_0", DataType::Utf8, false)),
Arc::new(Field::new("arg_1", DataType::UInt64, false)),
Arc::new(Field::new("arg_2", DataType::Utf8, false)),
],
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
number_rows: 1,
config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
};
let _err = f.invoke_async_with_args(func_args).await.unwrap_err();
// Note: Error type is DataFusionError at this level, not common_query::Error
}
}
Some files were not shown because too many files have changed in this diff.