Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-25 15:40:02 +00:00)

Compare commits (36 commits)
Commits in this comparison (SHA1):

- edd8cb6710
- 7ee61e5d28
- 1b30aca5a5
- 99b352cea1
- 0f521956bf
- aee72ab363
- 5b78d76fc5
- a166430650
- 007a2b3dfe
- f35e957ddd
- 68414bf593
- 5e836a0d1b
- f5e0da2fc8
- fb96d26ebf
- 0046d3f65b
- d7b97fc877
- bfdaa28b25
- 6293bb1f5b
- 8fa1ebcc3e
- c18c3f5839
- 629e72d8c0
- e4065505ab
- aafd164483
- 1386e903d6
- 12692a940c
- 4d44cbb8b2
- f4911aa3bb
- 5ac61f17bc
- e0d34c6d95
- 8a98b9c433
- 1f5d36a203
- 6fc7168893
- 2799d67212
- d97a76c312
- 15caca244e
- 8638075cdd
@@ -12,6 +12,3 @@ fetch = true
checkout = true
list_files = true
internal_use_git2 = false

[env]
CARGO_WORKSPACE_DIR = { value = "", relative = true }
.github/CODEOWNERS (2 lines changed)
@@ -4,7 +4,7 @@
* @GreptimeTeam/db-approver

## [Module] Database Engine
## [Module] Databse Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
@@ -52,7 +52,7 @@ runs:
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -64,11 +64,11 @@ inputs:
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "30"
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "120" # minutes
default: "30" # minutes
runs:
using: composite
steps:
@@ -10,13 +10,13 @@ inputs:
meta-replicas:
default: 2
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
@@ -32,12 +32,12 @@ runs:
steps:
- name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
@@ -48,10 +48,10 @@ runs:
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
@@ -59,7 +59,7 @@ runs:
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \
--set base.podTemplate.main.resources.limits.memory=3Gi \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
@@ -72,7 +72,7 @@ runs:
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
@@ -86,10 +86,10 @@ runs:
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes
@@ -22,6 +22,7 @@ datanode:
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
overwrite_entry_start_id = true
frontend:
configData: |-
.github/labeler.yaml (15 lines changed)
@@ -1,15 +0,0 @@
ci:
- changed-files:
- any-glob-to-any-file: .github/**

docker:
- changed-files:
- any-glob-to-any-file: docker/**

documentation:
- changed-files:
- any-glob-to-any-file: docs/**

dashboard:
- changed-files:
- any-glob-to-any-file: grafana/**
.github/scripts/check-version.sh (42 lines changed)
@@ -1,42 +0,0 @@
#!/bin/bash

# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: Failed to get current version"
exit 1
fi

# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")

if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
echo "Error: Failed to fetch latest version from GitHub"
exit 1
fi

# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')

if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
echo "Error: No valid version found in GitHub releases"
exit 1
fi

# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')

echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"

# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)

if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
fi
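The script above hinges on `sort -V` for the version comparison: the higher version sorts last, and a tie falls into the "newer than or equal" branch. A minimal standalone sketch of that technique (the version values below are made-up examples, not taken from this diff):

```bash
#!/bin/bash
# Compare two semantic versions with sort -V; the larger one sorts last.
current="0.16.0"   # hypothetical example value
latest="0.15.2"    # hypothetical example value
higher=$(printf "%s\n%s\n" "$current" "$latest" | sort -V | tail -n1)
if [ "$higher" = "$current" ]; then
  echo "current ($current) is newer than or equal to latest ($latest)"
else
  echo "current ($current) is older than latest ($latest)"
fi
```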
.github/scripts/create-version.sh (5 lines changed)
@@ -8,7 +8,7 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from environment variables.
# Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2
exit 1
@@ -16,8 +16,7 @@ function create_version() {

if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
# NOTE: Need a `v` prefix for the version string.
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi

if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
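For reference, the nightly version format described in the script's own comment ('nightly-YYYYMMDD-<short sha>', e.g. 'nightly-20230712-e5b243c') can be reproduced with a sketch like the one below. This is not the script's exact code, only the same idea, and it assumes it runs inside a git checkout:

```bash
#!/bin/bash
# Assemble a nightly version string of the form <prefix>-YYYYMMDD-<short sha>.
NIGHTLY_RELEASE_PREFIX=nightly
build_date=$(date +%Y%m%d)
short_sha=$(git rev-parse --short HEAD)
echo "${NIGHTLY_RELEASE_PREFIX}-${build_date}-${short_sha}"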
.github/scripts/deploy-greptimedb.sh (6 lines changed)
@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

# Create a cluster with 1 control-plane node and 5 workers.
# Ceate a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {

helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-n "$install_namespace"

# Wait for greptimedb cluster to be ready.
@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {

helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
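Several hunks in this comparison switch the Helm value key for the etcd endpoints between `meta.etcdEndpoints` and `meta.backendStorage.etcd.endpoints`. A hedged sketch of how either form is passed to `helm`; which key applies depends on the greptimedb-cluster chart version in use, and the namespace here is a placeholder:

```bash
#!/bin/bash
# Install the greptimedb-cluster chart, pointing the metasrv at an etcd service.
# Use exactly one of the two --set forms, matching your chart version.
install_namespace="greptimedb-cluster-test"   # hypothetical namespace
helm install my-greptimedb greptime/greptimedb-cluster \
  --set meta.backendStorage.etcd.endpoints="etcd.${install_namespace}:2379" \
  -n "${install_namespace}"
# The flat key accepted by other chart versions:
#   --set meta.etcdEndpoints="etcd.${install_namespace}:2379"
```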
.github/scripts/update-dev-builder-version.sh (37 lines changed)
@@ -1,37 +0,0 @@
#!/bin/bash

DEV_BUILDER_IMAGE_TAG=$1

update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi

# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci

# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME

# Update the dev-builder image tag in the Makefile.
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile

# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME

# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}

update_dev_builder_version
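The script above takes the new dev-builder image tag as its first argument. A hedged usage sketch; the tag value is copied from the Makefile hunk later in this comparison, and running it assumes git and the GitHub CLI (gh) are authenticated, since it pushes a branch and opens a PR:

```bash
# Run from the repository root.
./.github/scripts/update-dev-builder-version.sh 2025-05-19-b2377d4b-20250520045554
```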
.github/scripts/update-helm-charts-version.sh (46 lines changed)
@@ -1,46 +0,0 @@
#!/bin/bash

set -e

VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}

update_helm_charts_version() {
# Configure Git configs.
git config --global user.email update-helm-charts-version@greptime.com
git config --global user.name update-helm-charts-version

# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
cd helm-charts

# Set default remote for gh CLI
gh repo set-default GreptimeTeam/helm-charts

# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME

# Update version.
make update-version CHART=greptimedb-cluster VERSION=${VERSION}
make update-version CHART=greptimedb-standalone VERSION=${VERSION}

# Update docs.
make docs

# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME

# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}

update_helm_charts_version
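Unlike the dev-builder script, this one reads VERSION and GITHUB_TOKEN from the environment rather than from arguments, matching how release.yml invokes it further down in this comparison. A hedged usage sketch; the version string is illustrative and the token is a placeholder:

```bash
# GITHUB_TOKEN must be able to push branches and open PRs in GreptimeTeam/helm-charts.
export VERSION="v0.15.0"            # illustrative version, not taken from this page
export GITHUB_TOKEN="ghp_placeholder"  # placeholder, not a real token
./.github/scripts/update-helm-charts-version.sh
```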
@@ -1,42 +0,0 @@
#!/bin/bash

set -e

VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}

update_homebrew_greptime_version() {
# Configure Git configs.
git config --global user.email update-greptime-version@greptime.com
git config --global user.name update-greptime-version

# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
cd homebrew-greptime

# Set default remote for gh CLI
gh repo set-default GreptimeTeam/homebrew-greptime

# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME

# Update version.
make update-greptime-version VERSION=${VERSION}

# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME

# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}

update_homebrew_greptime_version
.github/scripts/upload-artifacts-to-s3.sh (2 lines changed)
@@ -41,7 +41,7 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt
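The regex in the hunk above only matches plain vX.Y.Z tags, so nightly and pre-release versions never update latest-version.txt. A small standalone sketch of that check (the sample versions are made-up examples):

```bash
#!/bin/bash
# Only official releases of the form vMAJOR.MINOR.PATCH pass the check.
for VERSION in v1.0.2 v0.15.0-nightly-20250305 v1.0.0-rc1; do
  if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$VERSION: official release, would update latest-version.txt"
  else
    echo "$VERSION: skipped"
  fi
done
```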
.github/workflows/dev-build.yml (9 lines changed)
@@ -55,11 +55,6 @@ on:
description: Build and push images to DockerHub and ACR
required: false
default: true
upload_artifacts_to_s3:
type: boolean
description: Whether upload artifacts to s3
required: false
default: false
cargo_profile:
type: choice
description: The cargo profile to use in building GreptimeDB.
@@ -243,7 +238,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.

- name: Echo Docker image tag to step summary
run: |
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
@@ -286,7 +281,7 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
upload-to-s3: false
dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3.
.github/workflows/develop.yml (52 lines changed)
@@ -22,7 +22,6 @@ concurrency:
jobs:
check-typos-and-docs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check typos and docs
runs-on: ubuntu-latest
steps:
@@ -37,7 +36,6 @@ jobs:
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

license-header-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
name: Check License Header
steps:
@@ -47,7 +45,6 @@ jobs:
- uses: korandoru/hawkeye@v5

check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check
runs-on: ${{ matrix.os }}
strategy:
@@ -74,7 +71,6 @@ jobs:
run: cargo check --locked --workspace --all-targets

toml:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Toml Check
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -89,7 +85,6 @@ jobs:
run: taplo format --check

build:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binaries
runs-on: ${{ matrix.os }}
strategy:
@@ -132,7 +127,6 @@ jobs:
version: current

fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
@@ -189,13 +183,11 @@ jobs:
max-total-time: 120

unstable-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Unstable Fuzz Test
needs: build-greptime-ci
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
@@ -223,12 +215,12 @@ jobs:
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binary
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
- name: Unzip bianry
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
@@ -250,14 +242,8 @@ jobs:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb

build-greptime-ci:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
@@ -281,7 +267,7 @@ jobs:
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
@@ -299,13 +285,11 @@ jobs:
version: current

distributed-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
@@ -335,9 +319,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -410,11 +394,6 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pod
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash
@@ -437,13 +416,11 @@ jobs:
docker system prune -f

distributed-fuzztest-with-chaos:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
@@ -488,9 +465,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -564,11 +541,6 @@ jobs:
shell: bash
run: |
kubectl describe nodes
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs
if: failure()
shell: bash
@@ -591,12 +563,10 @@ jobs:
docker system prune -f

sqlness:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest ]
mode:
@@ -639,7 +609,6 @@ jobs:
retention-days: 3

fmt:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Rustfmt
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -657,7 +626,6 @@ jobs:
run: make fmt-check

clippy:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Clippy
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -683,7 +651,6 @@ jobs:
run: make clippy

conflict-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check for conflict
runs-on: ubuntu-latest
steps:
@@ -694,7 +661,7 @@ jobs:
uses: olivernybroe/action-conflict-finder@v4.0

test:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
if: github.event_name != 'merge_group'
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
@@ -746,7 +713,7 @@ jobs:
UNITTEST_LOG_DIR: "__unittest_logs"

coverage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
if: github.event_name == 'merge_group'
runs-on: ubuntu-22.04-8-cores
timeout-minutes: 60
steps:
@@ -806,7 +773,6 @@ jobs:
verbose: true

# compat:
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
# name: Compatibility Test
# needs: build
# runs-on: ubuntu-22.04
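For context on the fuzz jobs in develop.yml: the workflow installs cargo-fuzz and cargo-gc-bin and runs targets such as fuzz_create_table with a bounded run time (max-total-time: 120). A hedged sketch of running one target locally; the fuzz directory, environment, and the need for a reachable GreptimeDB instance are assumptions rather than something this workflow excerpt spells out:

```bash
# Assumes a nightly Rust toolchain, cargo-fuzz installed, and a GreptimeDB
# instance reachable at whatever address the fuzz targets expect (assumption).
cargo install cargo-fuzz
# Run one target for at most 120 seconds, mirroring max-total-time: 120 above.
cargo +nightly fuzz run fuzz_create_table --fuzz-dir tests-fuzz -- -max_total_time=120
```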
.github/workflows/nightly-ci.yml (10 lines changed)
@@ -117,16 +117,16 @@ jobs:
name: Run clean build on Linux
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
timeout-minutes: 45
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v31
- run: nix develop --command cargo check --bin greptime
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
- uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-24.11
- run: nix develop --command cargo build

check-status:
name: Check status
.github/workflows/pr-labeling.yaml (42 lines changed)
@@ -1,42 +0,0 @@
name: 'PR Labeling'

on:
pull_request_target:
types:
- opened
- synchronize
- reopened

permissions:
contents: read
pull-requests: write
issues: write

jobs:
labeler:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4

- uses: actions/labeler@v5
with:
configuration-path: ".github/labeler.yaml"
repo-token: "${{ secrets.GITHUB_TOKEN }}"

size-label:
runs-on: ubuntu-latest
steps:
- uses: pascalgn/size-label-action@v0.5.5
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
with:
sizes: >
{
"0": "XS",
"100": "S",
"300": "M",
"1000": "L",
"1500": "XL",
"2000": "XXL"
}
@@ -24,19 +24,11 @@ on:
description: Release dev-builder-android image
required: false
default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false

jobs:
release-dev-builder-images:
name: Release dev builder images
# The jobs are triggered by the following events:
# 1. Manually triggered workflow_dispatch event
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-latest
outputs:
version: ${{ steps.set-version.outputs.version }}
@@ -65,9 +57,9 @@ jobs:
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}

release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
@@ -93,7 +85,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -114,7 +106,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -135,7 +127,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -170,7 +162,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -184,7 +176,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -198,7 +190,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -209,24 +201,3 @@ jobs:
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION

update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
.github/workflows/release.yml (74 lines changed)
@@ -88,7 +88,7 @@ env:
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly

jobs:
@@ -110,8 +110,6 @@ jobs:
# The 'version' use as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}

should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -126,7 +124,7 @@ jobs:
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version
id: create-version
@@ -137,11 +135,6 @@ jobs:
GITHUB_REF_NAME: ${{ github.ref_name }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

- name: Check version
id: check-version
run: |
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"

- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
@@ -321,7 +314,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

- name: Set build image result
id: set-build-image-result
@@ -339,7 +332,7 @@ jobs:
build-windows-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest-16-cores
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -368,7 +361,7 @@ jobs:
dev-mode: false
upload-to-s3: true
update-version-info: true
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

publish-github-release:
name: Create GitHub release and upload artifacts
@@ -395,7 +388,7 @@ jobs:
### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
@@ -448,10 +441,10 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

bump-downstream-repo-versions:
name: Bump downstream repo versions
bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners, publish-github-release]
needs: [allocate-runners]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -463,58 +456,13 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump downstream repo versions
- name: Bump doc version
working-directory: cyborg
run: pnpm tsx bin/bump-versions.ts
run: pnpm tsx bin/bump-doc-version.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}

bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Bump helm charts version
env:
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-helm-charts-version.sh

bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Bump homebrew greptime version
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-homebrew-greptme-version.sh

notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
.github/workflows/semantic-pull-request.yml (7 lines changed)
@@ -11,17 +11,14 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

permissions:
issues: write
contents: write
pull-requests: write

jobs:
check:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
.gitignore (7 lines changed)
@@ -28,7 +28,6 @@ debug/
# Logs
**/__unittest_logs
logs/
!grafana/dashboards/logs/

# cpython's generated python byte code
**/__pycache__/
@@ -58,9 +57,3 @@ tests-fuzz/corpus/
## default data home
greptimedb_data

# github
!/.github

# Claude code
CLAUDE.md
@@ -10,10 +10,12 @@
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)

## Team Members (in alphabetical order)

* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)

@@ -108,7 +108,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)

Also, see some extra GreptimeDB content:
Cargo.lock (2,603 lines changed, generated): file diff suppressed because it is too large.
Cargo.toml (50 lines changed)
@@ -13,7 +13,6 @@ members = [
"src/common/datasource",
"src/common/decimal",
"src/common/error",
"src/common/event-recorder",
"src/common/frontend",
"src/common/function",
"src/common/greptimedb-telemetry",
@@ -31,15 +30,12 @@ members = [
"src/common/recordbatch",
"src/common/runtime",
"src/common/session",
"src/common/sql",
"src/common/stat",
"src/common/substrait",
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/common/version",
"src/common/wal",
"src/common/workload",
"src/datanode",
"src/datatypes",
"src/file-engine",
@@ -51,7 +47,6 @@ members = [
"src/meta-client",
"src/meta-srv",
"src/metric-engine",
"src/mito-codec",
"src/mito2",
"src/object-store",
"src/operator",
@@ -73,19 +68,16 @@ members = [
resolver = "2"

[workspace.package]
version = "0.16.0"
version = "0.15.0"
edition = "2021"
license = "Apache-2.0"

[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
clippy.large_enum_variant = "allow"
clippy.doc_overindented_list_items = "allow"
clippy.uninlined_format_args = "allow"
rust.unknown_lints = "deny"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

@@ -121,26 +113,24 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
dotenv = "0.15"
either = "1.15"
etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ccfd4da48bc0254ed865e479cd981a3581b02d84" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e82b0158cd38d4021edb4e4c0ae77f999051e62f" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -156,7 +146,6 @@ meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev =
mockall = "0.13"
moka = "0.12"
nalgebra = "0.33"
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
notify = "8.0"
num_cpus = "1.16"
object_store_opendal = "0.50"
@@ -168,14 +157,12 @@ opentelemetry-proto = { version = "0.27", features = [
"with-serde",
"logs",
] }
ordered-float = { version = "4.3", features = ["serde"] }
parking_lot = "0.12"
parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.6", features = ["ser"] }
promql-parser = { version = "0.5.1", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
@@ -188,7 +175,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.25"
@@ -197,7 +184,6 @@ rust_decimal = "1.33"
rustc-hash = "2.0"
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
rustls = { version = "0.23.25", default-features = false }
sea-query = "0.32"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
@@ -206,7 +192,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "df6fcca80ce903f5beef7002cd2c1b062e7024f8", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"
@@ -227,13 +213,10 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
vrl = "0.25"
zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

@@ -251,7 +234,6 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-event-recorder = { path = "src/common/event-recorder" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
@@ -269,13 +251,11 @@ common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
common-session = { path = "src/common/session" }
common-sql = { path = "src/common/sql" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" }
common-wal = { path = "src/common/wal" }
common-workload = { path = "src/common/workload" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
@@ -287,7 +267,6 @@ log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" }
mito-codec = { path = "src/mito-codec" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
@@ -303,7 +282,6 @@ query = { path = "src/query" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
stat = { path = "src/common/stat" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
Makefile (2 lines changed)
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
195
README.md
195
README.md
@@ -8,8 +8,6 @@
|
||||
|
||||
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2>
|
||||
|
||||
> Delivers sub-second querying at PB scale and exceptional cost efficiency from edge to cloud.
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
@@ -51,77 +49,74 @@
|
||||
</div>
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [⭐ Key Features](#features)
|
||||
- [Quick Comparison](#quick-comparison)
|
||||
- [Architecture](#architecture)
|
||||
- [Try GreptimeDB](#try-greptimedb)
|
||||
- [**Features: Why GreptimeDB**](#why-greptimedb)
|
||||
- [Architecture](https://docs.greptime.com/contributor-guide/overview/#architecture)
|
||||
- [Try it for free](#try-greptimedb)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Build From Source](#build-from-source)
|
||||
- [Tools & Extensions](#tools--extensions)
|
||||
- [Project Status](#project-status)
|
||||
- [Community](#community)
|
||||
- [Join the community](#community)
|
||||
- [Contributing](#contributing)
|
||||
- [Tools & Extensions](#tools--extensions)
|
||||
- [License](#license)
|
||||
- [Commercial Support](#commercial-support)
|
||||
- [Contributing](#contributing)
|
||||
- [Acknowledgement](#acknowledgement)
|
||||
|
||||
## Introduction
|
||||
|
||||
**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
|
||||
**GreptimeDB** is an open-source, cloud-native, unified & cost-effective observability database for **Metrics**, **Logs**, and **Traces**. You can gain real-time insights from Edge to Cloud at Any Scale.
|
||||
|
||||
## Features

| Feature | Description |
| --------- | ----------- |
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |

Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).

## Quick Comparison

Our core developers have been building observability data platforms for years. GreptimeDB was born from those best practices:

| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|----------------------------------|-----------------------|--------------------|-----------------|
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
| Integration | REST, SQL, Common protocols | Varies | Varies |

**Performance:**

* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
* [TSBS Benchmark](https://github.com/GreptimeTeam/greptimedb/tree/main/docs/benchmarks/tsbs)

Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/features-that-you-concern#how-is-greptimedbs-performance-compared-to-other-solutions).

## Architecture

* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:

<img alt="GreptimeDB System Overview" src="docs/architecture.png">
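
To make the "Developer-Friendly" row above concrete, the sketch below writes one sample point through the InfluxDB line-protocol endpoint and reads it back with SQL over the HTTP API. It assumes a local instance listening on the default HTTP port 4000 (for example, the Docker quickstart below); the table name `monitor` and the values are purely illustrative, and the protocol docs linked above remain the authoritative reference for these endpoints.

```shell
# Write a sample point via the InfluxDB line-protocol endpoint (assumes a local instance on port 4000).
curl -X POST "http://localhost:4000/v1/influxdb/write?db=public" \
  --data-binary 'monitor,host=host1 cpu=0.5,memory=1024'

# Read it back with SQL over the HTTP API.
curl -X POST "http://localhost:4000/v1/sql?db=public" \
  --data-urlencode "sql=SELECT * FROM monitor LIMIT 10"
```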
## Try GreptimeDB
|
||||
|
||||
### 1. [Live Demo](https://greptime.com/playground)

Experience GreptimeDB directly in your browser.

### 2. [GreptimeCloud](https://console.greptime.cloud/)

Start instantly with a free cluster.

### 3. Docker (Local Quickstart)

To install GreptimeDB locally, the recommended way is via Docker:

```shell
docker pull greptime/greptimedb
```

Start a GreptimeDB container with:

```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \
  -v "$(pwd)/greptimedb_data:/greptimedb_data" \
  --name greptime --rm \
  greptime/greptimedb:latest standalone start \
  --http-addr 0.0.0.0:4000 \
  --mysql-addr 0.0.0.0:4002 \
  --postgres-addr 0.0.0.0:4003
```

Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)

[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)

**Troubleshooting:**

* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
* Failed to start? Check the container logs with `docker logs greptime` for further details.
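
Once the container is up, a quick sanity check is to hit the HTTP health endpoint and open a MySQL-protocol session. The commands below assume the default ports published by the `docker run` example above and a `mysql` client installed locally.

```shell
# HTTP health check; the server responds once it is ready.
curl http://localhost:4000/health

# Connect over the MySQL wire protocol (no credentials are required by default).
mysql -h 127.0.0.1 -P 4002
```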
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Quickstart](https://docs.greptime.com/getting-started/quick-start)
- [User Guide](https://docs.greptime.com/user-guide/overview)
- [Demo Scenes](https://github.com/GreptimeTeam/demo-scene)
- [FAQ](https://docs.greptime.com/faq-and-others/faq)
|
||||
|
||||
## Build From Source

**Prerequisites:**

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ build essentials, including `gcc`/`g++`/`autoconf` and the glibc library (e.g. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
* Python toolchain (optional): required only by some test scripts.

**Build and Run:**

Build the GreptimeDB binary:

```shell
make
```

Run a standalone server:

```shell
cargo run -- standalone start
```
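
For local experiments you may want to point the freshly built server at a configuration file instead of the built-in defaults. A minimal sketch, assuming the example configuration files shipped under `config/` in this repository and a `-c` flag for passing a config file; run `cargo run -- standalone start --help` to confirm the exact option names.

```shell
# Run the locally built standalone server with an example configuration file
# (the path and the -c flag are assumptions; check the CLI help).
cargo run -- standalone start -c config/standalone.example.toml
```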
|
||||
|
||||
## Tools & Extensions
|
||||
|
||||
- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
- **Grafana:** [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
|
||||
|
||||
## Project Status
|
||||
|
||||
> **Status:** Beta.
> **GA (v1.0):** Targeted for mid 2025.

While in Beta, GreptimeDB is already:

- Being used in production by early adopters
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments

For production use, we recommend using the latest stable release.
|
||||
[](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
|
||||
|
||||
If you find this project useful, a ⭐ would mean a lot to us!
|
||||
<img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>
|
||||
|
||||
## Community
|
||||
|
||||
We invite you to engage and contribute! If you get stuck, file an issue with a detailed description of what you were trying to do and what went wrong, or reach out through any of the channels below:

- [Slack](https://greptime.com/slack)
- [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [Official Website](https://greptime.com/)
- [Blog](https://greptime.com/blogs/)
- [LinkedIn](https://www.linkedin.com/company/greptime/)
- [X (Twitter)](https://X.com/greptime)
- [YouTube](https://www.youtube.com/@greptime)

## License

GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt).

## Commercial Support

Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
[Contact us](https://greptime.com/contactus) for details.

## Contributing

- Read our [Contribution Guidelines](https://github.com/GreptimeTeam/greptimedb/blob/main/CONTRIBUTING.md).
- Explore [Internal Concepts](https://docs.greptime.com/contributor-guide/overview.html) and [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb).
- Pick up a [good first issue](https://github.com/GreptimeTeam/greptimedb/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and join the #contributors [Slack](https://greptime.com/slack) channel.
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
Special thanks to all contributors! See [AUTHOR.md](https://github.com/GreptimeTeam/greptimedb/blob/main/AUTHOR.md).

GreptimeDB builds on a number of excellent open-source projects:

- [Apache Arrow™](https://arrow.apache.org/) (memory model)
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
- [etcd](https://etcd.io/) (meta service)
|
||||
config/config.md
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
@@ -100,7 +99,7 @@
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -123,7 +122,6 @@
|
||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
@@ -147,7 +145,6 @@
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
@@ -157,7 +154,6 @@
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -186,30 +182,26 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
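
To see how a few of the standalone options above fit together, here is a minimal configuration sketch. The keys come straight from the table; the file name, the values, and the `-c` flag used to pass the config file are illustrative assumptions rather than the only supported form.

```shell
# Write a minimal standalone config using options documented in the table above (values are illustrative).
cat > standalone.toml <<'EOF'
[storage]
type = "File"
data_home = "./greptimedb_data"

[slow_query]
enable = true
threshold = "10s"

[logging]
level = "info"
dir = "./greptimedb_data/logs"
EOF

# Start a standalone instance with this config file (assuming the -c config-file flag).
greptime standalone start -c standalone.toml
```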
|
||||
|
||||
|
||||
## Distributed Mode
|
||||
@@ -232,12 +224,10 @@
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -284,7 +274,6 @@
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.<br/>Default to false, meaning when push down optimize failed, return error msg |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
@@ -293,67 +282,49 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
|
||||
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
|
||||
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
|
||||
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
|
||||
| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
|
||||
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
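
As an example of the frontend slow-query options documented above, the fragment below records slow queries in the `greptime_private.slow_queries` system table. The keys mirror the table rows; the file name and the `-c` flag are assumptions made for illustration.

```shell
# Frontend config fragment enabling the slow query log backed by the system table.
cat > frontend.toml <<'EOF'
[slow_query]
enable = true
record_type = "system_table"  # use "log" to write to greptimedb-slow-queries.* files instead
threshold = "30s"
sample_ratio = 1.0
ttl = "30d"
EOF

# Pass it to the frontend at startup (assuming the -c config-file flag).
greptime frontend start -c frontend.toml
```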
|
||||
|
||||
|
||||
### Metasrv
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
||||
|
||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Takes effect when using PostgreSQL as the kvbackend.<br/>Only used when backend is `postgres_store`. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)<br/>When using PostgreSQL or MySQL as metadata store, you can configure TLS here |
|
||||
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
||||
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
||||
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
||||
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
||||
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
@@ -381,29 +352,30 @@
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
||||
| `event_recorder.ttl` | String | `30d` | TTL for the events table that will be used to store the events. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
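
Putting the metadata-store options above together, the sketch below points the metasrv at a PostgreSQL backend instead of etcd. The keys are the ones documented in this table; the connection string, file name, and `-c` flag are placeholder assumptions.

```shell
# Metasrv config fragment using PostgreSQL as the metadata store (placeholder credentials).
cat > metasrv.toml <<'EOF'
backend = "postgres_store"
store_addrs = ["password=secret dbname=postgres user=postgres host=localhost port=5432"]
meta_table_name = "greptime_metakv"

[backend_tls]
mode = "prefer"
EOF

# Start metasrv with this config (assuming the -c config-file flag).
greptime metasrv start -c metasrv.toml
```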
|
||||
|
||||
|
||||
### Datanode
|
||||
@@ -426,7 +398,6 @@
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -452,8 +423,8 @@
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
@@ -469,7 +440,7 @@
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -492,7 +463,6 @@
|
||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
@@ -516,7 +486,6 @@
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
@@ -526,7 +495,6 @@
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -555,23 +523,26 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
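
The WAL rows above translate into a config fragment like the one below. It is only a sketch of the raft-engine knobs, not a complete datanode configuration, and the values simply restate the documented defaults.

```shell
# Datanode config fragment for the local raft-engine WAL (values restate the documented defaults).
cat > datanode-wal.toml <<'EOF'
[wal]
provider = "raft_engine"
file_size = "128MB"      # size of each WAL segment file
purge_threshold = "1GB"  # total WAL size that triggers a purge
purge_interval = "1m"    # how often purging is triggered
sync_write = false
EOF
```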
|
||||
|
||||
|
||||
### Flownode
|
||||
@@ -581,16 +552,6 @@
|
||||
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If unset (or set to 0), half the number of CPU cores is used. |
|
||||
| `flow.batching_mode` | -- | -- | -- |
|
||||
| `flow.batching_mode.query_timeout` | String | `600s` | The default batching engine query timeout is 10 minutes. |
|
||||
| `flow.batching_mode.slow_query_threshold` | String | `60s` | Outputs a warning log for any query that runs longer than this threshold. |
|
||||
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two queries execution by batching mode task |
|
||||
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout |
|
||||
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The gRPC max retry number |
|
||||
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | How long the flownode waits for an available frontend.<br/>If no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,<br/>which prevents the flownode from starting. |
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend has been down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it is removed from the list of frontends the flownode connects to. |
|
||||
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
||||
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
@@ -618,16 +579,15 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `logging.slow_query` | -- | -- | The slow query log options. |
|
||||
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
||||
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `query` | -- | -- | -- |
|
||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
@@ -44,13 +44,6 @@ runtime_size = 8
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
## Compression mode for datanode side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -129,11 +122,11 @@ dir = "./greptimedb_data/wal"
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "128MB"
|
||||
|
||||
## The threshold of the WAL size to trigger a purge.
|
||||
## The threshold of the WAL size to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "1GB"
|
||||
|
||||
## The interval to trigger a purge.
|
||||
## The interval to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "1m"
|
||||
|
||||
@@ -259,7 +252,7 @@ parallelism = 0
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -367,10 +360,6 @@ timeout = "30s"
|
||||
## The timeout for idle sockets being kept-alive.
|
||||
pool_idle_timeout = "90s"
|
||||
|
||||
## To skip the ssl verification
|
||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
||||
skip_ssl_validation = false
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
@@ -474,9 +463,6 @@ sst_write_buffer_size = "8MB"
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 128
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
@@ -513,9 +499,6 @@ content_cache_size = "128MiB"
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## Cache size for index result.
|
||||
result_cache_size = "128MiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
@@ -632,7 +615,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -643,25 +626,43 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## The slow query log options.
|
||||
[logging.slow_query]
|
||||
## Whether to enable slow query log.
|
||||
enable = false
|
||||
|
||||
## The threshold of slow query.
|
||||
## @toml2docs:none-default
|
||||
threshold = "10s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||
## @toml2docs:none-default
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
@@ -672,11 +673,3 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -7,29 +7,6 @@ node_id = 14
|
||||
## The number of flow worker in flownode.
|
||||
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
||||
#+num_workers=0
|
||||
[flow.batching_mode]
|
||||
## The default batching engine query timeout is 10 minutes.
|
||||
#+query_timeout="600s"
|
||||
## will output a warn log for any query that runs for more that this threshold
|
||||
#+slow_query_threshold="60s"
|
||||
## The minimum duration between two queries execution by batching mode task
|
||||
#+experimental_min_refresh_duration="5s"
|
||||
## The gRPC connection timeout
|
||||
#+grpc_conn_timeout="5s"
|
||||
## The gRPC max retry number
|
||||
#+experimental_grpc_max_retries=3
|
||||
## Flow wait for available frontend timeout,
|
||||
## if failed to find available frontend after frontend_scan_timeout elapsed, return error
|
||||
## which prevent flownode from starting
|
||||
#+experimental_frontend_scan_timeout="30s"
|
||||
## Frontend activity timeout
|
||||
## if frontend is down(not sending heartbeat) for more than frontend_activity_timeout,
|
||||
## it will be removed from the list that flownode use to connect
|
||||
#+experimental_frontend_activity_timeout="60s"
|
||||
## Maximum number of filters allowed in a single query
|
||||
#+experimental_max_filter_num_per_query=20
|
||||
## Time window merge distance
|
||||
#+experimental_time_window_merge_threshold=3
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
@@ -106,7 +83,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -117,30 +94,27 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The slow query log options.
|
||||
[logging.slow_query]
|
||||
## Whether to enable slow query log.
|
||||
enable = false
|
||||
|
||||
## The threshold of slow query.
|
||||
## @toml2docs:none-default
|
||||
threshold = "10s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||
## @toml2docs:none-default
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
[query]
|
||||
## Parallelism of the query engine for query sent by flownode.
|
||||
## Default to 1, so it won't use too much cpu or memory
|
||||
parallelism = 1
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -37,12 +37,6 @@ enable_cors = true
|
||||
## Customize allowed origins for HTTP CORS.
|
||||
## @toml2docs:none-default
|
||||
cors_allowed_origins = ["https://example.com"]
|
||||
## Whether to enable validation for Prometheus remote write requests.
|
||||
## Available options:
|
||||
## - strict: deny invalid UTF-8 strings (default).
|
||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
||||
## - unchecked: do not valid strings.
|
||||
prom_validation_mode = "strict"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
@@ -54,13 +48,6 @@ bind_addr = "127.0.0.1:4001"
|
||||
server_addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -197,9 +184,6 @@ metadata_cache_tti = "5m"
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
## Whether to allow query fallback when push down optimize fails.
|
||||
## Default to false, meaning when push down optimize failed, return error msg
|
||||
allow_query_fallback = false
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
@@ -221,7 +205,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -232,9 +216,6 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -242,34 +223,36 @@ otlp_export_protocol = "http"
|
||||
default_ratio = 1.0
|
||||
|
||||
## The slow query log options.
|
||||
[slow_query]
|
||||
[logging.slow_query]
|
||||
## Whether to enable slow query log.
|
||||
enable = true
|
||||
enable = false
|
||||
|
||||
## The record type of slow queries. It can be `system_table` or `log`.
|
||||
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
|
||||
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
|
||||
record_type = "system_table"
|
||||
## The threshold of slow query.
|
||||
## @toml2docs:none-default
|
||||
threshold = "10s"
|
||||
|
||||
## The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`.
|
||||
threshold = "30s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||
## @toml2docs:none-default
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
|
||||
ttl = "30d"
|
||||
|
||||
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
@@ -280,11 +263,3 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
data_home = "./greptimedb_data/metasrv/"
|
||||
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
## For postgres store, the format is:
|
||||
@@ -16,7 +24,6 @@ store_key_prefix = ""
|
||||
## - `etcd_store` (default value)
|
||||
## - `memory_store`
|
||||
## - `postgres_store`
|
||||
## - `mysql_store`
|
||||
backend = "etcd_store"
|
||||
|
||||
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
|
||||
@@ -43,11 +50,6 @@ use_memory_store = false
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The delay before starting region failure detection.
|
||||
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
|
||||
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
|
||||
region_failure_detector_initialization_delay = '10m'
|
||||
|
||||
## Whether to allow region failover on local WAL.
|
||||
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
|
||||
allow_region_failover_on_local_wal = false
|
||||
@@ -65,60 +67,6 @@ node_max_idle_time = "24hours"
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)
|
||||
## When using PostgreSQL or MySQL as metadata store, you can configure TLS here
|
||||
[backend_tls]
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - "disable" - No TLS
|
||||
## - "prefer" (default) - Try TLS, fallback to plain
|
||||
## - "require" - Require TLS
|
||||
## - "verify_ca" - Require TLS and verify CA
|
||||
## - "verify_full" - Require TLS and verify hostname
|
||||
mode = "prefer"
|
||||
|
||||
## Path to client certificate file (for client authentication)
|
||||
## Like "/path/to/client.crt"
|
||||
cert_path = ""
|
||||
|
||||
## Path to client private key file (for client authentication)
|
||||
## Like "/path/to/client.key"
|
||||
key_path = ""
|
||||
|
||||
## Path to CA certificate file (for server certificate verification)
|
||||
## Required when using custom CAs or self-signed certificates
|
||||
## Leave empty to use system root certificates only
|
||||
## Like "/path/to/ca.crt"
|
||||
ca_cert_path = ""
|
||||
|
||||
## Watch for certificate file changes and auto reload
|
||||
watch = false
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
|
||||
@@ -240,11 +188,6 @@ create_topic_timeout = "30s"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Configuration options for the event recorder.
|
||||
[event_recorder]
|
||||
## TTL for the events table that will be used to store the events.
|
||||
ttl = "30d"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
@@ -258,7 +201,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -269,25 +212,43 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## The slow query log options.
|
||||
[logging.slow_query]
|
||||
## Whether to enable slow query log.
|
||||
enable = false
|
||||
|
||||
## The threshold of slow query.
|
||||
## @toml2docs:none-default
|
||||
threshold = "10s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||
## @toml2docs:none-default
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
@@ -298,11 +259,3 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
@@ -43,13 +43,6 @@ enable_cors = true
|
||||
## @toml2docs:none-default
|
||||
cors_allowed_origins = ["https://example.com"]
|
||||
|
||||
## Whether to enable validation for Prometheus remote write requests.
|
||||
## Available options:
|
||||
## - strict: deny invalid UTF-8 strings (default).
|
||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
||||
## - unchecked: do not valid strings.
|
||||
prom_validation_mode = "strict"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -350,7 +343,7 @@ parallelism = 0
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -458,10 +451,6 @@ timeout = "30s"
|
||||
## The timeout for idle sockets being kept-alive.
|
||||
pool_idle_timeout = "90s"
|
||||
|
||||
## To skip the ssl verification
|
||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
||||
skip_ssl_validation = false
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
@@ -565,9 +554,6 @@ sst_write_buffer_size = "8MB"
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 128
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
@@ -604,9 +590,6 @@ content_cache_size = "128MiB"
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## Cache size for index result.
|
||||
result_cache_size = "128MiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
@@ -723,7 +706,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -734,9 +717,6 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -744,27 +724,25 @@ otlp_export_protocol = "http"
|
||||
default_ratio = 1.0
|
||||
|
||||
## The slow query log options.
|
||||
[slow_query]
|
||||
[logging.slow_query]
|
||||
## Whether to enable slow query log.
|
||||
#+ enable = false
|
||||
|
||||
## The record type of slow queries. It can be `system_table` or `log`.
|
||||
## @toml2docs:none-default
|
||||
#+ record_type = "system_table"
|
||||
enable = false
|
||||
|
||||
## The threshold of slow query.
|
||||
## @toml2docs:none-default
|
||||
#+ threshold = "10s"
|
||||
threshold = "10s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||
## @toml2docs:none-default
|
||||
#+ sample_ratio = 1.0
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
@@ -775,7 +753,7 @@ write_interval = "30s"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
@@ -786,11 +764,3 @@ headers = { }
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
75
cyborg/bin/bump-doc-version.ts
Normal file
75
cyborg/bin/bump-doc-version.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
async function triggerWorkflow(workflowId: string, version: string) {
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
try {
|
||||
await docsClient.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: "docs",
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function determineWorkflow(version: string): [string, string] {
|
||||
// Check if it's a nightly version
|
||||
if (version.includes('nightly')) {
|
||||
return ['bump-nightly-version.yml', version];
|
||||
}
|
||||
|
||||
const parts = version.split('.');
|
||||
|
||||
if (parts.length !== 3) {
|
||||
throw new Error('Invalid version format');
|
||||
}
|
||||
|
||||
// If patch version (last number) is 0, it's a major version
|
||||
// Return only major.minor version
|
||||
if (parts[2] === '0') {
|
||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||
}
|
||||
|
||||
// Otherwise it's a patch version, use full version
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
try {
|
||||
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
|
||||
triggerWorkflow(workflowId, apiVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing version: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
interface RepoConfig {
|
||||
tokenEnv: string;
|
||||
repo: string;
|
||||
workflowLogic: (version: string) => [string, string] | null;
|
||||
}
|
||||
|
||||
const REPO_CONFIGS: Record<string, RepoConfig> = {
|
||||
website: {
|
||||
tokenEnv: "WEBSITE_REPO_TOKEN",
|
||||
repo: "website",
|
||||
workflowLogic: (version: string) => {
|
||||
// Skip nightly versions for website
|
||||
if (version.includes('nightly')) {
|
||||
console.log('Nightly version detected for website, skipping workflow trigger.');
|
||||
return null;
|
||||
}
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
},
|
||||
demo: {
|
||||
tokenEnv: "DEMO_REPO_TOKEN",
|
||||
repo: "demo-scene",
|
||||
workflowLogic: (version: string) => {
|
||||
// Skip nightly versions for demo
|
||||
if (version.includes('nightly')) {
|
||||
console.log('Nightly version detected for demo, skipping workflow trigger.');
|
||||
return null;
|
||||
}
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
},
|
||||
docs: {
|
||||
tokenEnv: "DOCS_REPO_TOKEN",
|
||||
repo: "docs",
|
||||
workflowLogic: (version: string) => {
|
||||
// Check if it's a nightly version
|
||||
if (version.includes('nightly')) {
|
||||
return ['bump-nightly-version.yml', version];
|
||||
}
|
||||
|
||||
const parts = version.split('.');
|
||||
if (parts.length !== 3) {
|
||||
throw new Error('Invalid version format');
|
||||
}
|
||||
|
||||
// If patch version (last number) is 0, it's a major version
|
||||
// Return only major.minor version
|
||||
if (parts[2] === '0') {
|
||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||
}
|
||||
|
||||
// Otherwise it's a patch version, use full version
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
|
||||
const client = obtainClient(repoConfig.tokenEnv);
|
||||
try {
|
||||
await client.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: repoConfig.repo,
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function processRepo(repoName: string, version: string) {
|
||||
const repoConfig = REPO_CONFIGS[repoName];
|
||||
if (!repoConfig) {
|
||||
throw new Error(`Unknown repository: ${repoName}`);
|
||||
}
|
||||
|
||||
try {
|
||||
const workflowResult = repoConfig.workflowLogic(version);
|
||||
if (workflowResult === null) {
|
||||
// Skip this repo (e.g., nightly version for website)
|
||||
return;
|
||||
}
|
||||
|
||||
const [workflowId, apiVersion] = workflowResult;
|
||||
await triggerWorkflow(repoConfig, workflowId, apiVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
// Get target repositories from environment variable
|
||||
// Default to both if not specified
|
||||
const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];
|
||||
|
||||
console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);
|
||||
|
||||
const errors: string[] = [];
|
||||
|
||||
// Process each repository
|
||||
for (const repo of targetRepos) {
|
||||
try {
|
||||
await processRepo(repo, cleanVersion);
|
||||
} catch (error) {
|
||||
errors.push(`${repo}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log('All repositories processed successfully');
|
||||
}
|
||||
|
||||
// Execute main function
|
||||
main().catch((error) => {
|
||||
core.setFailed(`Unexpected error: ${error.message}`);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -55,25 +55,12 @@ async function main() {
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
||||
})
|
||||
|
||||
// Get available assignees for the docs repo
|
||||
const assigneesResponse = await docsClient.rest.issues.listAssignees({
|
||||
owner: 'GreptimeTeam',
|
||||
repo: 'docs',
|
||||
})
|
||||
const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
|
||||
core.info(`Available assignees: ${validAssignees.join(', ')}`)
|
||||
|
||||
// Check if the actor is a valid assignee, otherwise fallback to fengjiachun
|
||||
const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
|
||||
core.info(`Assigning issue to: ${assignee}`)
|
||||
|
||||
await docsClient.rest.issues.create({
|
||||
owner: 'GreptimeTeam',
|
||||
repo: 'docs',
|
||||
title: `Update docs for ${title}`,
|
||||
body: `A document change request is generated from ${html_url}`,
|
||||
assignee: assignee,
|
||||
assignee: actor,
|
||||
}).then((res) => {
|
||||
core.info(`Created issue ${res.data}`)
|
||||
})
|
||||
|
||||
@@ -47,6 +47,4 @@ WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -47,6 +47,4 @@ WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -15,6 +15,4 @@ ADD $TARGETARCH/greptime /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
|
||||
@@ -18,6 +18,4 @@ ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 173 KiB |
@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que
|
||||
|
||||
## Addition
|
||||
- You can tune GreptimeDB's configuration to get better performance.
|
||||
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments-administration/configuration#storage-options).
|
||||
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).
|
||||
|
||||
@@ -11,6 +11,6 @@ And database will reply with something like:
|
||||
Log Level changed from Some("info") to "trace,flow=debug"%
|
||||
```
|
||||
|
||||
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
|
||||
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follow the same rule of `RUST_LOG`.
|
||||
|
||||
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
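As a point of reference, the `tracing-subscriber` crate implements this same `RUST_LOG`-style directive syntax, so such a string can be parsed into a filter as in the sketch below (the module names are only illustrative):

```rust
// Requires the `tracing-subscriber` crate with its `env-filter` feature.
use tracing_subscriber::EnvFilter;

fn main() {
    // A global `info` level plus two per-module overrides.
    let directives = "info,flow=debug,mito2=trace";
    let _filter = EnvFilter::new(directives);
    println!("using log filter: {directives}");
}
```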
|
||||
@@ -14,7 +14,7 @@ impl SqlQueryHandler for Instance {
|
||||
```
|
||||
|
||||
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL
|
||||
will be fed into `StatementExecutor`:
|
||||
will be feed into `StatementExecutor`:
|
||||
|
||||
```rust
|
||||
// in Frontend Instance:
|
||||
@@ -27,7 +27,7 @@ an example.
|
||||
|
||||
Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's
|
||||
a `SqlStatementExecutor` field in `StatementExecutor`. Each GreptimeDB Standalone and Cluster has its own implementation
|
||||
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two modes (
|
||||
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two mode (
|
||||
like `CREATE TABLE`), you have to implement them in their own `SqlStatementExecutor`s.
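Conceptually the split looks like the toy sketch below; the real `SqlStatementExecutor` trait is async and works with GreptimeDB's own statement and context types, so everything here is simplified for illustration.

```rust
/// Toy stand-in for a statement that behaves differently per deployment mode.
enum Statement {
    CreateTable(String),
}

/// Toy version of the mode-specific executor trait: one impl per deployment mode.
trait SqlStatementExecutor {
    fn execute(&self, stmt: &Statement);
}

struct StandaloneExecutor;
struct ClusterExecutor;

impl SqlStatementExecutor for StandaloneExecutor {
    fn execute(&self, stmt: &Statement) {
        let Statement::CreateTable(name) = stmt;
        // Standalone: create the table directly on the local node.
        println!("standalone: create table {name} locally");
    }
}

impl SqlStatementExecutor for ClusterExecutor {
    fn execute(&self, stmt: &Statement) {
        let Statement::CreateTable(name) = stmt;
        // Cluster: coordinate with metasrv to create a distributed table.
        println!("cluster: create table {name} via metasrv");
    }
}

/// The shared statement executor delegates mode-specific statements to
/// whichever implementation it was constructed with.
struct StatementExecutor {
    sql_stmt_executor: Box<dyn SqlStatementExecutor>,
}

fn main() {
    for executor in [
        StatementExecutor { sql_stmt_executor: Box::new(StandaloneExecutor) },
        StatementExecutor { sql_stmt_executor: Box::new(ClusterExecutor) },
    ] {
        executor
            .sql_stmt_executor
            .execute(&Statement::CreateTable("metrics".to_string()));
    }
}
```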
|
||||
|
||||
Summarize as the diagram below:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Profile memory usage of GreptimeDB
|
||||
|
||||
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](./memory-profile-scripts/scripts).
|
||||
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](docs/how-to/memory-profile-scripts).
|
||||
|
||||
## Prerequisites
|
||||
### jemalloc
|
||||
@@ -30,23 +30,6 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
|
||||
|
||||
## Profiling
|
||||
|
||||
### Configuration
|
||||
|
||||
You can control heap profiling activation through configuration. Add the following to your configuration file:
|
||||
|
||||
```toml
|
||||
[memory]
|
||||
# Whether to enable heap profiling activation during startup.
|
||||
# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
# is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
# Default is true.
|
||||
enable_heap_profiling = true
|
||||
```
|
||||
|
||||
By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.
|
||||
|
||||
### Starting with environment variables
|
||||
|
||||
Start GreptimeDB instance with environment variables:
|
||||
|
||||
```bash
|
||||
@@ -57,31 +40,10 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
```
|
||||
|
||||
### Memory profiling control
|
||||
|
||||
You can control heap profiling activation using the new HTTP APIs:
|
||||
|
||||
```bash
|
||||
# Check current profiling status
|
||||
curl -X GET localhost:4000/debug/prof/mem/status
|
||||
|
||||
# Activate heap profiling (if not already active)
|
||||
curl -X POST localhost:4000/debug/prof/mem/activate
|
||||
|
||||
# Deactivate heap profiling
|
||||
curl -X POST localhost:4000/debug/prof/mem/deactivate
|
||||
```
|
||||
|
||||
### Dump memory profiling data
|
||||
|
||||
Dump memory profiling data through HTTP API:
|
||||
|
||||
```bash
|
||||
curl -X POST localhost:4000/debug/prof/mem > greptime.hprof
|
||||
# or output flamegraph directly
|
||||
curl -X POST "localhost:4000/debug/prof/mem?output=flamegraph" > greptime.svg
|
||||
# or output pprof format
|
||||
curl -X POST "localhost:4000/debug/prof/mem?output=proto" > greptime.pprof
|
||||
```
|
||||
|
||||
You can periodically dump profiling data and compare them to find the delta memory usage.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
Currently, our query engine is based on DataFusion, so all aggregate function is executed by DataFusion, through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/datafusion/tree/main/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered in a struct called "Accumulator" to accumulates states along the way in aggregation.
|
||||
Currently, our query engine is based on DataFusion, so all aggregate function is executed by DataFusion, through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered in a struct called "Accumulator" to accumulates states along the way in aggregation.
|
||||
|
||||
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's preferred way to write UDAF.)
|
||||
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's prefer way to write UDAF.)
|
||||
|
||||
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we find a way to bypass DataFusion's restriction, a restriction that DataFusion simply doesn't pass the input data's type when creating an Accumulator.
|
||||
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we found a way to bypassing DataFusion's restriction, a restriction that DataFusion simply don't pass the input data's type when creating an Accumulator.
|
||||
|
||||
> There's an example in `my_sum_udaf_example.rs`, take that as quick start.
|
||||
|
||||
@@ -16,7 +16,7 @@ You must first define a struct that will be used to create your accumulator. For
|
||||
struct MySumAccumulatorCreator {}
|
||||
```
|
||||
|
||||
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
|
||||
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
|
||||
|
||||
> Note that the `as_aggr_func_creator` macro will add fields to the struct, so the struct cannot be defined as an empty struct without fields like `struct Foo;`, nor as a new type like `struct Foo(bar)`.
|
||||
|
||||
@@ -32,11 +32,11 @@ pub trait AggregateFunctionCreator: Send + Sync + Debug {
|
||||
|
||||
You can use input data's type in methods that return output type and state types (just invoke `input_types()`).
|
||||
|
||||
The output type is aggregate function's output data's type. For example, `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are accumulator's internal states' types. Take `AVG` aggregate function on a `i32` column as example, its state types are `i64` (for sum) and `u64` (for count).
|
||||
The output type is aggregate function's output data's type. For example, `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are accumulator's internal states' types. Take `AVG` aggregate function on a `i32` column as example, it's state types are `i64` (for sum) and `u64` (for count).
|
||||
|
||||
The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's type as arguments. With input datatype known, you can create accumulator generically.
|
||||
|
||||
# 2. Impl `Accumulator` trait for your accumulator.
|
||||
# 2. Impl `Accumulator` trait for you accumulator.
|
||||
|
||||
The accumulator is where you store the aggregate calculation states and evaluate a result. You must impl `Accumulator` trait for it. The trait's definition is:
|
||||
|
||||
@@ -49,7 +49,7 @@ pub trait Accumulator: Send + Sync + Debug {
|
||||
}
|
||||
```
|
||||
|
||||
The DataFusion basically executes aggregate like this:
|
||||
The DataFusion basically execute aggregate like this:
|
||||
|
||||
1. Partitioning all input data for aggregate. Create an accumulator for each part.
|
||||
2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
|
||||
@@ -57,16 +57,16 @@ The DataFusion basically executes aggregate like this:
|
||||
4. Call `merge_batch` to merge all accumulator's internal state to one.
|
||||
5. Execute `evaluate` on the chosen one to get the final calculation result.
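To make these five steps concrete, here is a minimal, self-contained sketch in plain Rust. It deliberately does not use the real DataFusion `Accumulator` trait or GreptimeDB's wrapper types; the method names simply mirror the steps above.

```rust
/// A simplified sum accumulator over `u32` values, mirroring the
/// partition -> update_batch -> state -> merge_batch -> evaluate flow.
#[derive(Debug, Default)]
struct SimpleSumAccumulator {
    sum: u64,
}

impl SimpleSumAccumulator {
    /// Step 2: fold one partition's values into the local state.
    fn update_batch(&mut self, values: &[u32]) {
        self.sum += values.iter().map(|v| u64::from(*v)).sum::<u64>();
    }

    /// Step 3: expose the internal state so it can be shipped to another accumulator.
    fn state(&self) -> u64 {
        self.sum
    }

    /// Step 4: merge another accumulator's state into this one.
    fn merge_batch(&mut self, other_state: u64) {
        self.sum += other_state;
    }

    /// Step 5: produce the final result.
    fn evaluate(&self) -> u64 {
        self.sum
    }
}

fn main() {
    // Step 1: pretend the planner split the input into two partitions,
    // with one accumulator per partition.
    let partitions = vec![vec![1_u32, 2, 3], vec![10, 20]];
    let accumulators: Vec<SimpleSumAccumulator> = partitions
        .iter()
        .map(|part| {
            let mut acc = SimpleSumAccumulator::default();
            acc.update_batch(part);
            acc
        })
        .collect();

    // Steps 3-4: merge every partial state into a single accumulator.
    let mut merged = SimpleSumAccumulator::default();
    for acc in &accumulators {
        merged.merge_batch(acc.state());
    }

    // Step 5: evaluate the final result.
    assert_eq!(merged.evaluate(), 36);
}
```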
|
||||
|
||||
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
|
||||
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
|
||||
|
||||
# 3. Register your aggregate function to our query engine.
|
||||
|
||||
You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create a new instance of the struct `AggregateFunctionMeta`. The struct has three fields; the first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using lowercase for your name. If you have to use an uppercase name, wrap your aggregate function with quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
|
||||
|
||||
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to calculate, and so the count of the arguments is two.
|
||||
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
|
||||
|
||||
The third field is a function describing how to create the accumulator creator that you defined in step 1 above. Creating a creator is a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good starting point for the details is our `DfContextProviderAdapter` struct's `get_aggregate_meta` method.
|
||||
|
||||
# (Optional) 4. Make your aggregate function automatically registered.
|
||||
|
||||
If you've written a great aggregate function that wants to let everyone use it, you can make it automatically register to our query engine at start time. It's quick and simple, just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
|
||||
If you've written a great aggregate function that want to let everyone use it, you can make it automatically registered to our query engine at start time. It's quick simple, just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
This document introduces how to write fuzz tests in GreptimeDB.
|
||||
|
||||
## What is a fuzz test
|
||||
Fuzz test is tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
|
||||
Fuzz test is tool that leverage deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
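For orientation, a typical cargo-fuzz target skeleton looks like the following. This is the generic `libfuzzer-sys` template rather than one of GreptimeDB's actual fuzz targets; the target name and body are placeholders. Such a file lives under the crate's `fuzz_targets/` directory and is run with `cargo fuzz run <target-name>`.

```rust
// fuzz_targets/fuzz_example.rs -- hypothetical target name
#![no_main]

use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // The fuzzer hands us arbitrary bytes; a real target would decode them
    // into structured inputs (e.g. SQL statements) and feed them to the
    // code under test, so that any panic or crash signals a genuine bug.
    if let Ok(text) = std::str::from_utf8(data) {
        let _ = text.parse::<u64>();
    }
});
```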
|
||||
|
||||
## Why we need them
|
||||
- Find bugs by leveraging random generation
|
||||
|
||||
@@ -76,7 +76,7 @@ pub trait CompactionStrategy {
|
||||
```
|
||||
|
||||
The most suitable compaction strategy for a time-series scenario would be
|
||||
a hybrid strategy that combines time window compaction with size-tired compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/managing/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) does.
|
||||
a hybrid strategy that combines time window compaction with size-tired compaction, just like [Cassandra](https://cassandra.apache.org/doc/latest/cassandra/operating/compaction/twcs.html) and [ScyllaDB](https://docs.scylladb.com/stable/architecture/compaction/compaction-strategies.html#time-window-compaction-strategy-twcs) does.
|
||||
|
||||
We can first group SSTs in level n into buckets according to some predefined time window. Within that window,
|
||||
SSTs are compacted in a size-tiered manner (find SSTs with similar size and compact them to level n+1).
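As a rough illustration of that bucketing step, here is a standalone sketch; it is not Mito's actual implementation, and `SstMeta`, its fields, and the one-hour window are invented for the example.

```rust
use std::collections::BTreeMap;

/// Minimal stand-in for an SST file descriptor (hypothetical fields).
struct SstMeta {
    file_id: u64,
    /// Max timestamp of rows in this SST, in seconds.
    max_ts: i64,
    size_bytes: u64,
}

/// Group SSTs into fixed time-window buckets keyed by the window's start time.
/// Within each bucket, SSTs of similar size would then be picked and compacted
/// to the next level.
fn bucket_by_time_window(ssts: Vec<SstMeta>, window_secs: i64) -> BTreeMap<i64, Vec<SstMeta>> {
    let mut buckets: BTreeMap<i64, Vec<SstMeta>> = BTreeMap::new();
    for sst in ssts {
        // Align the SST's max timestamp down to the window boundary.
        let window_start = sst.max_ts.div_euclid(window_secs) * window_secs;
        buckets.entry(window_start).or_default().push(sst);
    }
    buckets
}

fn main() {
    let ssts = vec![
        SstMeta { file_id: 1, max_ts: 10, size_bytes: 4 << 20 },
        SstMeta { file_id: 2, max_ts: 3_700, size_bytes: 8 << 20 },
        SstMeta { file_id: 3, max_ts: 3_900, size_bytes: 9 << 20 },
    ];
    // One-hour windows: file 1 lands in the window starting at 0s,
    // files 2 and 3 in the window starting at 3600s.
    for (window_start, files) in bucket_by_time_window(ssts, 3_600) {
        let ids: Vec<u64> = files.iter().map(|f| f.file_id).collect();
        let total: u64 = files.iter().map(|f| f.size_bytes).sum();
        println!("window {window_start}s: files {ids:?}, {total} bytes");
    }
}
```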
|
||||
|
||||
@@ -28,7 +28,7 @@ In order to do those things while maintaining a low memory footprint, you need t
|
||||
- Greptime Flow's is built on top of [Hydroflow](https://github.com/hydro-project/hydroflow).
|
||||
- We have three choices for the Dataflow/Streaming process framework for our simple continuous aggregation feature:
|
||||
1. Based on the timely/differential dataflow crate that [materialize](https://github.com/MaterializeInc/materialize) based on. Later, it's proved too obscure for a simple usage, and is hard to customize memory usage control.
|
||||
2. Based on a simple dataflow framework that we write from ground up, like what [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did, for example the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/crates/arroyo-datastream/src/lib.rs) only takes up to 2000 line of codes. However, it means maintaining another layer of dataflow framework, which might seem easy in the beginning, but I fear it might be too burdensome to maintain once we need more features.
|
||||
2. Based on a simple dataflow framework that we write from ground up, like what [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did, for example the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/arroyo-datastream/src/lib.rs) only takes up to 2000 line of codes. However, it means maintaining another layer of dataflow framework, which might seem easy in the beginning, but I fear it might be too burdensome to maintain once we need more features.
|
||||
3. Based on a simple and lower-level dataflow framework that someone else wrote, like [hydroflow](https://github.com/hydro-project/hydroflow), this approach combines the best of both worlds. Firstly, it boasts ease of comprehension and customization. Secondly, the dataflow framework offers precisely the necessary features for crafting uncomplicated single-node dataflow programs while delivering decent performance.
|
||||
|
||||
Hence, we chose the third option and use a simple logical plan that is agnostic to the underlying dataflow framework, as it only describes what the dataflow graph should be doing, not how it does it. We then built operators in Hydroflow to execute the plan, and the resulting Hydroflow graph is wrapped in an engine that only supports data in/out and a tick event to flush and compute the result. This provides a thin middle layer that is easy to maintain and allows switching to another dataflow framework if necessary.
|
||||
|
||||
@@ -1,154 +0,0 @@
---
Feature Name: Repartition
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6558
Date: 2025-06-20
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

This RFC proposes a method for repartitioning a table, i.e., adjusting its partition rule and data distribution.

# Motivation

As time passes, the data distribution and skew pattern of a table may change. We need a way to repartition the table to suit the new pattern.

# Details

Here is a rough workflow diagram of the entire repartition process; each step is described in detail below.

```mermaid
sequenceDiagram
    participant Frontend
    participant Metasrv
    participant Datanodes
    participant Region0 as Region 0

    Frontend->>Frontend: Process request, validation etc.
    Frontend->>Metasrv: Submit procedure
    Metasrv->>Metasrv: Compute diff and generate migration plan
    Metasrv->>Metasrv: Allocate necessary region resources (with PaaS)
    Metasrv->>Datanodes: Stop compaction and snapshot
    rect rgb(255, 225, 225)
        note over Frontend, Region0: No Ingestion Period
        Metasrv->>Frontend: Stop processing write requests
        Metasrv->>Metasrv: Update metadata
        Metasrv->>Frontend: Start processing read requests
    end
    Metasrv->>Datanodes: Update region rule, stage version changes from now on
    Region0->>Region0: Compute new manifests for all regions
    Region0->>Datanodes: Submit manifest changes
    Metasrv->>Datanodes: Resume compaction and snapshot, make staged changes visible

    note over Frontend, Datanodes: Reload Cache
    Metasrv->>Metasrv: Release resources (with PaaS)
    Metasrv->>Metasrv: Schedule optional compaction (to remote compactor)
```

## Preprocessing

This phase performs static analysis of the new partition rule. From it the server can tell whether the repartitioning is possible, how to do it, and how many resources are needed.

In theory, the input and output partition rules for repartitioning can be completely unrelated. But in practice, to avoid a very large change set, we only allow two simple kinds of change: one splits a region into two regions (region split), and the other merges two regions into one (region merge).

After validating the new partition rule using the same validation logic as table creation, we compute the difference between the old and new partition rules. The resulting diff may contain several independent groups of changes. During subsequent processing, each group of changes can be handled independently and can succeed or fail without affecting other groups or creating scenarios that cannot be retried idempotently.

Next, we generate a repartition plan for each group of changes. Each plan contains the information for all regions involved in that particular plan, and one target region will only be referenced by a single plan. A sketch of what such a plan might look like follows.
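A hypothetical shape for one plan group, purely illustrative; the names `RepartitionPlan` and `RepartitionKind` are not defined by this RFC or the current codebase:

```rust
/// Hypothetical shape of one repartition plan group; not the actual metasrv types.
#[derive(Debug)]
enum RepartitionKind {
    /// One source region is split into two target regions.
    Split { source: u64, targets: [u64; 2] },
    /// Two source regions are merged into one target region.
    Merge { sources: [u64; 2], target: u64 },
}

#[derive(Debug)]
struct RepartitionPlan {
    kind: RepartitionKind,
    /// New partition expression for each target region, in textual form.
    target_rules: Vec<String>,
}

fn main() {
    // One group: split region 1 into regions 2 and 3 along `x < 100`.
    let plan = RepartitionPlan {
        kind: RepartitionKind::Split { source: 1, targets: [2, 3] },
        target_rules: vec!["x < 100".to_string(), "x >= 100".to_string()],
    };
    println!("{plan:?}");
}
```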

With those plans, we can determine the resource requirements for the repartition operation, where resources here primarily refer to regions. Metasrv will coordinate with the PaaS layer to pre-allocate the necessary regions at this stage. These new regions start completely empty, and their metadata and manifests will be populated during subsequent modification steps.

## Data Processing

This phase primarily covers changes to regions, including region metadata (the route table and the corresponding rule) and manifests.

Once we start processing a plan through a procedure, we first stop the affected regions' compaction and snapshot. This avoids any state being removed by compaction (which may remove old SST files) or snapshot (which may remove old manifest files).

Metasrv then tries to update the partition metadata, i.e., the region route table (related to `PartitionRuleManager`). This step is in the "no ingestion" scope, so no new data will be ingested. Since it doesn't take much time, the impact on the cluster is minimized. Metasrv also pushes the updated region rule to the corresponding regions on Datanodes.

Every region and every ingestion request to the region server carries a version of the region rule, to identify under which rule the request is processed. The version can be something like `hash(region_rule)`. Once the region rule on the region server is updated, all ingestion requests with the old rule are rejected, and all requests with the new rule are accepted but not visible. They can still be flushed to persistent storage, but their version change (new manifest) will be staged.
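A minimal sketch of this rule-version gate, assuming the version is a hash of the rule's textual form; `rule_version`, `check_ingest` and `IngestDecision` are illustrative names, not the actual region server API:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Illustrative only: derive a rule version by hashing the rule's canonical text.
fn rule_version(region_rule: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    region_rule.hash(&mut hasher);
    hasher.finish()
}

enum IngestDecision {
    /// The request was built against a stale rule: reject it.
    Reject,
    /// The request matches the current rule: accept and apply normally.
    Accept,
    /// The request matches the current rule, but a repartition is in flight:
    /// accept it, flush as usual, but stage the resulting manifest change.
    AcceptStaged,
}

fn check_ingest(request_version: u64, current_version: u64, repartition_in_flight: bool) -> IngestDecision {
    if request_version != current_version {
        IngestDecision::Reject
    } else if repartition_in_flight {
        IngestDecision::AcceptStaged
    } else {
        IngestDecision::Accept
    }
}

fn main() {
    let old = rule_version("x < 100");
    let new = rule_version("x < 50, 50 <= x AND x < 100");
    assert!(matches!(check_ingest(old, new, true), IngestDecision::Reject));
    assert!(matches!(check_ingest(new, new, true), IngestDecision::AcceptStaged));
}
```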

Then Region 0 (or any operational region picked by metasrv) computes the new manifests for all target regions. This is done by reading all old manifests and remapping the files with the new partition rule to produce the content of the new manifests. Note that this step only handles the manifests written before the region rule change on the region server; it won't touch the staged manifests, as they were already produced under the new rule.

Those new manifests are submitted to the corresponding target regions by Region 0 via a `RegionEdit` request. If this request still fails after a few retries, Region 0 will try to roll back the change by directly overwriting the manifest on object storage, report the failure to metasrv, and let the entire repartition procedure fail. Optionally, we can also compute new manifests for the staged version changes (like another, smaller repartition) and submit them to the target regions so that they become visible even if the repartition fails.

On the other hand, a successful `RegionEdit` request also acknowledges the staged version changes and makes them visible.

After this step, the repartition is done in the data plane, and compaction and snapshot can be resumed.

## Postprocessing

After the main processing is done, we can do some extra postprocessing to reduce the performance impact of the repartition, including reloading caches: the frontend's route table, metasrv's kv cache, the datanode's read/write/page caches, etc.

We can also schedule an optional compaction to reorganize all the data files under the new partition rule, reducing potential fragmentation and read amplification.

## Procedure

The repartition procedure, step by step:

- <on frontend> Validate the repartition request
- <on frontend> Initialize the repartition procedure
- Calculate the rule diff and the repartition plan groups
- Allocate the necessary new regions
- Lock the table key
- For each repartition subprocedure
  - Stop compaction and snapshot
  - Forbid new ingestion requests, update metadata, then allow ingestion requests again
  - Push the updated region rule to the regions
  - Pick one region to calculate the new manifests for all regions in this repartition group
  - Let that region apply the new manifests to each region via `RegionEdit`
    - If it fails after some retries, revert the manifest change on the regions that already succeeded and mark the failure
    - If all succeed, acknowledge the staged version changes and make them visible
  - Return the result
- Collect results from the subprocedures
  - For those that failed, restart the affected regions to force them to reconstruct their state from manifests
  - For those that succeeded, collect and merge their rule diffs
- Unlock the table key
- Report the result to the user
- <in background> Reload caches
- <in background> Maybe trigger a special compaction

In addition to the sequential steps, rollback is an important part of this procedure. There are three steps that can be rolled back when an unrecoverable failure occurs.

If the metadata update is not committed, we can overwrite the metadata with the previous version. This step is scoped in the "no ingestion" period, so no new data will be ingested and the state of both datanode and metasrv stays consistent.

If the `RegionEdit` to the other regions is not acknowledged, or only partially acknowledged, we can directly overwrite the manifests on object storage from the central region (the one that computed the new manifests), and force the region server to reload the corresponding regions so they recover their state from object storage.

If the staged version changes are not acknowledged, we can re-compute manifests based on the old rule for the staged data and apply them directly as above. This is like another, smaller repartition for the staged data.

## Region rule validation and diff calculation

In the current codebase, the rule checker is not complete: it can't check the uniqueness and completeness of the rule. This RFC also proposes a new way to validate the rule.

The proposed validation is based on a check-point system, which first generates a group of check-points from the rule, and then checks that every point is covered by one and only one rule.

All partition rule expressions are limited to the form `<column> <operator> <value>`, where the operator is limited to comparison operators. Those expressions are allowed to be nested with `AND` and `OR` operators. Based on this, we can first extract all the unique values on each column, adding and subtracting a small epsilon to cover each value's left and right boundary.

Since we accept integer, float and string as value types, computing on them directly is inconvenient. So we first normalize them to a common type that only needs to preserve the relative partial ordering. This also avoids the problems of "what is the next/previous value" for strings and "what is a good precision" for floats.

After normalization, we get a set of scatter points for each column. Then we generate a set of check-points by combining the scatter points of all columns, like building a cartesian product. This might produce a large number of check-points, so we can apply a pruning optimization that removes some of them by merging expression zones: expressions that have identical N-1 edge sub-expressions and one adjacent edge can be merged together. This pruning check has a time complexity of O(N * M * log(M)), where N is the number of active dimensions and M is the number of expression zones. Diff calculation works the same way: we find the differing expression zones between the old and new rule sets and check whether one can be transformed into the other by merging some of the expression zones.

The step of validating the check-point set against the expressions can be treated as a tiny evaluation of `PhysicalExpr`s. This evaluation yields a boolean matrix of shape K*M, where K is the number of check-points. We then check that each row of the matrix contains one and only one true value.
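A minimal sketch of the check-point validation, restricted to a single integer column for brevity; `Expr`, `check_points` and `validate` are illustrative stand-ins, not the real `PhysicalExpr` machinery:

```rust
/// Illustrative expression form: `<column> <op> <value>` combined with AND/OR,
/// restricted here to one integer column.
enum Expr {
    Lt(i64),
    GtEq(i64),
    And(Box<Expr>, Box<Expr>),
    Or(Box<Expr>, Box<Expr>),
}

fn eval(expr: &Expr, point: i64) -> bool {
    match expr {
        Expr::Lt(v) => point < *v,
        Expr::GtEq(v) => point >= *v,
        Expr::And(a, b) => eval(a, point) && eval(b, point),
        Expr::Or(a, b) => eval(a, point) || eval(b, point),
    }
}

/// Collect check-points: every boundary value plus a point just below and above it
/// (the "epsilon" from the text, using 1 as the epsilon for integers).
fn check_points(exprs: &[Expr]) -> Vec<i64> {
    fn collect(expr: &Expr, out: &mut Vec<i64>) {
        match expr {
            Expr::Lt(v) | Expr::GtEq(v) => out.extend([*v - 1, *v, *v + 1]),
            Expr::And(a, b) | Expr::Or(a, b) => {
                collect(a, out);
                collect(b, out);
            }
        }
    }
    let mut points = Vec::new();
    exprs.iter().for_each(|e| collect(e, &mut points));
    points.sort_unstable();
    points.dedup();
    points
}

/// A rule set is valid iff every check-point is covered by exactly one partition expression.
fn validate(exprs: &[Expr]) -> bool {
    check_points(exprs)
        .into_iter()
        .all(|p| exprs.iter().filter(|e| eval(e, p)).count() == 1)
}

fn main() {
    // PARTITION ON (x < 10), (x >= 10): complete and non-overlapping.
    let rules = vec![Expr::Lt(10), Expr::GtEq(10)];
    assert!(validate(&rules));

    // Overlapping rules: (x < 10), (x >= 5) both cover the range [5, 10).
    let overlapping = vec![Expr::Lt(10), Expr::GtEq(5)];
    assert!(!validate(&overlapping));
}
```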

## Compute and use new manifest

We can generate a new set of manifest files based on the old manifests and the two versions of the rule. From the rule processing part above, we can tell how a new rule & region is derived from the previous ones. So a simple way to get the new manifest is to apply the same change steps to the manifest files. E.g., if region A is derived from regions B and C, we simply combine all file IDs from B and C to generate the content of A.

If necessary, we can do better by involving some metadata related to the data, like the min-max statistics of each file, and pre-evaluating the new rule over those min-max values to filter out unneeded files when generating the new manifest.
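A rough sketch of this manifest remapping for a merge case, with hypothetical `FileMeta`/`Manifest` structures (the real mito manifest format is richer) and the optional min-max pruning folded in as a closure:

```rust
/// Hypothetical, simplified view of a manifest entry.
#[derive(Clone)]
struct FileMeta {
    file_id: String,
    /// Min/max of the partition column, taken from file statistics.
    min_key: i64,
    max_key: i64,
}

/// Hypothetical, simplified manifest: just the list of referenced files.
struct Manifest {
    files: Vec<FileMeta>,
}

/// Build the manifest of a target region from its source regions.
/// `in_new_region` is the new partition rule evaluated over a [min, max] range:
/// files whose range cannot intersect the new region are dropped up front.
fn merge_manifests(sources: &[Manifest], in_new_region: impl Fn(i64, i64) -> bool) -> Manifest {
    let files = sources
        .iter()
        .flat_map(|m| m.files.iter())
        .filter(|f| in_new_region(f.min_key, f.max_key))
        .cloned()
        .collect();
    Manifest { files }
}

fn main() {
    let b = Manifest {
        files: vec![FileMeta { file_id: "b-1".into(), min_key: 0, max_key: 9 }],
    };
    let c = Manifest {
        files: vec![FileMeta { file_id: "c-1".into(), min_key: 10, max_key: 99 }],
    };
    // New region A covers key < 50: keep every file that may contain such keys.
    let a = merge_manifests(&[b, c], |min, _max| min < 50);
    assert_eq!(a.files.len(), 2);
    println!("region A references {} files", a.files.len());
}
```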

The way to use the new manifest needs one extra step compared with the current implementation. We need to record, either in the manifest or in the file metadata, which rule was in effect when a SST file was generated (by flush or compaction). Then, in every read request, we append the current region rule as a predicate, to ensure no data belonging to other regions is read. The stored rule lets us reduce the number of predicates to apply, by removing predicates that are identical between the current region rule and the stored one. So ideally, in a table that has not been repartitioned recently, the overhead of checking the region rule is minimal.
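A sketch of this predicate reduction, under the assumption that rules are compared as sets of textual expressions; this is illustrative, not the actual read path:

```rust
/// Illustrative only: decide which predicates of the current region rule must be
/// appended to a scan, given the rule that was recorded when the SST file was written.
fn extra_read_predicates<'a>(current_rule: &'a [String], stored_rule: &[String]) -> Vec<&'a str> {
    current_rule
        .iter()
        .filter(|expr| !stored_rule.contains(*expr))
        .map(|s| s.as_str())
        .collect()
}

fn main() {
    let current = vec!["x >= 10".to_string(), "x < 50".to_string()];
    let stored = vec!["x >= 10".to_string()];
    // Only the predicate that differs from the file's original rule is appended,
    // so tables that were not repartitioned recently pay almost no extra cost.
    assert_eq!(extra_read_predicates(&current, &stored), vec!["x < 50"]);
}
```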

## Prerequisite tasks

The steps above assume some functionality that is not implemented yet. Here we list it, along with where it is used and how to implement it.

### Cross-region read

The current data directory structure is `{table_id}/{region_id}/[data/metadata]/{file_id}`, and every region can only access files under its own directory. After a repartition, data files may live under other, previously existing regions, so we need to support cross-region reads. This new access method allows a region to access any file under the same table. The related tracking issue is <https://github.com/GreptimeTeam/greptimedb/issues/6409>.

### Global GC worker

This is to simplify the state management of data files, as one file may be referenced by multiple manifests, or by no manifest at all. With a global GC worker, each region and the repartition process only need to care about generating and using new files, without tracking whether a file should be deleted; deletion is left to the GC worker. The worker basically works by counting references from manifest files and removing unreferenced ones. The related tracking issue is **TBD**.

# Alternatives

In the "Data Processing" section, we could enlarge the "no ingestion" period to cover almost all the steps. This would simplify the entire procedure a lot, but it would also pause ingestion for much longer, which may not be acceptable.

@@ -1,151 +0,0 @@
---
Feature Name: Compatibility Test Framework
Tracking Issue: TBD
Date: 2025-07-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

This RFC proposes a compatibility test framework for GreptimeDB to ensure backward/forward compatibility between different versions of GreptimeDB.

# Motivation

In current practice, we don't have a systematic way to test and ensure compatibility between different versions of GreptimeDB. Each time we release a new version, we have to test compatibility manually with ad-hoc cases. This is not only time-consuming, but also error-prone and unmaintainable, and it relies heavily on the release manager to ensure compatibility between versions.

We don't have a detailed guide in the release SOP on how to test and ensure the compatibility of a new version, and we have broken compatibility many times (`v0.14.1` and `v0.15.1` are two examples, both released right after a major release).

# Details

This RFC proposes a compatibility test framework that is easy to maintain, extend and run. It can tell the compatibility between any two given versions of GreptimeDB, both backward and forward. It's based on the Sqlness library, but used in a different way.

Generally speaking, the framework is composed of two parts:

1. Test cases: a set of test cases maintained specifically for the compatibility test, still in the `.sql` and `.result` format.
2. Test framework: a new sqlness runner used to run those test cases, with some new features that are not required by the integration sqlness test.

## Test Cases

### Structure

The case set is organized in three parts:

- `1.feature`: Use a new feature
- `2.verify`: Verify database behavior
- `3.cleanup`: Paired with `1.feature`, clean up the test environment

These three parts are organized in a tree structure and should be run in sequence:

```
compatibility_test/
├── 1.feature/
│   ├── feature-a/
│   ├── feature-b/
│   └── feature-c/
├── 2.verify/
│   ├── verify-metadata/
│   ├── verify-data/
│   └── verify-schema/
└── 3.cleanup/
    ├── cleanup-a/
    ├── cleanup-b/
    └── cleanup-c/
```

### Example

For example, for a new feature like adding a new index option ([#6416](https://github.com/GreptimeTeam/greptimedb/pull/6416)), we (who implement the feature) create a new test case like this:

```sql
-- path: compatibility_test/1.feature/index-option/granularity_and_false_positive_rate.sql

-- SQLNESS ARG since=0.15.0
-- SQLNESS IGNORE_RESULT
CREATE TABLE granularity_and_false_positive_rate (ts timestamp time index, val double) with ("index.granularity" = "8192", "index.false_positive_rate" = "0.01");
```

And

```sql
-- path: compatibility_test/3.cleanup/index-option/granularity_and_false_positive_rate.sql
drop table granularity_and_false_positive_rate;
```

Since this new feature doesn't require a special way to verify the database behavior, we can reuse existing test cases in `2.verify/`. For example, we can reuse the `verify-metadata` case to verify the metadata of the table:

```sql
-- path: compatibility_test/2.verify/verify-metadata/show-create-table.sql

-- SQLNESS TEMPLATE TABLE="SHOW TABLES";
SHOW CREATE TABLE $TABLE;
```

In this example, we use some new sqlness features that will be introduced in the next section (`since`, `IGNORE_RESULT`, `TEMPLATE`).

### Maintenance

Each time we implement a new feature that should be covered by the compatibility test, we create a new test case under `1.feature/` and `3.cleanup/`, and check whether existing cases in `2.verify/` can be reused to verify the database behavior.

This simulates an enthusiastic user who adopts every new feature right away. The only new maintenance burden falls on the feature implementer, who writes one more test case to pin down the feature's behavior. Once there is a breaking change in the future, it can be detected by the compatibility test framework automatically.

Another topic is deprecation. If a feature is deprecated, we should also mark that in the test case. Using the example above, assume we deprecate the `index.granularity` and `index.false_positive_rate` index options in `v0.99.0`; we can mark them as:

```sql
-- SQLNESS ARG since=0.15.0 till=0.99.0
...
```

This tells the framework to ignore this feature in version `v0.99.0` and later. Since we currently have many experimental features that are scheduled to be broken in the future, this is a good way to mark them.

## Test Framework

This section describes the new sqlness features required by this framework.

### Since and Till

Following the `ARG` interceptor in sqlness, we can mark a feature as available between two given versions. Only `since` is required:

```sql
-- SQLNESS ARG since=VERSION_STRING [till=VERSION_STRING]
```

### IGNORE_RESULT

`IGNORE_RESULT` is a new interceptor. It tells the runner to ignore the result of the query and only check whether the query executed successfully.

This is useful for reducing the maintenance burden of the test cases: unlike the integration sqlness test, in most cases we don't care about the result of the query, we only need to make sure it executes successfully.

### TEMPLATE

`TEMPLATE` is another new interceptor. It generates queries from a template based on runtime data.

In the above example, we need to run the `SHOW CREATE TABLE` query for every existing table, so we can use the `TEMPLATE` interceptor to generate the query with a dynamic table list, as sketched below.
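A sketch of the intended expansion, assuming the template simply substitutes `$TABLE` with each value produced by the template query; this is not the actual sqlness implementation:

```rust
/// Illustrative only: expand a TEMPLATE case into concrete queries using runtime
/// data (here, the table names returned by the template query, e.g. `SHOW TABLES`).
fn expand_template(template: &str, tables: &[&str]) -> Vec<String> {
    tables.iter().map(|table| template.replace("$TABLE", table)).collect()
}

fn main() {
    let queries = expand_template(
        "SHOW CREATE TABLE $TABLE;",
        &["granularity_and_false_positive_rate", "another_table"],
    );
    // Each existing table yields one concrete SHOW CREATE TABLE statement.
    for q in &queries {
        println!("{q}");
    }
}
```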

### RUNNER

There are also some extra requirements for the runner itself:

- It should run the test cases in sequence: first `1.feature/`, then `2.verify/`, and finally `3.cleanup/`.
- It should be able to fetch the required versions automatically to finish the test.
- It should handle `since` and `till` properly.

In the `1.feature` phase, the runner identifies, by version number, all the features that need to be tested. It then restarts with the new version (the `to` version) to run the `2.verify/` and `3.cleanup/` phases.

## Test Report

Finally, we can run the compatibility test to verify the compatibility between any two given versions of GreptimeDB, for example:

```bash
# check backward compatibility between v0.15.0 and v0.16.0 when releasing v0.16.0
./sqlness run --from=0.15.0 --to=0.16.0

# check forward compatibility when downgrading from v0.15.0 to v0.13.0
./sqlness run --from=0.15.0 --to=0.13.0
```

We can also use a script to run the compatibility test for all versions in a given range, to produce a quick report covering all the versions we need.

And since we always bump the version in `Cargo.toml` to the next major release version, that next version can be used as the "latest" unpublished version in scenarios like local testing.

# Alternatives

There was a previous attempt to implement a compatibility test framework, which was later disabled; see [#3728](https://github.com/GreptimeTeam/greptimedb/issues/3728).

20
flake.lock
generated
20
flake.lock
generated
@@ -8,11 +8,11 @@
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745735608,
|
||||
"narHash": "sha256-L0jzm815XBFfF2wCFmR+M1CF+beIEFj6SxlqVKF59Ec=",
|
||||
"lastModified": 1742452566,
|
||||
"narHash": "sha256-sVuLDQ2UIWfXUBbctzrZrXM2X05YjX08K7XHMztt36E=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "c39a78eba6ed2a022cc3218db90d485077101496",
|
||||
"rev": "7d9ba794daf5e8cc7ee728859bc688d8e26d5f06",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -41,16 +41,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1748162331,
|
||||
"narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
|
||||
"lastModified": 1743576891,
|
||||
"narHash": "sha256-vXiKURtntURybE6FMNFAVpRPr8+e8KoLPrYs9TGuAKc=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
|
||||
"rev": "44a69ed688786e98a101f02b712c313f1ade37ab",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05",
|
||||
"ref": "nixos-24.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -65,11 +65,11 @@
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1745694049,
|
||||
"narHash": "sha256-fxvRYH/tS7hGQeg9zCVh5RBcSWT+JGJet7RA8Ss+rC0=",
|
||||
"lastModified": 1742296961,
|
||||
"narHash": "sha256-gCpvEQOrugHWLimD1wTFOJHagnSEP6VYBDspq96Idu0=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "d8887c0758bbd2d5f752d5bd405d4491e90e7ed6",
|
||||
"rev": "15d87419f1a123d8f888d608129c3ce3ff8f13d4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
description = "Development environment flake";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -21,7 +21,7 @@
|
||||
lib = nixpkgs.lib;
|
||||
rustToolchain = fenix.packages.${system}.fromToolchainName {
|
||||
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
|
||||
sha256 = "sha256-tJJr8oqX3YD+ohhPK7jlt/7kvKBnBqJVjYtoFr520d4=";
|
||||
sha256 = "sha256-i0Sh/ZFFsHlZ3oFZFc24qdk6Cd8Do8OPU4HJQsrKOeM=";
|
||||
};
|
||||
in
|
||||
{
|
||||
@@ -51,7 +51,6 @@
|
||||
];
|
||||
|
||||
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
|
||||
NIX_HARDENING_ENABLE = "";
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -2,63 +2,30 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This repository contains Grafana dashboards for visualizing metrics and logs of GreptimeDB instances running in either cluster or standalone mode. **The Grafana version should be greater than 9.0**.
|
||||
This repository maintains the Grafana dashboards for GreptimeDB. It has two types of dashboards:
|
||||
|
||||
We highly recommend using the self-monitoring feature provided by [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator) to automatically collect metrics and logs from your GreptimeDB instances and store them in a dedicated GreptimeDB instance.
|
||||
- `cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
|
||||
- `standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
|
||||
|
||||
- **Metrics Dashboards**
|
||||
As the rapid development of GreptimeDB, the metrics may be changed, and please feel free to submit your feedback and/or contribution to this dashboard 🤗
|
||||
|
||||
- `dashboards/metrics/cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/metrics/cluster/dashboard.md) for more details.
|
||||
**NOTE**:
|
||||
|
||||
- `dashboards/metrics/standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/metrics/standalone/dashboard.md) for more details.
|
||||
- The Grafana version should be greater than 9.0.
|
||||
|
||||
- **Logs Dashboard**
|
||||
- If you want to modify the dashboards, you only need to modify the `cluster/dashboard.json` and run the `make dashboards` command to generate the `standalone/dashboard.json` and other related files.
|
||||
|
||||
The `dashboards/logs/dashboard.json` provides a comprehensive Grafana dashboard for visualizing GreptimeDB logs. To utilize this dashboard effectively, you need to collect logs in JSON format from your GreptimeDB instances and store them in a dedicated GreptimeDB instance.
|
||||
To maintain the dashboards easily, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
|
||||
|
||||
For proper integration, the logs table must adhere to the following schema design with the table name `_gt_logs`:
|
||||
|
||||
```sql
|
||||
CREATE TABLE IF NOT EXISTS `_gt_logs` (
|
||||
`pod_ip` STRING NULL,
|
||||
`namespace` STRING NULL,
|
||||
`cluster` STRING NULL,
|
||||
`file` STRING NULL,
|
||||
`module_path` STRING NULL,
|
||||
`level` STRING NULL,
|
||||
`target` STRING NULL,
|
||||
`role` STRING NULL,
|
||||
`pod` STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),
|
||||
`message` STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', backend = 'bloom', case_sensitive = 'false'),
|
||||
`err` STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', backend = 'bloom', case_sensitive = 'false'),
|
||||
`timestamp` TIMESTAMP(9) NOT NULL,
|
||||
TIME INDEX (`timestamp`),
|
||||
PRIMARY KEY (`level`, `target`, `role`)
|
||||
)
|
||||
ENGINE=mito
|
||||
WITH (
|
||||
append_mode = 'true'
|
||||
)
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
As GreptimeDB evolves rapidly, metrics may change over time. We welcome your feedback and contributions to improve these dashboards 🤗
|
||||
|
||||
To modify the metrics dashboards, simply edit the `dashboards/metrics/cluster/dashboard.json` file and run the `make dashboards` command. This will automatically generate the updated `dashboards/metrics/standalone/dashboard.json` and other related files.
|
||||
|
||||
For easier dashboard maintenance, we utilize the [`dac`](https://github.com/zyy17/dac) tool to generate human-readable intermediate dashboards and documentation:
|
||||
|
||||
- `dashboards/metrics/cluster/dashboard.yaml`: The intermediate dashboard file for the GreptimeDB cluster.
|
||||
- `dashboards/metrics/standalone/dashboard.yaml`: The intermediate dashboard file for standalone GreptimeDB instances.
|
||||
- `cluster/dashboard.yaml`: The intermediate dashboard for the GreptimeDB cluster.
|
||||
- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
|
||||
|
||||
## Data Sources
|
||||
|
||||
The following data sources are used to fetch metrics and logs:
|
||||
There are two data sources for the dashboards to fetch the metrics:
|
||||
|
||||
- **`${metrics}`**: Prometheus data source for providing the GreptimeDB metrics.
|
||||
- **`${logs}`**: MySQL data source for providing the GreptimeDB logs.
|
||||
- **`${information_schema}`**: MySQL data source for providing the information schema of the current instance and used for the `overview` panel. It is the MySQL port of the current monitored instance.
|
||||
- **Prometheus**: Expose the metrics of GreptimeDB.
|
||||
- **Information Schema**: It is the MySQL port of the current monitored instance. The `overview` dashboard will use this datasource to show the information schema of the current instance.
|
||||
|
||||
## Instance Filters
|
||||
|
||||
@@ -76,14 +43,14 @@ And the legend will be like: `[{{instance}}]-[{{ pod }}]`.
|
||||
|
||||
## Deployment
|
||||
|
||||
### (Recommended) Helm Chart
|
||||
### Helm
|
||||
|
||||
If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to deploy a GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
|
||||
If you use the Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy a GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
|
||||
|
||||
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
|
||||
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
|
||||
|
||||
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/overview).
|
||||
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
|
||||
|
||||
### Self-host Prometheus and import dashboards manually
|
||||
|
||||
@@ -118,5 +85,5 @@ The standalone GreptimeDB instance will collect metrics from your cluster, and t
|
||||
|
||||
3. **Import the dashboards based on your deployment scenario**
|
||||
|
||||
- **Cluster**: Import the `dashboards/metrics/cluster/dashboard.json` dashboard.
|
||||
- **Standalone**: Import the `dashboards/metrics/standalone/dashboard.json` dashboard.
|
||||
- **Cluster**: Import the `cluster/dashboard.json` dashboard.
|
||||
- **Standalone**: Import the `standalone/dashboard.json` dashboard.
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -46,7 +46,6 @@
|
||||
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
|
||||
# Mito Engine
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -60,7 +59,7 @@
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
@@ -68,41 +67,26 @@
|
||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series and field builder counts in memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"read\|Reader::read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation=~"read\|Reader::read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"write\|Writer::write\|Writer::close"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation =~ "Writer::write\|Writer::close\|write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
|
||||
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
|
||||
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
|
||||
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.<br/> | `prometheus` | `s` | `{{pod}} {{name}} p90` |
|
||||
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.<br/> | `prometheus` | `bytes` | `{{pod}}` |
|
||||
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}` |
|
||||
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
|
||||
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
|
||||
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
|
||||
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -180,18 +180,13 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -202,26 +197,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -232,26 +217,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -262,26 +237,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -292,11 +257,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -411,21 +371,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
|
||||
- title: 'Frontend Handle Bulk Insert Elapsed Time '
|
||||
type: timeseries
|
||||
description: Per-stage time for frontend to handle bulk insert requests
|
||||
unit: s
|
||||
queries:
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P99'
|
||||
- title: Mito Engine
|
||||
panels:
|
||||
- title: Request OPS per Instance
|
||||
@@ -527,7 +472,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{pod}}]'
|
||||
- title: Compaction Elapsed Time per Instance by Stage
|
||||
- title: Compaction P99 per Instance by Stage
|
||||
type: timeseries
|
||||
description: Compaction latency by stage
|
||||
unit: s
|
||||
@@ -537,11 +482,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
|
||||
- title: Compaction P99 per Instance
|
||||
type: timeseries
|
||||
description: Compaction P99 per Instance.
|
||||
@@ -622,75 +562,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]'
|
||||
- title: Compaction Input/Output Bytes
|
||||
type: timeseries
|
||||
description: Compaction input/output bytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-input'
|
||||
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-output'
|
||||
- title: Region Worker Handle Bulk Insert Requests
|
||||
type: timeseries
|
||||
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Active Series and Field Builders Count
|
||||
type: timeseries
|
||||
description: Active series and field builder counts in memtables
|
||||
unit: none
|
||||
queries:
|
||||
- expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-series'
|
||||
- expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
|
||||
- title: Region Worker Convert Requests
|
||||
type: timeseries
|
||||
description: Per-stage elapsed time for region worker to decode requests.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: Local cache misses of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -708,41 +579,41 @@ groups:
|
||||
description: Read QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"read|Reader::read"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
- title: Read P99 per Instance
|
||||
type: timeseries
|
||||
description: Read P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation=~"read|Reader::read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
- title: Write QPS per Instance
|
||||
type: timeseries
|
||||
description: Write QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation=~"write|Writer::write|Writer::close"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
- title: Write P99 per Instance
|
||||
type: timeseries
|
||||
description: Write P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation =~ "Writer::write|Writer::close|write"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
- title: List QPS per Instance
|
||||
type: timeseries
|
||||
description: List QPS per Instance.
|
||||
@@ -778,7 +649,7 @@ groups:
|
||||
description: Other Request P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read|write|list|Writer::write|Writer::close|Reader::read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read|write|list"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -805,8 +676,9 @@ groups:
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
type: status-history
|
||||
type: state-timeline
|
||||
description: Counter of region migration by source and destination
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
|
||||
datasource:
|
||||
@@ -827,127 +699,17 @@ groups:
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
|
||||
legendFormat: __auto
|
||||
- title: Datanode load
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: binBps
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_datanode_load
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: Datanode-{{datanode_id}}-writeload
|
||||
- title: Rate of SQL Executions (RDS)
|
||||
type: timeseries
|
||||
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
|
||||
- title: SQL Execution Latency (RDS)
|
||||
type: timeseries
|
||||
description: 'Measures the response time of SQL executions via the RDS backend. '
|
||||
unit: ms
|
||||
queries:
|
||||
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
|
||||
- title: Handler Execution Latency
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: |-
|
||||
histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{name}} p90'
|
||||
- title: Heartbeat Packet Size
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta Heartbeat Receive Rate
|
||||
type: timeseries
|
||||
description: Rate of heartbeats received by metasrv.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta KV Ops Latency
|
||||
type: timeseries
|
||||
description: P99 latency of metasrv KV backend operations.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: Rate of meta KV Ops
|
||||
type: timeseries
|
||||
description: Rate of metasrv KV backend operations.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}}'
|
||||
- title: DDL Latency
|
||||
type: timeseries
|
||||
description: P90 latency of metasrv DDL procedures by step.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateLogicalTables-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateView-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateFlow-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: DropTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
legendFormat: __auto
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
@@ -1,292 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 12,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "mysql",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 20,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 1,
|
||||
"options": {
|
||||
"dedupStrategy": "none",
|
||||
"enableInfiniteScrolling": true,
|
||||
"enableLogDetails": true,
|
||||
"prettifyLogMessage": false,
|
||||
"showCommonLabels": false,
|
||||
"showLabels": false,
|
||||
"showTime": true,
|
||||
"sortOrder": "Descending",
|
||||
"wrapLogMessage": false
|
||||
},
|
||||
"pluginVersion": "11.6.0",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "greptime_private",
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT `timestamp`, CONCAT('[', `level`, ']', ' ', '<', `target`, '>', ' ', `message`),\n `role`,\n `pod`,\n `pod_ip`,\n `namespace`,\n `cluster`,\n `err`,\n `file`,\n `module_path`\nFROM\n `_gt_logs`\nWHERE\n (\n \"$level\" = \"'all'\"\n OR `level` IN ($level)\n ) \n AND (\n \"$role\" = \"'all'\"\n OR `role` IN ($role)\n )\n AND (\n \"$pod\" = \"\"\n OR `pod` = '$pod'\n )\n AND (\n \"$target\" = \"\"\n OR `target` = '$target'\n )\n AND (\n \"$search\" = \"\"\n OR matches_term(`message`, '$search')\n )\n AND (\n \"$exclude\" = \"\"\n OR NOT matches_term(`message`, '$exclude')\n )\n AND $__timeFilter(`timestamp`)\nORDER BY `timestamp` DESC\nLIMIT $limit;\n",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Logs",
|
||||
"type": "logs"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"refresh": "",
|
||||
"schemaVersion": 41,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"text": "logs",
|
||||
"value": "P98F38F12DB221A8C"
|
||||
},
|
||||
"includeAll": false,
|
||||
"name": "datasource",
|
||||
"options": [],
|
||||
"query": "mysql",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"type": "datasource"
|
||||
},
|
||||
{
|
||||
"allValue": "'all'",
|
||||
"current": {
|
||||
"text": [
|
||||
"$__all"
|
||||
],
|
||||
"value": [
|
||||
"$__all"
|
||||
]
|
||||
},
|
||||
"includeAll": true,
|
||||
"label": "level",
|
||||
"multi": true,
|
||||
"name": "level",
|
||||
"options": [
|
||||
{
|
||||
"selected": false,
|
||||
"text": "INFO",
|
||||
"value": "INFO"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "ERROR",
|
||||
"value": "ERROR"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "WARN",
|
||||
"value": "WARN"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "DEBUG",
|
||||
"value": "DEBUG"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "TRACE",
|
||||
"value": "TRACE"
|
||||
}
|
||||
],
|
||||
"query": "INFO,ERROR,WARN,DEBUG,TRACE",
|
||||
"type": "custom"
|
||||
},
|
||||
{
|
||||
"allValue": "'all'",
|
||||
"current": {
|
||||
"text": [
|
||||
"$__all"
|
||||
],
|
||||
"value": [
|
||||
"$__all"
|
||||
]
|
||||
},
|
||||
"includeAll": true,
|
||||
"label": "role",
|
||||
"multi": true,
|
||||
"name": "role",
|
||||
"options": [
|
||||
{
|
||||
"selected": false,
|
||||
"text": "datanode",
|
||||
"value": "datanode"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "frontend",
|
||||
"value": "frontend"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "meta",
|
||||
"value": "meta"
|
||||
}
|
||||
],
|
||||
"query": "datanode,frontend,meta",
|
||||
"type": "custom"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"text": "",
|
||||
"value": ""
|
||||
},
|
||||
"label": "pod",
|
||||
"name": "pod",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "",
|
||||
"value": ""
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"type": "textbox"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"text": "",
|
||||
"value": ""
|
||||
},
|
||||
"label": "target",
|
||||
"name": "target",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "",
|
||||
"value": ""
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"type": "textbox"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"text": "",
|
||||
"value": ""
|
||||
},
|
||||
"label": "search",
|
||||
"name": "search",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "",
|
||||
"value": ""
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"type": "textbox"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"text": "",
|
||||
"value": ""
|
||||
},
|
||||
"label": "exclude",
|
||||
"name": "exclude",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "",
|
||||
"value": ""
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"type": "textbox"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"text": "2000",
|
||||
"value": "2000"
|
||||
},
|
||||
"includeAll": false,
|
||||
"label": "limit",
|
||||
"name": "limit",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "2000",
|
||||
"value": "2000"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "5000",
|
||||
"value": "5000"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "8000",
|
||||
"value": "8000"
|
||||
}
|
||||
],
|
||||
"query": "2000,5000,8000",
|
||||
"type": "custom"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "GreptimeDB Logs",
|
||||
"uid": "edx5veo4rd3wge2",
|
||||
"version": 1
|
||||
}
|
||||
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
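This revision drops the separate `greptime_memory_limit_in_bytes` limit series from the memory panels. If a single utilization ratio is still wanted, the usage and limit metrics from the rows above can be combined ad hoc; a sketch for the datanode case (not part of the dashboard):

```promql
# Fraction of the configured datanode memory limit currently in use
sum by (instance, pod) (process_resident_memory_bytes{})
  / scalar(max(greptime_memory_limit_in_bytes{app="greptime-datanode"}))
```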
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -46,7 +46,6 @@
|
||||
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
|
||||
# Mito Engine
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -60,7 +59,7 @@
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
@@ -68,41 +67,26 @@
|
||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
|
||||
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series and field builder counts in memtables | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
|
||||
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
|
||||
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))` | `timeseries` | Local cache misses of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
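Several Mito panels above pair a quantile computed from histogram buckets with an average derived from the matching `_sum`/`_count` series. Written out with the bulk-insert histogram from the table (metric names as in the rows above; the dashboard uses `$__rate_interval`, a fixed 5m window is shown here only for readability):

```promql
# p95 latency from the histogram buckets
histogram_quantile(0.95,
  sum by (le, instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[5m])))

# mean latency over the same window, from the _sum/_count pair
  sum by (instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[5m]))
/ sum by (instance, stage, pod) (rate(greptime_region_worker_handle_write_count[5m]))
```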
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"read\|Reader::read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{operation=~"read\|Reader::read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"write\|Writer::write\|Writer::close"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation =~ "Writer::write\|Writer::close\|write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
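This revision narrows the OpenDAL operation filters from regex matches such as `read|Reader::read` down to the plain `read`/`write`/`list` operations. When adapting these panels to a different OpenDAL version, it can help to first check which operation labels are actually exported; a throwaway query for that (not part of the dashboard):

```promql
# Distinct OpenDAL operations currently exposed, with the number of series per operation
count by (operation) (opendal_operation_duration_seconds_count)
```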
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `status-history` | Counter of region migration by source and destination | `prometheus` | -- | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `{{pod}}-{{state}}-{{error_type}}` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `binBps` | `Datanode-{{datanode_id}}-writeload` |
|
||||
| Rate of SQL Executions (RDS) | `rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])` | `timeseries` | Displays the rate of SQL executions processed by the Meta service using the RDS backend. | `prometheus` | `none` | `{{pod}} {{op}} {{type}} {{result}} ` |
|
||||
| SQL Execution Latency (RDS) | `histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))` | `timeseries` | Measures the response time of SQL executions via the RDS backend. | `prometheus` | `ms` | `{{pod}} {{op}} {{type}} {{result}} p90` |
|
||||
| Handler Execution Latency | `histogram_quantile(0.90, sum by(pod, le, name) (rate(greptime_meta_handler_execute_bucket[$__rate_interval])))` | `timeseries` | Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.<br/> | `prometheus` | `s` | `{{pod}} {{name}} p90` |
|
||||
| Heartbeat Packet Size | `histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))` | `timeseries` | Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.<br/> | `prometheus` | `bytes` | `{{pod}}` |
|
||||
| Meta Heartbeat Receive Rate | `rate(greptime_meta_heartbeat_rate[$__rate_interval])` | `timeseries` | Rate of heartbeats received by metasrv. | `prometheus` | `s` | `{{pod}}` |
|
||||
| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | P99 latency of metasrv KV backend operations. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
|
||||
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Rate of metasrv KV backend operations. | `prometheus` | `none` | `{{pod}}-{{op}}` |
|
||||
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | P90 latency of metasrv DDL procedures by step. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
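A few of the Metasrv latency queries above (heartbeat packet size, KV ops latency, DDL latency) apply `histogram_quantile` to the raw cumulative `_bucket` counters, so the result is a quantile over the whole process lifetime rather than over a recent window. If a windowed view is preferred, the usual approach is to rate the buckets first; a sketch for the KV case (window chosen arbitrarily):

```promql
histogram_quantile(0.99,
  sum by (pod, le, op, target) (rate(greptime_meta_kv_request_elapsed_bucket[5m])))
```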
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
@@ -180,18 +180,13 @@ groups:
|
||||
- title: Datanode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Datanode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -202,26 +197,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -232,26 +217,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Metasrv CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -262,26 +237,16 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode Memory per Instance
|
||||
type: timeseries
|
||||
description: Current memory usage by instance
|
||||
unit: bytes
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Flownode CPU Usage per Instance
|
||||
type: timeseries
|
||||
description: Current cpu usage by instance
|
||||
@@ -292,11 +257,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: limit
|
||||
- title: Frontend Requests
|
||||
panels:
|
||||
- title: HTTP QPS per Instance
|
||||
@@ -411,21 +371,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
|
||||
- title: 'Frontend Handle Bulk Insert Elapsed Time '
|
||||
type: timeseries
|
||||
description: Per-stage time for frontend to handle bulk insert requests
|
||||
unit: s
|
||||
queries:
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P99'
|
||||
- title: Mito Engine
|
||||
panels:
|
||||
- title: Request OPS per Instance
|
||||
@@ -527,7 +472,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{pod}}]'
|
||||
- title: Compaction Elapsed Time per Instance by Stage
|
||||
- title: Compaction P99 per Instance by Stage
|
||||
type: timeseries
|
||||
description: Compaction latency by stage
|
||||
unit: s
|
||||
@@ -537,11 +482,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
|
||||
- expr: sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
|
||||
- title: Compaction P99 per Instance
|
||||
type: timeseries
|
||||
description: Compaction P99 per Instance.
|
||||
@@ -622,75 +562,6 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]'
|
||||
- title: Compaction Input/Output Bytes
|
||||
type: timeseries
|
||||
description: Compaction input/output bytes
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-input'
|
||||
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-output'
|
||||
- title: Region Worker Handle Bulk Insert Requests
|
||||
type: timeseries
|
||||
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Active Series and Field Builders Count
|
||||
type: timeseries
|
||||
description: Active series and field builder counts in memtables
|
||||
unit: none
|
||||
queries:
|
||||
- expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-series'
|
||||
- expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
|
||||
- title: Region Worker Convert Requests
|
||||
type: timeseries
|
||||
description: Per-stage elapsed time for region worker to decode requests.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
|
||||
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
|
||||
- title: Cache Miss
|
||||
type: timeseries
|
||||
description: Local cache misses of the datanode.
|
||||
queries:
|
||||
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
|
||||
- title: OpenDAL
|
||||
panels:
|
||||
- title: QPS per Instance
|
||||
@@ -708,41 +579,41 @@ groups:
|
||||
description: Read QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"read|Reader::read"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
- title: Read P99 per Instance
|
||||
type: timeseries
|
||||
description: Read P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{operation=~"read|Reader::read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
- title: Write QPS per Instance
|
||||
type: timeseries
|
||||
description: Write QPS per Instance.
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{ operation=~"write|Writer::write|Writer::close"}[$__rate_interval]))
|
||||
- expr: sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-{{scheme}}'
|
||||
- title: Write P99 per Instance
|
||||
type: timeseries
|
||||
description: Write P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation =~ "Writer::write|Writer::close|write"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]'
|
||||
- title: List QPS per Instance
|
||||
type: timeseries
|
||||
description: List QPS per Instance.
|
||||
@@ -778,7 +649,7 @@ groups:
|
||||
description: Other Request P99 per Instance.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read|write|list|Writer::write|Writer::close|Reader::read"}[$__rate_interval])))
|
||||
- expr: histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read|write|list"}[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -805,8 +676,9 @@ groups:
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
type: status-history
|
||||
type: state-timeline
|
||||
description: Counter of region migration by source and destination
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_meta_region_migration_stat{datanode_type="src"}
|
||||
datasource:
|
||||
@@ -827,127 +699,17 @@ groups:
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{state}}-{{error_type}}'
|
||||
legendFormat: __auto
|
||||
- title: Datanode load
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: binBps
|
||||
unit: none
|
||||
queries:
|
||||
- expr: greptime_datanode_load
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: Datanode-{{datanode_id}}-writeload
|
||||
- title: Rate of SQL Executions (RDS)
|
||||
type: timeseries
|
||||
description: Displays the rate of SQL executions processed by the Meta service using the RDS backend.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} '
|
||||
- title: SQL Execution Latency (RDS)
|
||||
type: timeseries
|
||||
description: 'Measures the response time of SQL executions via the RDS backend. '
|
||||
unit: ms
|
||||
queries:
|
||||
- expr: histogram_quantile(0.90, sum by(pod, op, type, result, le) (rate(greptime_meta_rds_pg_sql_execute_elapsed_ms_bucket[$__rate_interval])))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{op}} {{type}} {{result}} p90'
|
||||
- title: Handler Execution Latency
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows latency of Meta handlers by pod and handler name, useful for monitoring handler performance and detecting latency spikes.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: |-
|
||||
histogram_quantile(0.90, sum by(pod, le, name) (
|
||||
rate(greptime_meta_handler_execute_bucket[$__rate_interval])
|
||||
))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}} {{name}} p90'
|
||||
- title: Heartbeat Packet Size
|
||||
type: timeseries
|
||||
description: |
|
||||
Shows p90 heartbeat message sizes, helping track network usage and identify anomalies in heartbeat payload.
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(pod, le) (greptime_meta_heartbeat_stat_memory_size_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta Heartbeat Receive Rate
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: rate(greptime_meta_heartbeat_rate[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}'
|
||||
- title: Meta KV Ops Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: Rate of meta KV Ops
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: none
|
||||
queries:
|
||||
- expr: rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{op}} p99'
|
||||
- title: DDL Latency
|
||||
type: timeseries
|
||||
description: Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateLogicalTables-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateView-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: CreateFlow-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: DropTable-{{step}} p90
|
||||
- expr: histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
legendFormat: __auto
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
@@ -1,6 +1,6 @@
#!/usr/bin/env bash

DASHBOARD_DIR=${1:-grafana/dashboards/metrics}
DASHBOARD_DIR=${1:-grafana/dashboards}

check_dashboard_description() {
for dashboard in $(find $DASHBOARD_DIR -name "*.json"); do
@@ -25,8 +25,8 @@ check_dashboard_description() {
check_dashboards_generation() {
./grafana/scripts/gen-dashboards.sh

if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
echo "Error: The dashboards are not generated correctly. You should execute the 'make dashboards' command."
if [[ -n "$(git diff --name-only grafana/dashboards)" ]]; then
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
exit 1
fi
}

@@ -1,12 +1,12 @@
#! /usr/bin/env bash

CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/metrics/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/metrics/standalone}
CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/standalone}
DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35

remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
sed -E 's/instance=~\\"(\$datanode|\$frontend|\$metasrv|\$flownode)\\",?//g' "$CLUSTER_DASHBOARD_DIR/dashboard.json" > "$STANDALONE_DASHBOARD_DIR/dashboard.json"
sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//; s/instance=~\\"$frontend\\",//; s/instance=~\\"$frontend\\"//; s/instance=~\\"$metasrv\\",//; s/instance=~\\"$metasrv\\"//; s/instance=~\\"$flownode\\",//; s/instance=~\\"$flownode\\"//;' $CLUSTER_DASHBOARD_DIR/dashboard.json > $STANDALONE_DASHBOARD_DIR/dashboard.json
}

generate_intermediate_dashboards_and_docs() {

@@ -26,17 +26,6 @@ excludes = [
"src/common/base/src/secrets.rs",
"src/servers/src/repeated_field.rs",
"src/servers/src/http/test_helpers.rs",
# enterprise
"src/common/meta/src/rpc/ddl/trigger.rs",
"src/operator/src/expr_helper/trigger.rs",
"src/sql/src/statements/alter/trigger.rs",
"src/sql/src/statements/create/trigger.rs",
"src/sql/src/statements/show/trigger.rs",
"src/sql/src/statements/drop/trigger.rs",
"src/sql/src/parsers/alter_parser/trigger.rs",
"src/sql/src/parsers/create_parser/trigger.rs",
"src/sql/src/parsers/show_parser/trigger.rs",
"src/mito2/src/extension.rs",
]

[properties]

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2025-05-19"
channel = "nightly-2025-04-15"

@@ -13,8 +13,8 @@
# limitations under the License.

import os
import re
from multiprocessing import Pool
from pathlib import Path


def find_rust_files(directory):
@@ -24,10 +24,6 @@ def find_rust_files(directory):
if "test" in root.lower():
continue

# Skip the target directory
if "target" in Path(root).parts:
continue

for file in files:
# Skip files with "test" in the filename
if "test" in file.lower():

scripts/install.sh (149 changed lines, Normal file → Executable file)
@@ -53,54 +53,6 @@ get_arch_type() {
|
||||
esac
|
||||
}
|
||||
|
||||
# Verify SHA256 checksum
|
||||
verify_sha256() {
|
||||
file="$1"
|
||||
expected_sha256="$2"
|
||||
|
||||
if command -v sha256sum >/dev/null 2>&1; then
|
||||
actual_sha256=$(sha256sum "$file" | cut -d' ' -f1)
|
||||
elif command -v shasum >/dev/null 2>&1; then
|
||||
actual_sha256=$(shasum -a 256 "$file" | cut -d' ' -f1)
|
||||
else
|
||||
echo "Warning: No SHA256 verification tool found (sha256sum or shasum). Skipping checksum verification."
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ "$actual_sha256" = "$expected_sha256" ]; then
|
||||
echo "SHA256 checksum verified successfully."
|
||||
return 0
|
||||
else
|
||||
echo "Error: SHA256 checksum verification failed!"
|
||||
echo "Expected: $expected_sha256"
|
||||
echo "Actual: $actual_sha256"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Prompt for user confirmation (compatible with different shells)
|
||||
prompt_confirmation() {
|
||||
message="$1"
|
||||
printf "%s (y/N): " "$message"
|
||||
|
||||
# Try to read user input, fallback if read fails
|
||||
answer=""
|
||||
if read answer </dev/tty 2>/dev/null; then
|
||||
case "$answer" in
|
||||
[Yy]|[Yy][Ee][Ss])
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
else
|
||||
echo ""
|
||||
echo "Cannot read user input. Defaulting to No."
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
download_artifact() {
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest stable released version.
|
||||
@@ -119,104 +71,17 @@ download_artifact() {
|
||||
fi
|
||||
|
||||
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
|
||||
PKG_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}"
|
||||
PACKAGE_NAME="${PKG_NAME}.tar.gz"
|
||||
SHA256_FILE="${PKG_NAME}.sha256sum"
|
||||
PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
|
||||
|
||||
if [ -n "${PACKAGE_NAME}" ]; then
|
||||
# Check if files already exist and prompt for override
|
||||
if [ -f "${PACKAGE_NAME}" ]; then
|
||||
echo "File ${PACKAGE_NAME} already exists."
|
||||
if prompt_confirmation "Do you want to override it?"; then
|
||||
echo "Overriding existing file..."
|
||||
rm -f "${PACKAGE_NAME}"
|
||||
else
|
||||
echo "Skipping download. Using existing file."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f "${BIN}" ]; then
|
||||
echo "Binary ${BIN} already exists."
|
||||
if prompt_confirmation "Do you want to override it?"; then
|
||||
echo "Will override existing binary..."
|
||||
rm -f "${BIN}"
|
||||
else
|
||||
echo "Installation cancelled."
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Download package if not exists
|
||||
if [ ! -f "${PACKAGE_NAME}" ]; then
|
||||
echo "Downloading ${PACKAGE_NAME}..."
|
||||
# Use curl instead of wget for better compatibility
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if ! curl -L -o "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
|
||||
echo "Error: Failed to download ${PACKAGE_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
if ! wget -O "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
|
||||
echo "Error: Failed to download ${PACKAGE_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Error: Neither curl nor wget is available for downloading."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Download and verify SHA256 checksum
|
||||
echo "Downloading SHA256 checksum..."
|
||||
sha256_download_success=0
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if curl -L -s -o "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
|
||||
sha256_download_success=1
|
||||
fi
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
if wget -q -O "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
|
||||
sha256_download_success=1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $sha256_download_success -eq 1 ] && [ -f "${SHA256_FILE}" ]; then
|
||||
expected_sha256=$(cat "${SHA256_FILE}" | cut -d' ' -f1)
|
||||
if [ -n "$expected_sha256" ]; then
|
||||
if ! verify_sha256 "${PACKAGE_NAME}" "${expected_sha256}"; then
|
||||
echo "SHA256 verification failed. Removing downloaded file."
|
||||
rm -f "${PACKAGE_NAME}" "${SHA256_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Warning: Could not parse SHA256 checksum from file."
|
||||
fi
|
||||
rm -f "${SHA256_FILE}"
|
||||
else
|
||||
echo "Warning: Could not download SHA256 checksum file. Skipping verification."
|
||||
fi
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
|
||||
|
||||
# Extract the binary and clean the rest.
|
||||
echo "Extracting ${PACKAGE_NAME}..."
|
||||
if ! tar xf "${PACKAGE_NAME}"; then
|
||||
echo "Error: Failed to extract ${PACKAGE_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find the binary in the extracted directory
|
||||
extracted_dir="${PACKAGE_NAME%.tar.gz}"
|
||||
if [ -f "${extracted_dir}/${BIN}" ]; then
|
||||
mv "${extracted_dir}/${BIN}" "${PWD}/"
|
||||
rm -f "${PACKAGE_NAME}"
|
||||
rm -rf "${extracted_dir}"
|
||||
chmod +x "${BIN}"
|
||||
echo "Installation completed successfully!"
|
||||
echo "Run './${BIN} --help' to get started"
|
||||
else
|
||||
echo "Error: Binary ${BIN} not found in extracted archive"
|
||||
rm -f "${PACKAGE_NAME}"
|
||||
rm -rf "${extracted_dir}"
|
||||
exit 1
|
||||
fi
|
||||
tar xvf "${PACKAGE_NAME}" && \
|
||||
mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
|
||||
rm -r "${PACKAGE_NAME}" && \
|
||||
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
||||
echo "Run './${BIN} --help' to get started"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
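One side of the install.sh diff above downloads the release tarball, fetches the matching .sha256sum file, verifies the digest, and only then extracts the binary. Purely for illustration, the same check can be expressed outside the shell script; the Rust sketch below assumes the sha2 and hex crates, which are not dependencies of the script and are named here only as an example.

use sha2::{Digest, Sha256};

// Hash the downloaded bytes and compare against the published hex digest,
// mirroring what verify_sha256 does with sha256sum/shasum.
fn verify_sha256(bytes: &[u8], expected_hex: &str) -> bool {
    let digest = Sha256::digest(bytes);
    hex::encode(digest) == expected_hex.to_lowercase()
}

fn main() {
    let data = b"hello";
    // Well-known SHA-256 of the ASCII string "hello".
    let expected = "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824";
    assert!(verify_sha256(data, expected));
    println!("checksum verified");
}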
@@ -17,7 +17,6 @@ use std::any::Any;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
@@ -67,28 +66,12 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid time unit: {time_unit}"))]
|
||||
InvalidTimeUnit {
|
||||
time_unit: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Inconsistent time unit: {:?}", units))]
|
||||
InconsistentTimeUnit {
|
||||
units: Vec<TimeUnit>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownColumnDataType { .. }
|
||||
| Error::InvalidTimeUnit { .. }
|
||||
| Error::InconsistentTimeUnit { .. } => StatusCode::InvalidArguments,
|
||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::BitVec;
|
||||
@@ -47,7 +46,7 @@ use greptime_proto::v1::{
|
||||
use paste::paste;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, InconsistentTimeUnitSnafu, InvalidTimeUnitSnafu, Result};
|
||||
use crate::error::{self, Result};
|
||||
use crate::v1::column::Values;
|
||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||
|
||||
@@ -292,7 +291,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
@@ -705,7 +703,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_)
|
||||
| ConcreteDataType::Json(_) => {
|
||||
@@ -867,7 +864,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_)
|
||||
| ConcreteDataType::Json(_) => {
|
||||
@@ -1054,7 +1050,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
Value::Int64(v) => Some(ValueData::I64Value(v)),
|
||||
Value::Float32(v) => Some(ValueData::F32Value(*v)),
|
||||
Value::Float64(v) => Some(ValueData::F64Value(*v)),
|
||||
Value::String(v) => Some(ValueData::StringValue(v.into_string())),
|
||||
Value::String(v) => Some(ValueData::StringValue(v.as_utf8().to_string())),
|
||||
Value::Binary(v) => Some(ValueData::BinaryValue(v.to_vec())),
|
||||
Value::Date(v) => Some(ValueData::DateValue(v.val())),
|
||||
Value::Timestamp(v) => Some(match v.unit() {
|
||||
@@ -1080,89 +1076,6 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_pb_time_unit(unit: v1::TimeUnit) -> TimeUnit {
|
||||
match unit {
|
||||
v1::TimeUnit::Second => TimeUnit::Second,
|
||||
v1::TimeUnit::Millisecond => TimeUnit::Millisecond,
|
||||
v1::TimeUnit::Microsecond => TimeUnit::Microsecond,
|
||||
v1::TimeUnit::Nanosecond => TimeUnit::Nanosecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_pb_time_unit(unit: TimeUnit) -> v1::TimeUnit {
|
||||
match unit {
|
||||
TimeUnit::Second => v1::TimeUnit::Second,
|
||||
TimeUnit::Millisecond => v1::TimeUnit::Millisecond,
|
||||
TimeUnit::Microsecond => v1::TimeUnit::Microsecond,
|
||||
TimeUnit::Nanosecond => v1::TimeUnit::Nanosecond,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_pb_time_ranges(time_ranges: v1::TimeRanges) -> Result<Vec<(Timestamp, Timestamp)>> {
|
||||
if time_ranges.time_ranges.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
let proto_time_unit = v1::TimeUnit::try_from(time_ranges.time_unit).map_err(|_| {
|
||||
InvalidTimeUnitSnafu {
|
||||
time_unit: time_ranges.time_unit,
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let time_unit = from_pb_time_unit(proto_time_unit);
|
||||
Ok(time_ranges
|
||||
.time_ranges
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
(
|
||||
Timestamp::new(r.start, time_unit),
|
||||
Timestamp::new(r.end, time_unit),
|
||||
)
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// All time_ranges must be of the same time unit.
|
||||
///
|
||||
/// if input `time_ranges` is empty, it will return a default `TimeRanges` with `Millisecond` as the time unit.
|
||||
pub fn to_pb_time_ranges(time_ranges: &[(Timestamp, Timestamp)]) -> Result<v1::TimeRanges> {
|
||||
let is_same_time_unit = time_ranges.windows(2).all(|x| {
|
||||
x[0].0.unit() == x[1].0.unit()
|
||||
&& x[0].1.unit() == x[1].1.unit()
|
||||
&& x[0].0.unit() == x[0].1.unit()
|
||||
});
|
||||
|
||||
if !is_same_time_unit {
|
||||
let all_time_units: Vec<_> = time_ranges
|
||||
.iter()
|
||||
.map(|(s, e)| [s.unit(), e.unit()])
|
||||
.clone()
|
||||
.flatten()
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect();
|
||||
InconsistentTimeUnitSnafu {
|
||||
units: all_time_units,
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
|
||||
let mut pb_time_ranges = v1::TimeRanges {
|
||||
// default time unit is Millisecond
|
||||
time_unit: v1::TimeUnit::Millisecond as i32,
|
||||
time_ranges: Vec::with_capacity(time_ranges.len()),
|
||||
};
|
||||
if let Some((start, _end)) = time_ranges.first() {
|
||||
pb_time_ranges.time_unit = to_pb_time_unit(start.unit()) as i32;
|
||||
}
|
||||
for (start, end) in time_ranges {
|
||||
pb_time_ranges.time_ranges.push(v1::TimeRange {
|
||||
start: start.value(),
|
||||
end: end.value(),
|
||||
});
|
||||
}
|
||||
Ok(pb_time_ranges)
|
||||
}
|
||||
|
||||
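The pair of helpers above convert between protobuf TimeRanges and (Timestamp, Timestamp) pairs: all ranges must share one time unit, empty input falls back to Millisecond, and a mixed-unit slice fails with InconsistentTimeUnit. A minimal round-trip sketch, assuming these helpers plus common_time's Timestamp and TimeUnit are in scope as in the module shown above:

use common_time::timestamp::{TimeUnit, Timestamp};

// Encode two millisecond ranges to protobuf and decode them back; the
// result should be identical to the input.
fn round_trip() -> Result<()> {
    let ranges = vec![
        (
            Timestamp::new(0, TimeUnit::Millisecond),
            Timestamp::new(1_000, TimeUnit::Millisecond),
        ),
        (
            Timestamp::new(2_000, TimeUnit::Millisecond),
            Timestamp::new(3_000, TimeUnit::Millisecond),
        ),
    ];
    let pb = to_pb_time_ranges(&ranges)?;
    let decoded = from_pb_time_ranges(pb)?;
    assert_eq!(ranges, decoded);
    Ok(())
}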
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -22,7 +22,6 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
|
||||
pub struct RegionResponse {
|
||||
pub affected_rows: AffectedRows,
|
||||
pub extensions: HashMap<String, Vec<u8>>,
|
||||
pub metadata: Vec<u8>,
|
||||
}
|
||||
|
||||
impl RegionResponse {
|
||||
@@ -30,7 +29,6 @@ impl RegionResponse {
|
||||
Self {
|
||||
affected_rows: region_response.affected_rows as _,
|
||||
extensions: region_response.extensions,
|
||||
metadata: region_response.metadata,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,16 +37,6 @@ impl RegionResponse {
|
||||
Self {
|
||||
affected_rows,
|
||||
extensions: Default::default(),
|
||||
metadata: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates one response with metadata.
|
||||
pub fn from_metadata(metadata: Vec<u8>) -> Self {
|
||||
Self {
|
||||
affected_rows: 0,
|
||||
extensions: Default::default(),
|
||||
metadata,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ use greptime_proto::v1::{
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
|
||||
use crate::error::{self, Result};
|
||||
use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
|
||||
|
||||
@@ -77,48 +77,6 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnDef` from the given `ColumnSchema`.
|
||||
///
|
||||
/// TODO(weny): Add tests for this function.
|
||||
pub fn try_as_column_def(column_schema: &ColumnSchema, is_primary_key: bool) -> Result<ColumnDef> {
|
||||
let column_datatype =
|
||||
ColumnDataTypeWrapper::try_from(column_schema.data_type.clone()).map(|w| w.to_parts())?;
|
||||
|
||||
let semantic_type = if column_schema.is_time_index() {
|
||||
SemanticType::Timestamp
|
||||
} else if is_primary_key {
|
||||
SemanticType::Tag
|
||||
} else {
|
||||
SemanticType::Field
|
||||
} as i32;
|
||||
let comment = column_schema
|
||||
.metadata()
|
||||
.get(COMMENT_KEY)
|
||||
.cloned()
|
||||
.unwrap_or_default();
|
||||
|
||||
let default_constraint = match column_schema.default_constraint() {
|
||||
None => vec![],
|
||||
Some(v) => v
|
||||
.clone()
|
||||
.try_into()
|
||||
.context(ConvertColumnDefaultConstraintSnafu {
|
||||
column: &column_schema.name,
|
||||
})?,
|
||||
};
|
||||
let options = options_from_column_schema(column_schema);
|
||||
Ok(ColumnDef {
|
||||
name: column_schema.name.clone(),
|
||||
data_type: column_datatype.0 as i32,
|
||||
is_nullable: column_schema.is_nullable(),
|
||||
default_constraint,
|
||||
semantic_type,
|
||||
comment,
|
||||
datatype_extension: column_datatype.1,
|
||||
options,
|
||||
})
|
||||
}
|
||||
|
||||
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
|
||||
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
|
||||
let mut options = ColumnOptions::default();
|
||||
@@ -268,20 +226,18 @@ mod tests {
|
||||
assert!(options.is_none());
|
||||
|
||||
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||
.with_fulltext_options(FulltextOptions::new_unchecked(
|
||||
true,
|
||||
FulltextAnalyzer::English,
|
||||
false,
|
||||
FulltextBackend::Bloom,
|
||||
10240,
|
||||
0.01,
|
||||
))
|
||||
.with_fulltext_options(FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
})
|
||||
.unwrap();
|
||||
schema.set_inverted_index(true);
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
assert_eq!(
|
||||
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
|
||||
@@ -291,18 +247,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_options_with_fulltext() {
|
||||
let fulltext = FulltextOptions::new_unchecked(
|
||||
true,
|
||||
FulltextAnalyzer::English,
|
||||
false,
|
||||
FulltextBackend::Bloom,
|
||||
10240,
|
||||
0.01,
|
||||
);
|
||||
let fulltext = FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
};
|
||||
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
|
||||
}
|
||||
|
||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||
let (name, content) = opt.split_once(':').context(InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
@@ -57,24 +57,6 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn static_user_provider_from_option(opt: &String) -> Result<StaticUserProvider> {
|
||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
match name {
|
||||
STATIC_USER_PROVIDER => {
|
||||
let provider = StaticUserProvider::new(content)?;
|
||||
Ok(provider)
|
||||
}
|
||||
_ => InvalidConfigSnafu {
|
||||
value: name.to_string(),
|
||||
msg: format!("Invalid UserProviderOption, expect only {STATIC_USER_PROVIDER}"),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
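user_provider_from_option keeps the documented "<option>:<value>" contract: the part before the first colon names the provider and the rest is handed to that provider. A hedged sketch of how a caller might use it; the crate name and the static_user_provider value syntax shown here are assumptions for illustration and are not taken from this diff.

use auth::user_provider_from_option;

fn main() {
    // Assumed example value: provider name, then a provider-specific payload.
    let opt = "static_user_provider:cmd:greptime_user=greptime_pwd".to_string();
    match user_provider_from_option(&opt) {
        // The returned UserProviderRef is what the frontend servers consume.
        Ok(provider) => {
            let _ = provider;
        }
        Err(e) => eprintln!("invalid user provider option: {e}"),
    }
}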
type Username<'a> = &'a str;
|
||||
type HostOrIp<'a> = &'a str;
|
||||
|
||||
|
||||
@@ -38,14 +38,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert to utf8"))]
|
||||
FromUtf8 {
|
||||
#[snafu(source)]
|
||||
error: std::string::FromUtf8Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Authentication source failure"))]
|
||||
AuthBackend {
|
||||
#[snafu(implicit)]
|
||||
@@ -93,7 +85,7 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::IllegalParam { .. } | Error::FromUtf8 { .. } => StatusCode::InvalidArguments,
|
||||
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
||||
Error::FileWatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::InternalState { .. } => StatusCode::Unexpected,
|
||||
Error::Io { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
@@ -22,12 +22,10 @@ mod user_provider;
|
||||
pub mod tests;
|
||||
|
||||
pub use common::{
|
||||
auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name,
|
||||
HashedPassword, Identity, Password,
|
||||
auth_mysql, user_provider_from_option, userinfo_by_name, HashedPassword, Identity, Password,
|
||||
};
|
||||
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
|
||||
pub use user_info::UserInfo;
|
||||
pub use user_provider::static_user_provider::StaticUserProvider;
|
||||
pub use user_provider::UserProvider;
|
||||
|
||||
/// pub type alias
|
||||
|
||||
@@ -15,15 +15,15 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error::{FromUtf8Snafu, InvalidConfigSnafu, Result};
|
||||
use crate::error::{InvalidConfigSnafu, Result};
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
pub struct StaticUserProvider {
|
||||
pub(crate) struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
@@ -60,18 +60,6 @@ impl StaticUserProvider {
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a random username/password pair
|
||||
/// This is useful for invoking from other components in the cluster
|
||||
pub fn get_one_user_pwd(&self) -> Result<(String, String)> {
|
||||
let kv = self.users.iter().next().context(InvalidConfigSnafu {
|
||||
value: "",
|
||||
msg: "Expect at least one pair of username and password",
|
||||
})?;
|
||||
let username = kv.0;
|
||||
let pwd = String::from_utf8(kv.1.clone()).context(FromUtf8Snafu)?;
|
||||
Ok((username.clone(), pwd))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
@@ -5,7 +5,6 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
enterprise = []
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
@@ -18,10 +17,8 @@ arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-frontend.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-procedure.workspace = true
|
||||
@@ -44,8 +41,6 @@ moka = { workspace = true, features = ["future", "sync"] }
|
||||
partition.workspace = true
|
||||
paste.workspace = true
|
||||
prometheus.workspace = true
|
||||
promql-parser.workspace = true
|
||||
rand.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -277,26 +277,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to invoke frontend services"))]
|
||||
InvokeFrontend {
|
||||
source: common_frontend::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Meta client is not provided"))]
|
||||
MetaClientMissing {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find frontend node: {}", addr))]
|
||||
FrontendNotFound {
|
||||
addr: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl Error {
|
||||
@@ -365,10 +345,6 @@ impl ErrorExt for Error {
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvokeFrontend { source, .. } => source.status_code(),
|
||||
Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::rpc::procedure;
|
||||
use common_procedure::{ProcedureInfo, ProcedureState};
|
||||
use meta_client::MetaClientRef;
|
||||
|
||||
@@ -14,11 +14,9 @@
|
||||
|
||||
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
|
||||
|
||||
mod builder;
|
||||
mod client;
|
||||
mod manager;
|
||||
mod table_cache;
|
||||
|
||||
pub use builder::KvBackendCatalogManagerBuilder;
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
|
||||
|
||||
@@ -1,131 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_meta::cache::LayeredCacheRegistryRef;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
pub struct KvBackendCatalogManagerBuilder {
|
||||
information_extension: InformationExtensionRef,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories:
|
||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
impl KvBackendCatalogManagerBuilder {
|
||||
pub fn new(
|
||||
information_extension: InformationExtensionRef,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
information_extension,
|
||||
backend,
|
||||
cache_registry,
|
||||
procedure_manager: None,
|
||||
process_manager: None,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories: std::collections::HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_procedure_manager(mut self, procedure_manager: ProcedureManagerRef) -> Self {
|
||||
self.procedure_manager = Some(procedure_manager);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_process_manager(mut self, process_manager: ProcessManagerRef) -> Self {
|
||||
self.process_manager = Some(process_manager);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the extra information tables.
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn with_extra_information_table_factories(
|
||||
mut self,
|
||||
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
) -> Self {
|
||||
self.extra_information_table_factories = factories;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Arc<KvBackendCatalogManager> {
|
||||
let Self {
|
||||
information_extension,
|
||||
backend,
|
||||
cache_registry,
|
||||
procedure_manager,
|
||||
process_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories,
|
||||
} = self;
|
||||
Arc::new_cyclic(|me| KvBackendCatalogManager {
|
||||
information_extension,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
cache_registry
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: {
|
||||
let provider = InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
process_manager.clone(),
|
||||
backend.clone(),
|
||||
);
|
||||
#[cfg(feature = "enterprise")]
|
||||
let provider = provider
|
||||
.with_extra_table_factories(extra_information_table_factories.clone());
|
||||
Arc::new(provider)
|
||||
},
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
process_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories,
|
||||
},
|
||||
cache_registry,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
}
|
||||
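The builder above covers the same inputs as the KvBackendCatalogManager::new constructor shown in the manager.rs hunks further down, with the procedure and process managers exposed as optional setters. A sketch of the intended call pattern; the module paths are assumptions based on the re-exports earlier in this diff, and the three required handles are expected to come from the caller.

use std::sync::Arc;

use catalog::information_schema::InformationExtensionRef;
use catalog::kvbackend::{KvBackendCatalogManager, KvBackendCatalogManagerBuilder};
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;

fn build_catalog_manager(
    information_extension: InformationExtensionRef,
    backend: KvBackendRef,
    cache_registry: LayeredCacheRegistryRef,
    procedure_manager: Option<ProcedureManagerRef>,
) -> Arc<KvBackendCatalogManager> {
    let mut builder =
        KvBackendCatalogManagerBuilder::new(information_extension, backend, cache_registry);
    if let Some(procedure_manager) = procedure_manager {
        // Only present in Standalone mode, mirroring the Option argument of new().
        builder = builder.with_procedure_manager(procedure_manager);
    }
    builder.build()
}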
@@ -22,27 +22,23 @@ use common_catalog::consts::{
|
||||
PG_CATALOG_NAME,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{
|
||||
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
|
||||
ViewInfoCacheRef,
|
||||
};
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use futures_util::stream::BoxStream;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::PartitionRuleManagerRef;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
@@ -53,11 +49,8 @@ use crate::error::{
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
};
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
@@ -70,22 +63,57 @@ use crate::CatalogManager;
|
||||
#[derive(Clone)]
|
||||
pub struct KvBackendCatalogManager {
|
||||
/// Provides the extension methods for the `information_schema` tables
|
||||
pub(super) information_extension: InformationExtensionRef,
|
||||
information_extension: InformationExtensionRef,
|
||||
/// Manages partition rules.
|
||||
pub(super) partition_manager: PartitionRuleManagerRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
/// Manages table metadata.
|
||||
pub(super) table_metadata_manager: TableMetadataManagerRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
pub(super) system_catalog: SystemCatalog,
|
||||
system_catalog: SystemCatalog,
|
||||
/// Cache registry for all caches.
|
||||
pub(super) cache_registry: LayeredCacheRegistryRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
/// Only available in `Standalone` mode.
|
||||
pub(super) procedure_manager: Option<ProcedureManagerRef>,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
|
||||
pub(super) const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(
|
||||
information_extension: InformationExtensionRef,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
) -> Arc<Self> {
|
||||
Arc::new_cyclic(|me| Self {
|
||||
information_extension,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
cache_registry
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
)),
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
},
|
||||
cache_registry,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "view_info_cache",
|
||||
@@ -108,61 +136,6 @@ impl KvBackendCatalogManager {
|
||||
pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
|
||||
self.procedure_manager.clone()
|
||||
}
|
||||
|
||||
// Override logical table's partition key indices with physical table's.
|
||||
async fn override_logical_table_partition_key_indices(
|
||||
table_route_cache: &TableRouteCacheRef,
|
||||
table_info_manager: &TableInfoManager,
|
||||
table: TableRef,
|
||||
) -> Result<TableRef> {
|
||||
// If the table is not a metric table, return the table directly.
|
||||
if table.table_info().meta.engine != METRIC_ENGINE_NAME {
|
||||
return Ok(table);
|
||||
}
|
||||
|
||||
if let Some(table_route_value) = table_route_cache
|
||||
.get(table.table_info().table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
&& let TableRoute::Logical(logical_route) = &*table_route_value
|
||||
&& let Some(physical_table_info_value) = table_info_manager
|
||||
.get(logical_route.physical_table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
|
||||
// Remap partition key indices from physical table to logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.partition_key_indices
|
||||
.iter()
|
||||
.filter_map(|&physical_index| {
|
||||
// Get the column name from the physical table using the physical index
|
||||
physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.get(physical_index)
|
||||
.and_then(|physical_column| {
|
||||
// Find the corresponding index in the logical table schema
|
||||
new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_index_by_name(physical_column.name.as_str())
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
|
||||
return Ok(new_table);
|
||||
}
|
||||
|
||||
Ok(table)
|
||||
}
|
||||
}
|
||||
|
||||
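The function above rewrites a logical (metric engine) table's partition_key_indices in terms of its own schema by going through the physical table: physical index, then physical column name, then the index of that name in the logical schema. The same remapping on plain vectors, as a self-contained sketch:

// Plain Vec<String> stands in for the actual schema types.
fn remap_partition_key_indices(
    physical_indices: &[usize],
    physical_columns: &[String],
    logical_columns: &[String],
) -> Vec<usize> {
    physical_indices
        .iter()
        .filter_map(|&physical_index| {
            // Resolve the column name on the physical table, then look the
            // same name up in the logical table's schema.
            physical_columns
                .get(physical_index)
                .and_then(|name| logical_columns.iter().position(|c| c == name))
        })
        .collect()
}

fn main() {
    let physical = vec!["ts".to_string(), "host".to_string(), "value".to_string()];
    let logical = vec!["host".to_string(), "ts".to_string()];
    // Physical partition key is "host" (index 1); in the logical schema it is index 0.
    assert_eq!(remap_partition_key_indices(&[1], &physical, &logical), vec![0]);
}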
#[async_trait::async_trait]
|
||||
@@ -289,28 +262,16 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
|
||||
let table = table_cache
|
||||
if let Some(table) = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
if let Some(table) = table {
|
||||
let table_route_cache: TableRouteCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
})?;
|
||||
return Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
self.table_metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
.map(Some);
|
||||
.context(GetTableCacheSnafu)?
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
@@ -323,64 +284,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, schema_name, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
|
||||
let table_cache: TableNameCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_name_cache",
|
||||
})?;
|
||||
|
||||
let table = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
if let Some(table) = table {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
// falldown to pg_catalog
|
||||
if let Some(table) =
|
||||
self.system_catalog
|
||||
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
|
||||
{
|
||||
return Ok(Some(table.table_info().table_id()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
let table_info_cache: TableInfoCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_info_cache",
|
||||
})?;
|
||||
table_info_cache
|
||||
.get_by_ref(&table_id)
|
||||
.await
|
||||
.context(GetTableCacheSnafu)
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
@@ -432,20 +336,8 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let catalog = catalog.to_string();
|
||||
let schema = schema.to_string();
|
||||
let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
|
||||
let table_route_cache: Result<TableRouteCacheRef> =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
});
|
||||
|
||||
common_runtime::spawn_global(async move {
|
||||
let table_route_cache = match table_route_cache {
|
||||
Ok(table_route_cache) => table_route_cache,
|
||||
Err(e) => {
|
||||
let _ = tx.send(Err(e)).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let table_id_stream = metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(&catalog, &schema)
|
||||
@@ -472,7 +364,6 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let metadata_manager = metadata_manager.clone();
|
||||
let tx = tx.clone();
|
||||
let semaphore = semaphore.clone();
|
||||
let table_route_cache = table_route_cache.clone();
|
||||
common_runtime::spawn_global(async move {
|
||||
// we don't explicitly close the semaphore so just ignore the potential error.
|
||||
let _ = semaphore.acquire().await;
|
||||
@@ -490,16 +381,6 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
};
|
||||
|
||||
for table in table_info_values.into_values().map(build_table) {
|
||||
let table = if let Ok(table) = table {
|
||||
Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
table
|
||||
};
|
||||
if tx.send(table).await.is_err() {
|
||||
return;
|
||||
}
|
||||
@@ -529,19 +410,15 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
/// - information_schema.{tables}
|
||||
/// - pg_catalog.{tables}
|
||||
#[derive(Clone)]
|
||||
pub(super) struct SystemCatalog {
|
||||
pub(super) catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
pub(super) catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pub(super) pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provider for default catalog
|
||||
pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
pub(super) backend: KvBackendRef,
|
||||
pub(super) process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(super) extra_information_table_factories:
|
||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
@@ -605,17 +482,11 @@ impl SystemCatalog {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
let information_schema_provider =
|
||||
self.catalog_cache.get_with_by_ref(catalog, move || {
|
||||
let provider = InformationSchemaProvider::new(
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
Arc::new(FlowMetadataManager::new(self.backend.clone())),
|
||||
self.process_manager.clone(),
|
||||
self.backend.clone(),
|
||||
);
|
||||
#[cfg(feature = "enterprise")]
|
||||
let provider = provider
|
||||
.with_extra_table_factories(self.extra_information_table_factories.clone());
|
||||
Arc::new(provider)
|
||||
))
|
||||
});
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(let_chains)]
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
@@ -25,7 +24,7 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::metadata::TableId;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -41,7 +40,6 @@ pub mod information_schema {
|
||||
pub use crate::system_schema::information_schema::*;
|
||||
}
|
||||
|
||||
pub mod process_manager;
|
||||
pub mod table_source;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -89,23 +87,6 @@ pub trait CatalogManager: Send + Sync {
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
/// Returns the table id of provided table ident.
|
||||
async fn table_id(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableId>> {
|
||||
Ok(self
|
||||
.table(catalog, schema, table_name, query_ctx)
|
||||
.await?
|
||||
.map(|t| t.table_info().ident.table_id))
|
||||
}
|
||||
|
||||
/// Returns the table of provided id.
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>>;
|
||||
|
||||
/// Returns the tables by table ids.
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::metadata::TableId;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
@@ -144,18 +144,6 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.flat_map(|(_, schema_entries)| schema_entries.values())
|
||||
.flat_map(|tables| tables.values())
|
||||
.find(|t| t.table_info().ident.table_id == table_id)
|
||||
.map(|t| t.table_info()))
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
@@ -364,13 +352,10 @@ impl MemoryCatalogManager {
|
||||
}
|
||||
|
||||
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
|
||||
let backend = Arc::new(MemoryKvBackend::new());
|
||||
let information_schema_provider = InformationSchemaProvider::new(
|
||||
catalog,
|
||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
None, // we don't need ProcessManager on regions server.
|
||||
backend,
|
||||
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
|
||||
);
|
||||
let information_schema = information_schema_provider.tables().clone();
|
||||
|
||||
|
||||
@@ -34,20 +34,4 @@ lazy_static! {
|
||||
register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
|
||||
pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
|
||||
register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
|
||||
|
||||
/// Count of running process in each catalog.
|
||||
pub static ref PROCESS_LIST_COUNT: IntGaugeVec = register_int_gauge_vec!(
|
||||
"greptime_process_list_count",
|
||||
"Running process count per catalog",
|
||||
&["catalog"]
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
/// Count of killed process in each catalog.
|
||||
pub static ref PROCESS_KILL_COUNT: IntCounterVec = register_int_counter_vec!(
|
||||
"greptime_process_kill_count",
|
||||
"Completed kill process requests count",
|
||||
&["catalog"]
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -1,639 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant, UNIX_EPOCH};
|
||||
|
||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||
use common_base::cancellation::CancellationHandle;
|
||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||
use common_frontend::slow_query_event::SlowQueryEvent;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use rand::random;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::statements::statement::Statement;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
||||
use crate::error;
|
||||
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
||||
|
||||
pub type ProcessId = u32;
|
||||
pub type ProcessManagerRef = Arc<ProcessManager>;
|
||||
|
||||
/// Query process manager.
|
||||
pub struct ProcessManager {
|
||||
/// Local frontend server address.
|
||||
server_addr: String,
|
||||
/// Next process id for local queries.
|
||||
next_id: AtomicU32,
|
||||
/// Running process per catalog.
|
||||
catalogs: RwLock<HashMap<String, HashMap<ProcessId, CancellableProcess>>>,
|
||||
/// Frontend selector to locate frontend nodes.
|
||||
frontend_selector: Option<MetaClientSelector>,
|
||||
}
|
||||
|
||||
/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
|
||||
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryStatement {
|
||||
Sql(Statement),
|
||||
Promql(EvalStmt),
|
||||
}
|
||||
|
||||
impl Display for QueryStatement {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
|
||||
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessManager {
|
||||
/// Create a [ProcessManager] instance with server address and kv client.
|
||||
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
|
||||
let frontend_selector = meta_client.map(MetaClientSelector::new);
|
||||
Self {
|
||||
server_addr,
|
||||
next_id: Default::default(),
|
||||
catalogs: Default::default(),
|
||||
frontend_selector,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessManager {
|
||||
/// Registers a submitted query. Use the provided id if present.
|
||||
#[must_use]
|
||||
pub fn register_query(
|
||||
self: &Arc<Self>,
|
||||
catalog: String,
|
||||
schemas: Vec<String>,
|
||||
query: String,
|
||||
client: String,
|
||||
query_id: Option<ProcessId>,
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
) -> Ticket {
|
||||
let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
|
||||
let process = ProcessInfo {
|
||||
id,
|
||||
catalog: catalog.clone(),
|
||||
schemas,
|
||||
query,
|
||||
start_timestamp: current_time_millis(),
|
||||
client,
|
||||
frontend: self.server_addr.clone(),
|
||||
};
|
||||
let cancellation_handle = Arc::new(CancellationHandle::default());
|
||||
let cancellable_process = CancellableProcess::new(cancellation_handle.clone(), process);
|
||||
|
||||
self.catalogs
|
||||
.write()
|
||||
.unwrap()
|
||||
.entry(catalog.clone())
|
||||
.or_default()
|
||||
.insert(id, cancellable_process);
|
||||
|
||||
Ticket {
|
||||
catalog,
|
||||
manager: self.clone(),
|
||||
id,
|
||||
cancellation_handle,
|
||||
_slow_query_timer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates the next process id.
|
||||
pub fn next_id(&self) -> u32 {
|
||||
self.next_id.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// De-register a query from process list.
|
||||
pub fn deregister_query(&self, catalog: String, id: ProcessId) {
|
||||
if let Entry::Occupied(mut o) = self.catalogs.write().unwrap().entry(catalog) {
|
||||
let process = o.get_mut().remove(&id);
|
||||
debug!("Deregister process: {:?}", process);
|
||||
if o.get().is_empty() {
|
||||
o.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// List local running processes in given catalog.
|
||||
pub fn local_processes(&self, catalog: Option<&str>) -> error::Result<Vec<ProcessInfo>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
let result = if let Some(catalog) = catalog {
|
||||
if let Some(catalogs) = catalogs.get(catalog) {
|
||||
catalogs.values().map(|p| p.process.clone()).collect()
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
} else {
|
||||
catalogs
|
||||
.values()
|
||||
.flat_map(|v| v.values().map(|p| p.process.clone()))
|
||||
.collect()
|
||||
};
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub async fn list_all_processes(
|
||||
&self,
|
||||
catalog: Option<&str>,
|
||||
) -> error::Result<Vec<ProcessInfo>> {
|
||||
let mut processes = vec![];
|
||||
if let Some(remote_frontend_selector) = self.frontend_selector.as_ref() {
|
||||
let frontends = remote_frontend_selector
|
||||
.select(|node| node.peer.addr != self.server_addr)
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?;
|
||||
for mut f in frontends {
|
||||
let result = f
|
||||
.list_process(ListProcessRequest {
|
||||
catalog: catalog.unwrap_or_default().to_string(),
|
||||
})
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu);
|
||||
match result {
|
||||
Ok(resp) => {
|
||||
processes.extend(resp.processes);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(e; "Skipping failing node: {:?}", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
processes.extend(self.local_processes(catalog)?);
|
||||
Ok(processes)
|
||||
}
|
||||
|
||||
/// Kills query with provided catalog and id.
|
||||
pub async fn kill_process(
|
||||
&self,
|
||||
server_addr: String,
|
||||
catalog: String,
|
||||
id: ProcessId,
|
||||
) -> error::Result<bool> {
|
||||
if server_addr == self.server_addr {
|
||||
self.kill_local_process(catalog, id).await
|
||||
} else {
|
||||
let mut nodes = self
|
||||
.frontend_selector
|
||||
.as_ref()
|
||||
.context(error::MetaClientMissingSnafu)?
|
||||
.select(|node| node.peer.addr == server_addr)
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?;
|
||||
ensure!(
|
||||
!nodes.is_empty(),
|
||||
error::FrontendNotFoundSnafu { addr: server_addr }
|
||||
);
|
||||
|
||||
let request = KillProcessRequest {
|
||||
server_addr,
|
||||
catalog,
|
||||
process_id: id,
|
||||
};
|
||||
nodes[0]
|
||||
.kill_process(request)
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
/// Kills local query with provided catalog and id.
|
||||
pub async fn kill_local_process(&self, catalog: String, id: ProcessId) -> error::Result<bool> {
|
||||
if let Some(catalogs) = self.catalogs.write().unwrap().get_mut(&catalog) {
|
||||
if let Some(process) = catalogs.remove(&id) {
|
||||
process.handle.cancel();
|
||||
info!(
|
||||
"Killed process, catalog: {}, id: {:?}",
|
||||
process.process.catalog, process.process.id
|
||||
);
|
||||
PROCESS_KILL_COUNT.with_label_values(&[&catalog]).inc();
|
||||
Ok(true)
|
||||
} else {
|
||||
debug!("Failed to kill process, id not found: {}", id);
|
||||
Ok(false)
|
||||
}
|
||||
} else {
|
||||
debug!("Failed to kill process, catalog not found: {}", catalog);
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Ticket {
|
||||
pub(crate) catalog: String,
|
||||
pub(crate) manager: ProcessManagerRef,
|
||||
pub(crate) id: ProcessId,
|
||||
pub cancellation_handle: Arc<CancellationHandle>,
|
||||
_slow_query_timer: Option<SlowQueryTimer>,
|
||||
}
|
||||
|
||||
impl Drop for Ticket {
|
||||
fn drop(&mut self) {
|
||||
self.manager
|
||||
.deregister_query(std::mem::take(&mut self.catalog), self.id);
|
||||
}
|
||||
}
|
||||
|
||||
struct CancellableProcess {
|
||||
handle: Arc<CancellationHandle>,
|
||||
process: ProcessInfo,
|
||||
}
|
||||
|
||||
impl Drop for CancellableProcess {
|
||||
fn drop(&mut self) {
|
||||
PROCESS_LIST_COUNT
|
||||
.with_label_values(&[&self.process.catalog])
|
||||
.dec();
|
||||
}
|
||||
}
|
||||
|
||||
impl CancellableProcess {
|
||||
fn new(handle: Arc<CancellationHandle>, process: ProcessInfo) -> Self {
|
||||
PROCESS_LIST_COUNT
|
||||
.with_label_values(&[&process.catalog])
|
||||
.inc();
|
||||
Self { handle, process }
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for CancellableProcess {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CancellableProcess")
|
||||
.field("cancelled", &self.handle.is_cancelled())
|
||||
.field("process", &self.process)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// SlowQueryTimer is used to log slow queries when it's dropped.
|
||||
/// In drop(), it will check if the query is slow and send the slow query event to the handler.
|
||||
pub struct SlowQueryTimer {
|
||||
start: Instant,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
threshold: Option<Duration>,
|
||||
sample_ratio: Option<f64>,
|
||||
tx: Sender<SlowQueryEvent>,
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
pub fn new(
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
threshold: Option<Duration>,
|
||||
sample_ratio: Option<f64>,
|
||||
tx: Sender<SlowQueryEvent>,
|
||||
) -> Self {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
stmt,
|
||||
query_ctx,
|
||||
threshold,
|
||||
sample_ratio,
|
||||
tx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SlowQueryTimer {
|
||||
fn send_slow_query_event(&self, elapsed: Duration, threshold: Duration) {
|
||||
let mut slow_query_event = SlowQueryEvent {
|
||||
cost: elapsed.as_millis() as u64,
|
||||
threshold: threshold.as_millis() as u64,
|
||||
query: "".to_string(),
|
||||
query_ctx: self.query_ctx.clone(),
|
||||
|
||||
// The following fields are only used for PromQL queries.
|
||||
is_promql: false,
|
||||
promql_range: None,
|
||||
promql_step: None,
|
||||
promql_start: None,
|
||||
promql_end: None,
|
||||
};
|
||||
|
||||
match &self.stmt {
|
||||
QueryStatement::Promql(stmt) => {
|
||||
slow_query_event.is_promql = true;
|
||||
slow_query_event.query = stmt.expr.to_string();
|
||||
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
|
||||
|
||||
let start = stmt
|
||||
.start
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
let end = stmt
|
||||
.end
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as i64;
|
||||
|
||||
slow_query_event.promql_range = Some((end - start) as u64);
|
||||
slow_query_event.promql_start = Some(start);
|
||||
slow_query_event.promql_end = Some(end);
|
||||
}
|
||||
QueryStatement::Sql(stmt) => {
|
||||
slow_query_event.query = stmt.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Send SlowQueryEvent to the handler.
|
||||
if let Err(e) = self.tx.try_send(slow_query_event) {
|
||||
error!(e; "Failed to send slow query event");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SlowQueryTimer {
|
||||
fn drop(&mut self) {
|
||||
if let Some(threshold) = self.threshold {
|
||||
// Calculate the elapsed duration since the timer was created.
|
||||
let elapsed = self.start.elapsed();
|
||||
if elapsed > threshold {
|
||||
if let Some(ratio) = self.sample_ratio {
|
||||
// Only capture a portion of slow queries based on sample_ratio.
|
||||
// Generate a random number in [0, 1) and compare it with sample_ratio.
|
||||
if ratio >= 1.0 || random::<f64>() <= ratio {
|
||||
self.send_slow_query_event(elapsed, threshold);
|
||||
}
|
||||
} else {
|
||||
// Captures all slow queries if sample_ratio is not set.
|
||||
self.send_slow_query_event(elapsed, threshold);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::process_manager::ProcessManager;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_query() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
let ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(None).unwrap();
|
||||
assert_eq!(running_processes.len(), 1);
|
||||
assert_eq!(&running_processes[0].frontend, "127.0.0.1:8000");
|
||||
assert_eq!(running_processes[0].id, ticket.id);
|
||||
assert_eq!(&running_processes[0].query, "SELECT * FROM table");
|
||||
|
||||
drop(ticket);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_register_query_with_custom_id() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
let custom_id = 12345;
|
||||
|
||||
let ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
Some(custom_id),
|
||||
None,
|
||||
);
|
||||
|
||||
assert_eq!(ticket.id, custom_id);
|
||||
|
||||
let running_processes = process_manager.local_processes(None).unwrap();
|
||||
assert_eq!(running_processes.len(), 1);
|
||||
assert_eq!(running_processes[0].id, custom_id);
|
||||
assert_eq!(&running_processes[0].client, "client1");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_queries_same_catalog() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let ticket1 = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["schema1".to_string()],
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let ticket2 = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["schema2".to_string()],
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let running_processes = process_manager.local_processes(Some("public")).unwrap();
|
||||
assert_eq!(running_processes.len(), 2);
|
||||
|
||||
// Verify both processes are present
|
||||
let ids: Vec<u32> = running_processes.iter().map(|p| p.id).collect();
|
||||
assert!(ids.contains(&ticket1.id));
|
||||
assert!(ids.contains(&ticket2.id));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_catalogs() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let _ticket1 = process_manager.clone().register_query(
|
||||
"catalog1".to_string(),
|
||||
vec!["schema1".to_string()],
|
||||
"SELECT * FROM table1".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
let _ticket2 = process_manager.clone().register_query(
|
||||
"catalog2".to_string(),
|
||||
vec!["schema2".to_string()],
|
||||
"SELECT * FROM table2".to_string(),
|
||||
"client2".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Test listing processes for specific catalog
|
||||
let catalog1_processes = process_manager.local_processes(Some("catalog1")).unwrap();
|
||||
assert_eq!(catalog1_processes.len(), 1);
|
||||
assert_eq!(&catalog1_processes[0].catalog, "catalog1");
|
||||
|
||||
let catalog2_processes = process_manager.local_processes(Some("catalog2")).unwrap();
|
||||
assert_eq!(catalog2_processes.len(), 1);
|
||||
assert_eq!(&catalog2_processes[0].catalog, "catalog2");
|
||||
|
||||
// Test listing all processes
|
||||
let all_processes = process_manager.local_processes(None).unwrap();
|
||||
assert_eq!(all_processes.len(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deregister_query() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
|
||||
process_manager.deregister_query("public".to_string(), ticket.id);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cancellation_handle() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
ticket.cancellation_handle.cancel();
|
||||
assert!(ticket.cancellation_handle.is_cancelled());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_kill_local_process() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert!(!ticket.cancellation_handle.is_cancelled());
|
||||
let killed = process_manager
|
||||
.kill_process(
|
||||
"127.0.0.1:8000".to_string(),
|
||||
"public".to_string(),
|
||||
ticket.id,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(killed);
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_kill_nonexistent_process() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
let killed = process_manager
|
||||
.kill_process("127.0.0.1:8000".to_string(), "public".to_string(), 999)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!killed);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_kill_process_nonexistent_catalog() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
let killed = process_manager
|
||||
.kill_process("127.0.0.1:8000".to_string(), "nonexistent".to_string(), 1)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!killed);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_process_info_fields() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
let _ticket = process_manager.clone().register_query(
|
||||
"test_catalog".to_string(),
|
||||
vec!["schema1".to_string(), "schema2".to_string()],
|
||||
"SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
|
||||
"test_client".to_string(),
|
||||
Some(42),
|
||||
None,
|
||||
);
|
||||
|
||||
let processes = process_manager.local_processes(None).unwrap();
|
||||
assert_eq!(processes.len(), 1);
|
||||
|
||||
let process = &processes[0];
|
||||
assert_eq!(process.id, 42);
|
||||
assert_eq!(&process.catalog, "test_catalog");
|
||||
assert_eq!(process.schemas, vec!["schema1", "schema2"]);
|
||||
assert_eq!(&process.query, "SELECT COUNT(*) FROM users WHERE age > 18");
|
||||
assert_eq!(&process.client, "test_client");
|
||||
assert_eq!(&process.frontend, "127.0.0.1:8000");
|
||||
assert!(process.start_timestamp > 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ticket_drop_deregisters_process() {
|
||||
let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
|
||||
|
||||
{
|
||||
let _ticket = process_manager.clone().register_query(
|
||||
"public".to_string(),
|
||||
vec!["test".to_string()],
|
||||
"SELECT * FROM table".to_string(),
|
||||
"client1".to_string(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
// Process should be registered
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
|
||||
} // ticket goes out of scope here
|
||||
|
||||
// Process should be automatically deregistered
|
||||
assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
|
||||
}
|
||||
}
|
||||
@@ -15,7 +15,7 @@
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod pg_catalog;
|
||||
pub mod predicate;
|
||||
mod predicate;
|
||||
mod utils;
|
||||
|
||||
use std::collections::HashMap;
|
||||
@@ -96,7 +96,7 @@ trait SystemSchemaProviderInner {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait SystemTable {
|
||||
pub(crate) trait SystemTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
@@ -110,7 +110,7 @@ pub trait SystemTable {
|
||||
}
|
||||
}
|
||||
|
||||
pub type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
|
||||
struct SystemTableDataSource {
|
||||
table: SystemTableRef,
|
||||
|
||||
@@ -19,7 +19,6 @@ mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod partitions;
|
||||
mod procedure_info;
|
||||
pub mod process_list;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
@@ -38,13 +37,11 @@ use common_meta::cluster::NodeInfo;
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureInfo;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use process_list::InformationSchemaProcessList;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
use table::TableRef;
|
||||
@@ -53,7 +50,6 @@ use views::InformationSchemaViews;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
|
||||
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
|
||||
@@ -113,36 +109,12 @@ macro_rules! setup_memory_table {
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct MakeInformationTableRequest {
|
||||
pub catalog_name: String,
|
||||
pub catalog_manager: Weak<dyn CatalogManager>,
|
||||
pub kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
/// A factory trait for making information schema tables.
|
||||
///
|
||||
/// This trait allows for extensibility of the information schema by providing
|
||||
/// a way to dynamically create custom information schema tables.
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub trait InformationSchemaTableFactory {
|
||||
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
|
||||
|
||||
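
For illustration only: a minimal sketch (not part of this diff) of how the factory hook above could be plugged in. `MyAuditTable` is a hypothetical type assumed to already implement `SystemTable`:

#[cfg(feature = "enterprise")]
struct MyAuditTableFactory;

#[cfg(feature = "enterprise")]
impl InformationSchemaTableFactory for MyAuditTableFactory {
    fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef {
        // Build the custom table from the per-catalog context carried by the request.
        Arc::new(MyAuditTable::new(req.catalog_name, req.kv_backend))
    }
}
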
/// The `information_schema` tables info provider.
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
#[allow(dead_code)]
|
||||
kv_backend: KvBackendRef,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
@@ -152,7 +124,6 @@ impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
&self.tables
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
@@ -162,16 +133,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
#[cfg(feature = "enterprise")]
|
||||
if let Some(factory) = self.extra_table_factories.get(name) {
|
||||
let req = MakeInformationTableRequest {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
kv_backend: self.kv_backend.clone(),
|
||||
};
|
||||
return Some(factory.make_information_table(req));
|
||||
}
|
||||
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
@@ -246,10 +207,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
self.catalog_manager.clone(),
|
||||
),
|
||||
) as _),
|
||||
PROCESS_LIST => self
|
||||
.process_manager
|
||||
.as_ref()
|
||||
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -260,18 +217,12 @@ impl InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager,
|
||||
process_manager,
|
||||
tables: HashMap::new(),
|
||||
kv_backend,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
@@ -279,16 +230,6 @@ impl InformationSchemaProvider {
|
||||
provider
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(crate) fn with_extra_table_factories(
|
||||
mut self,
|
||||
factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
) -> Self {
|
||||
self.extra_table_factories = factories;
|
||||
self.build_tables();
|
||||
self
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
@@ -336,22 +277,16 @@ impl InformationSchemaProvider {
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
|
||||
if let Some(process_list) = self.build_table(PROCESS_LIST) {
|
||||
tables.insert(PROCESS_LIST.to_string(), process_list);
|
||||
}
|
||||
#[cfg(feature = "enterprise")]
|
||||
for name in self.extra_table_factories.keys() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
pub trait InformationTable {
|
||||
trait InformationTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
|
||||
@@ -15,8 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use crate::system_schema::information_schema::table_names::*;
|
||||
@@ -368,18 +367,28 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
|
||||
TRIGGERS => (
|
||||
vec![
|
||||
string_column("TRIGGER_CATALOG"),
|
||||
string_column("TRIGGER_SCHEMA"),
|
||||
string_column("TRIGGER_NAME"),
|
||||
ColumnSchema::new(
|
||||
"trigger_id",
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
false,
|
||||
),
|
||||
string_column("TRIGGER_DEFINITION"),
|
||||
ColumnSchema::new(
|
||||
"flownode_id",
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
true,
|
||||
),
|
||||
string_column("EVENT_MANIPULATION"),
|
||||
string_column("EVENT_OBJECT_CATALOG"),
|
||||
string_column("EVENT_OBJECT_SCHEMA"),
|
||||
string_column("EVENT_OBJECT_TABLE"),
|
||||
bigint_column("ACTION_ORDER"),
|
||||
string_column("ACTION_CONDITION"),
|
||||
string_column("ACTION_STATEMENT"),
|
||||
string_column("ACTION_ORIENTATION"),
|
||||
string_column("ACTION_TIMING"),
|
||||
string_column("ACTION_REFERENCE_OLD_TABLE"),
|
||||
string_column("ACTION_REFERENCE_NEW_TABLE"),
|
||||
string_column("ACTION_REFERENCE_OLD_ROW"),
|
||||
string_column("ACTION_REFERENCE_NEW_ROW"),
|
||||
timestamp_micro_column("CREATED"),
|
||||
string_column("SQL_MODE"),
|
||||
string_column("DEFINER"),
|
||||
string_column("CHARACTER_SET_CLIENT"),
|
||||
string_column("COLLATION_CONNECTION"),
|
||||
string_column("DATABASE_COLLATION"),
|
||||
],
|
||||
vec![],
|
||||
),
|
||||
|
||||
@@ -329,8 +329,13 @@ impl InformationSchemaPartitionsBuilder {
|
||||
self.partition_names.push(Some(&partition_name));
|
||||
self.partition_ordinal_positions
|
||||
.push(Some((index + 1) as i64));
|
||||
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_expressions.push(expression.as_deref());
|
||||
let expressions = if partition.partition.partition_columns().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(partition.partition.to_string())
|
||||
};
|
||||
|
||||
self.partition_expressions.push(expressions.as_deref());
|
||||
self.create_times.push(Some(TimestampMicrosecond::from(
|
||||
table_info.meta.created_on.timestamp_millis(),
|
||||
)));
|
||||
|
||||
@@ -1,189 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_frontend::DisplayProcessId;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::util::current_time_millis;
|
||||
use common_time::{Duration, Timestamp};
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
DurationMillisecondVectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
|
||||
VectorRef,
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{self, InternalSnafu};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
/// Column names of `information_schema.process_list`
|
||||
pub const ID: &str = "id";
|
||||
pub const CATALOG: &str = "catalog";
|
||||
pub const SCHEMAS: &str = "schemas";
|
||||
pub const QUERY: &str = "query";
|
||||
pub const CLIENT: &str = "client";
|
||||
pub const FRONTEND: &str = "frontend";
|
||||
pub const START_TIMESTAMP: &str = "start_timestamp";
|
||||
pub const ELAPSED_TIME: &str = "elapsed_time";
|
||||
|
||||
/// `information_schema.process_list` table implementation that tracks running
|
||||
/// queries in the current cluster.
|
||||
pub struct InformationSchemaProcessList {
|
||||
schema: SchemaRef,
|
||||
process_manager: ProcessManagerRef,
|
||||
}
|
||||
|
||||
impl InformationSchemaProcessList {
|
||||
pub fn new(process_manager: ProcessManagerRef) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
process_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(ID, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(CATALOG, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(SCHEMAS, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(QUERY, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(CLIENT, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(FRONTEND, CDT::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
START_TIMESTAMP,
|
||||
CDT::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(ELAPSED_TIME, CDT::duration_millisecond_datatype(), false),
|
||||
]))
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaProcessList {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
"process_list"
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> error::Result<SendableRecordBatchStream> {
|
||||
let process_manager = self.process_manager.clone();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
self.schema.arrow_schema().clone(),
|
||||
futures::stream::once(async move {
|
||||
make_process_list(process_manager, request)
|
||||
.await
|
||||
.map(RecordBatch::into_df_record_batch)
|
||||
.map_err(|e| datafusion::error::DataFusionError::External(Box::new(e)))
|
||||
}),
|
||||
));
|
||||
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Build running process list.
|
||||
async fn make_process_list(
|
||||
process_manager: ProcessManagerRef,
|
||||
request: ScanRequest,
|
||||
) -> error::Result<RecordBatch> {
|
||||
let predicates = Predicates::from_scan_request(&Some(request));
|
||||
let current_time = current_time_millis();
|
||||
// todo(hl): find a way to extract user catalog to filter queries from other users.
|
||||
let queries = process_manager.list_all_processes(None).await?;
|
||||
|
||||
let mut id_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut catalog_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut schemas_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut query_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut client_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut frontend_builder = StringVectorBuilder::with_capacity(queries.len());
|
||||
let mut start_time_builder = TimestampMillisecondVectorBuilder::with_capacity(queries.len());
|
||||
let mut elapsed_time_builder = DurationMillisecondVectorBuilder::with_capacity(queries.len());
|
||||
|
||||
for process in queries {
|
||||
let display_id = DisplayProcessId {
|
||||
server_addr: process.frontend.to_string(),
|
||||
id: process.id,
|
||||
}
|
||||
.to_string();
|
||||
let schemas = process.schemas.join(",");
|
||||
let id = Value::from(display_id);
|
||||
let catalog = Value::from(process.catalog);
|
||||
let schemas = Value::from(schemas);
|
||||
let query = Value::from(process.query);
|
||||
let client = Value::from(process.client);
|
||||
let frontend = Value::from(process.frontend);
|
||||
let start_timestamp = Value::from(Timestamp::new_millisecond(process.start_timestamp));
|
||||
let elapsed_time = Value::from(Duration::new_millisecond(
|
||||
current_time - process.start_timestamp,
|
||||
));
|
||||
let row = [
|
||||
(ID, &id),
|
||||
(CATALOG, &catalog),
|
||||
(SCHEMAS, &schemas),
|
||||
(QUERY, &query),
|
||||
(CLIENT, &client),
|
||||
(FRONTEND, &frontend),
|
||||
(START_TIMESTAMP, &start_timestamp),
|
||||
(ELAPSED_TIME, &elapsed_time),
|
||||
];
|
||||
if predicates.eval(&row) {
|
||||
id_builder.push(id.as_string().as_deref());
|
||||
catalog_builder.push(catalog.as_string().as_deref());
|
||||
schemas_builder.push(schemas.as_string().as_deref());
|
||||
query_builder.push(query.as_string().as_deref());
|
||||
client_builder.push(client.as_string().as_deref());
|
||||
frontend_builder.push(frontend.as_string().as_deref());
|
||||
start_time_builder.push(start_timestamp.as_timestamp().map(|t| t.value().into()));
|
||||
elapsed_time_builder.push(elapsed_time.as_duration().map(|d| d.value().into()));
|
||||
}
|
||||
}
|
||||
|
||||
RecordBatch::new(
|
||||
InformationSchemaProcessList::schema(),
|
||||
vec![
|
||||
Arc::new(id_builder.finish()) as VectorRef,
|
||||
Arc::new(catalog_builder.finish()) as VectorRef,
|
||||
Arc::new(schemas_builder.finish()) as VectorRef,
|
||||
Arc::new(query_builder.finish()) as VectorRef,
|
||||
Arc::new(client_builder.finish()) as VectorRef,
|
||||
Arc::new(frontend_builder.finish()) as VectorRef,
|
||||
Arc::new(start_time_builder.finish()) as VectorRef,
|
||||
Arc::new(elapsed_time_builder.finish()) as VectorRef,
|
||||
],
|
||||
)
|
||||
.context(error::CreateRecordBatchSnafu)
|
||||
}
|
||||
@@ -44,7 +44,6 @@ const DISK_SIZE: &str = "disk_size";
|
||||
const MEMTABLE_SIZE: &str = "memtable_size";
|
||||
const MANIFEST_SIZE: &str = "manifest_size";
|
||||
const SST_SIZE: &str = "sst_size";
|
||||
const SST_NUM: &str = "sst_num";
|
||||
const INDEX_SIZE: &str = "index_size";
|
||||
const ENGINE: &str = "engine";
|
||||
const REGION_ROLE: &str = "region_role";
|
||||
@@ -88,7 +87,6 @@ impl InformationSchemaRegionStatistics {
|
||||
ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(SST_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(SST_NUM, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(INDEX_SIZE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(REGION_ROLE, ConcreteDataType::string_datatype(), true),
|
||||
@@ -151,7 +149,6 @@ struct InformationSchemaRegionStatisticsBuilder {
|
||||
memtable_sizes: UInt64VectorBuilder,
|
||||
manifest_sizes: UInt64VectorBuilder,
|
||||
sst_sizes: UInt64VectorBuilder,
|
||||
sst_nums: UInt64VectorBuilder,
|
||||
index_sizes: UInt64VectorBuilder,
|
||||
engines: StringVectorBuilder,
|
||||
region_roles: StringVectorBuilder,
|
||||
@@ -170,7 +167,6 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sst_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sst_nums: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
index_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
region_roles: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -201,7 +197,6 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
(MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
|
||||
(MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
|
||||
(SST_SIZE, &Value::from(region_stat.sst_size)),
|
||||
(SST_NUM, &Value::from(region_stat.sst_num)),
|
||||
(INDEX_SIZE, &Value::from(region_stat.index_size)),
|
||||
(ENGINE, &Value::from(region_stat.engine.as_str())),
|
||||
(REGION_ROLE, &Value::from(region_stat.role.to_string())),
|
||||
@@ -220,7 +215,6 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
self.memtable_sizes.push(Some(region_stat.memtable_size));
|
||||
self.manifest_sizes.push(Some(region_stat.manifest_size));
|
||||
self.sst_sizes.push(Some(region_stat.sst_size));
|
||||
self.sst_nums.push(Some(region_stat.sst_num));
|
||||
self.index_sizes.push(Some(region_stat.index_size));
|
||||
self.engines.push(Some(®ion_stat.engine));
|
||||
self.region_roles.push(Some(®ion_stat.role.to_string()));
|
||||
@@ -236,7 +230,6 @@ impl InformationSchemaRegionStatisticsBuilder {
|
||||
Arc::new(self.memtable_sizes.finish()),
|
||||
Arc::new(self.manifest_sizes.finish()),
|
||||
Arc::new(self.sst_sizes.finish()),
|
||||
Arc::new(self.sst_nums.finish()),
|
||||
Arc::new(self.index_sizes.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
Arc::new(self.region_roles.finish()),
|
||||
|
||||
@@ -47,4 +47,3 @@ pub const VIEWS: &str = "views";
pub const FLOWS: &str = "flows";
pub const PROCEDURE_INFO: &str = "procedure_info";
pub const REGION_STATISTICS: &str = "region_statistics";
pub const PROCESS_LIST: &str = "process_list";

@@ -169,7 +169,7 @@ impl DfPartitionStream for PGClass {
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have users.
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] cuz we don't have user.
|
||||
/// Once we have user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
|
||||
@@ -70,11 +70,6 @@ impl DfTableSourceProvider {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the query context.
|
||||
pub fn query_ctx(&self) -> &QueryContextRef {
|
||||
&self.query_ctx
|
||||
}
|
||||
|
||||
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> {
|
||||
if self.disallow_cross_catalog_query {
|
||||
match &table_ref {
|
||||
@@ -212,7 +207,6 @@ mod tests {
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::kvbackend::KvBackendCatalogManagerBuilder;
|
||||
use crate::memory::MemoryCatalogManager;
|
||||
|
||||
#[test]
|
||||
@@ -329,13 +323,12 @@ mod tests {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManagerBuilder::new(
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
Arc::new(NoopInformationExtension),
|
||||
backend.clone(),
|
||||
layered_cache_registry,
|
||||
)
|
||||
.build();
|
||||
|
||||
None,
|
||||
);
|
||||
let table_metadata_manager = TableMetadataManager::new(backend);
|
||||
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
|
||||
view_info.table_type = TableType::View;
|
||||
|
||||
@@ -5,18 +5,13 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
default = [
|
||||
"pg_kvbackend",
|
||||
"mysql_kvbackend",
|
||||
]
|
||||
pg_kvbackend = ["common-meta/pg_kvbackend", "meta-srv/pg_kvbackend"]
|
||||
mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
|
||||
pg_kvbackend = ["common-meta/pg_kvbackend"]
|
||||
mysql_kvbackend = ["common-meta/mysql_kvbackend"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
auth.workspace = true
|
||||
base64.workspace = true
|
||||
@@ -43,17 +38,20 @@ common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
common-wal.workspace = true
|
||||
datatypes.workspace = true
|
||||
either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
humantime.workspace = true
|
||||
meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
object-store.workspace = true
|
||||
operator.workspace = true
|
||||
opendal = { version = "0.51.1", features = [
|
||||
"services-fs",
|
||||
"services-s3",
|
||||
] }
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
reqwest.workspace = true
|
||||
rustyline = "10.1"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
servers.workspace = true
|
||||
@@ -66,7 +64,6 @@ tokio.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-version.workspace = true
|
||||
serde.workspace = true
|
||||
tempfile.workspace = true
|
||||
|
||||
@@ -58,7 +58,6 @@ where
|
||||
info!("{desc}, average operation cost: {cost:.2} ms");
|
||||
}
|
||||
|
||||
/// Command to benchmark table metadata operations.
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct BenchTableMetadataCommand {
|
||||
#[clap(long)]
|
||||
@@ -160,7 +159,6 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
|
||||
options: Default::default(),
|
||||
region_numbers: (1..=100).collect(),
|
||||
partition_key_indices: vec![],
|
||||
column_ids: vec![],
|
||||
};
|
||||
|
||||
RawTableInfo {
|
||||
@@ -188,7 +186,6 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
name: String::new(),
|
||||
partition: None,
|
||||
attrs: BTreeMap::new(),
|
||||
partition_expr: Default::default(),
|
||||
},
|
||||
leader_peer: Some(Peer {
|
||||
id: rng.random_range(0..10),
|
||||
|
||||
154
src/cli/src/cmd.rs
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, InvalidReplCommandSnafu, Result};
|
||||
|
||||
/// Represents the parsed command from the user (which may span multiple lines).
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub(crate) enum ReplCommand {
|
||||
Help,
|
||||
UseDatabase { db_name: String },
|
||||
Sql { sql: String },
|
||||
Exit,
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for ReplCommand {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(input: &str) -> Result<Self> {
|
||||
let input = input.trim();
|
||||
if input.is_empty() {
|
||||
return InvalidReplCommandSnafu {
|
||||
reason: "No command specified".to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
// If line ends with ';', it must be treated as a complete input.
|
||||
// However, the opposite is not true.
|
||||
let input_is_completed = input.ends_with(';');
|
||||
|
||||
let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
|
||||
let lowercase = input.to_lowercase();
|
||||
match lowercase.as_str() {
|
||||
"help" => Ok(Self::Help),
|
||||
"exit" | "quit" => Ok(Self::Exit),
|
||||
_ => match input.split_once(' ') {
|
||||
Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
|
||||
Ok(Self::UseDatabase {
|
||||
db_name: database.trim().to_string(),
|
||||
})
|
||||
}
|
||||
// Any valid SQL must contain at least one whitespace.
|
||||
Some(_) if input_is_completed => Ok(Self::Sql {
|
||||
sql: input.to_string(),
|
||||
}),
|
||||
_ => InvalidReplCommandSnafu {
|
||||
reason: format!("unknown command '{input}', maybe input is not completed"),
|
||||
}
|
||||
.fail(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReplCommand {
|
||||
pub fn help() -> &'static str {
|
||||
r#"
|
||||
Available commands (case insensitive):
|
||||
- 'help': print this help
|
||||
- 'exit' or 'quit': exit the REPL
|
||||
- 'use <your database name>': switch to another database/schema context
|
||||
- Any other input text will be treated as SQL.
|
||||
You can enter new lines while typing; just remember to end the statement with ';'.
|
||||
"#
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::error::Error::InvalidReplCommand;
|
||||
|
||||
#[test]
|
||||
fn test_from_str() {
|
||||
fn test_ok(s: &str, expected: ReplCommand) {
|
||||
let actual: ReplCommand = s.try_into().unwrap();
|
||||
assert_eq!(expected, actual, "'{}'", s);
|
||||
}
|
||||
|
||||
fn test_err(s: &str) {
|
||||
let result: Result<ReplCommand> = s.try_into();
|
||||
assert!(matches!(result, Err(InvalidReplCommand { .. })))
|
||||
}
|
||||
|
||||
test_err("");
|
||||
test_err(" ");
|
||||
test_err("\t");
|
||||
|
||||
test_ok("help", ReplCommand::Help);
|
||||
test_ok("help", ReplCommand::Help);
|
||||
test_ok(" help", ReplCommand::Help);
|
||||
test_ok(" help ", ReplCommand::Help);
|
||||
test_ok(" HELP ", ReplCommand::Help);
|
||||
test_ok(" Help; ", ReplCommand::Help);
|
||||
test_ok(" help ; ", ReplCommand::Help);
|
||||
|
||||
test_ok("exit", ReplCommand::Exit);
|
||||
test_ok("exit;", ReplCommand::Exit);
|
||||
test_ok("exit ;", ReplCommand::Exit);
|
||||
test_ok("EXIT", ReplCommand::Exit);
|
||||
|
||||
test_ok("quit", ReplCommand::Exit);
|
||||
test_ok("quit;", ReplCommand::Exit);
|
||||
test_ok("quit ;", ReplCommand::Exit);
|
||||
test_ok("QUIT", ReplCommand::Exit);
|
||||
|
||||
test_ok(
|
||||
"use Foo",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "Foo".to_string(),
|
||||
},
|
||||
);
|
||||
test_ok(
|
||||
" use Foo ; ",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "Foo".to_string(),
|
||||
},
|
||||
);
|
||||
// ensure that database name is case sensitive
|
||||
test_ok(
|
||||
" use FOO ; ",
|
||||
ReplCommand::UseDatabase {
|
||||
db_name: "FOO".to_string(),
|
||||
},
|
||||
);
|
||||
|
||||
// ensure that we aren't messing with capitalization
|
||||
test_ok(
|
||||
"SELECT * from foo;",
|
||||
ReplCommand::Sql {
|
||||
sql: "SELECT * from foo".to_string(),
|
||||
},
|
||||
);
|
||||
// Input lines (that don't belong to any other case above) must end with ';' to be treated as valid SQL.
|
||||
test_err("insert blah");
|
||||
test_ok(
|
||||
"insert blah;",
|
||||
ReplCommand::Sql {
|
||||
sql: "insert blah".to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
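
For illustration only: a minimal sketch (not part of this change) of how the `ReplCommand` parser above might drive a read-eval loop. The `read_line`, `switch_database`, and `run_sql` helpers are hypothetical:

loop {
    let line = read_line();
    match ReplCommand::try_from(line.as_str()) {
        Ok(ReplCommand::Help) => println!("{}", ReplCommand::help()),
        Ok(ReplCommand::Exit) => break,
        Ok(ReplCommand::UseDatabase { db_name }) => switch_database(&db_name),
        Ok(ReplCommand::Sql { sql }) => run_sql(&sql),
        Err(e) => eprintln!("{e}"),
    }
}
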
@@ -1,39 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod export;
|
||||
mod import;
|
||||
|
||||
use clap::Subcommand;
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::data::export::ExportCommand;
|
||||
use crate::data::import::ImportCommand;
|
||||
use crate::Tool;
|
||||
|
||||
/// Command for data operations including exporting data from and importing data into GreptimeDB.
|
||||
#[derive(Subcommand)]
|
||||
pub enum DataCommand {
|
||||
Export(ExportCommand),
|
||||
Import(ImportCommand),
|
||||
}
|
||||
|
||||
impl DataCommand {
|
||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||
match self {
|
||||
DataCommand::Export(cmd) => cmd.build().await,
|
||||
DataCommand::Import(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -17,10 +17,7 @@ use std::any::Any;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_meta::peer::Peer;
|
||||
use object_store::Error as ObjectStoreError;
|
||||
use snafu::{Location, Snafu};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -32,7 +29,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
msg: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create default catalog and schema"))]
|
||||
InitMetadata {
|
||||
#[snafu(implicit)]
|
||||
@@ -75,20 +71,6 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get table metadata"))]
|
||||
TableMetadata {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unexpected error: {}", msg))]
|
||||
Unexpected {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing config, msg: {}", msg))]
|
||||
MissingConfig {
|
||||
msg: String,
|
||||
@@ -119,6 +101,9 @@ pub enum Error {
|
||||
error: reqwest::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid REPL command: {reason}"))]
|
||||
InvalidReplCommand { reason: String },
|
||||
|
||||
#[snafu(display("Failed to parse SQL: {}", sql))]
|
||||
ParseSql {
|
||||
sql: String,
|
||||
@@ -238,81 +223,23 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found: {table_id}"))]
|
||||
TableNotFound {
|
||||
table_id: TableId,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("OpenDAL operator failed"))]
|
||||
OpenDal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: ObjectStoreError,
|
||||
error: opendal::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("S3 config need be set"))]
|
||||
S3ConfigNotSet {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Output directory not set"))]
|
||||
OutputDirNotSet {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Empty store addresses"))]
|
||||
EmptyStoreAddrs {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported memory backend"))]
|
||||
UnsupportedMemoryBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("File path invalid: {}", msg))]
|
||||
InvalidFilePath {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid arguments: {}", msg))]
|
||||
InvalidArguments {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init backend"))]
|
||||
InitBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: ObjectStoreError,
|
||||
},
|
||||
|
||||
#[snafu(display("Covert column schemas to defs failed"))]
|
||||
CovertColumnSchemasToDefs {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: operator::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to send request to datanode: {}", peer))]
|
||||
SendRequestToDatanode {
|
||||
peer: Peer,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -320,25 +247,20 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InitMetadata { source, .. }
|
||||
| Error::InitDdlManager { source, .. }
|
||||
| Error::TableMetadata { source, .. } => source.status_code(),
|
||||
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::InitTimezone { .. }
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. }
|
||||
| Error::InvalidFilePath { .. }
|
||||
| Error::UnsupportedMemoryBackend { .. }
|
||||
| Error::InvalidArguments { .. }
|
||||
| Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::CovertColumnSchemasToDefs { source, .. } => source.status_code(),
|
||||
Error::SendRequestToDatanode { source, .. } => source.status_code(),
|
||||
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
|
||||
@@ -346,7 +268,6 @@ impl ErrorExt for Error {
|
||||
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::Unexpected { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SerdeJson { .. }
|
||||
| Error::FileIo { .. }
|
||||
@@ -355,16 +276,14 @@ impl ErrorExt for Error {
|
||||
| Error::BuildClient { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
Error::OpenDal { .. } | Error::InitBackend { .. } => StatusCode::Internal,
|
||||
Error::S3ConfigNotSet { .. }
|
||||
| Error::OutputDirNotSet { .. }
|
||||
| Error::EmptyStoreAddrs { .. } => StatusCode::InvalidArguments,
|
||||
Error::OpenDal { .. } => StatusCode::Internal,
|
||||
Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
|
||||
Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||
|
||||
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
||||
Error::MetaClientInit { source, .. } => source.status_code(),
|
||||
Error::TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.