Compare commits

..

1 Commit

Author | SHA1 | Message | Date
discord9 | e2c7dfcaba | fix: choose frontend randomly | 2025-05-09 17:39:20 +08:00
456 changed files with 13214 additions and 28689 deletions

2
.github/CODEOWNERS vendored

@@ -4,7 +4,7 @@
* @GreptimeTeam/db-approver
## [Module] Database Engine
## [Module] Databse Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag


@@ -52,7 +52,7 @@ runs:
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
features: servers/dashboard,pg_kvbackend,mysql_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}


@@ -22,6 +22,7 @@ datanode:
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
overwrite_entry_start_id = true
frontend:
configData: |-


@@ -8,7 +8,7 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from environment variables.
# Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2
exit 1
@@ -16,8 +16,7 @@ function create_version() {
if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
# NOTE: Need a `v` prefix for the version string.
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then


@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
# Create a cluster with 1 control-plane node and 5 workers.
# Ceate a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster


@@ -4,7 +4,7 @@ DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
@@ -17,7 +17,7 @@ update_dev_builder_version() {
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile


@@ -1,46 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_helm_charts_version() {
# Configure Git configs.
git config --global user.email update-helm-charts-version@greptime.com
git config --global user.name update-helm-charts-version
# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
cd helm-charts
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/helm-charts
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-version CHART=greptimedb-cluster VERSION=${VERSION}
make update-version CHART=greptimedb-standalone VERSION=${VERSION}
# Update docs.
make docs
# Commit the changes.
git add .
git commit -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_helm_charts_version


@@ -1,42 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_homebrew_greptime_version() {
# Configure Git configs.
git config --global user.email update-greptime-version@greptime.com
git config --global user.name update-greptime-version
# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
cd homebrew-greptime
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/homebrew-greptime
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-greptime-version VERSION=${VERSION}
# Commit the changes.
git add .
git commit -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_homebrew_greptime_version


@@ -41,7 +41,7 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt


@@ -55,11 +55,6 @@ on:
description: Build and push images to DockerHub and ACR
required: false
default: true
upload_artifacts_to_s3:
type: boolean
description: Whether upload artifacts to s3
required: false
default: false
cargo_profile:
type: choice
description: The cargo profile to use in building GreptimeDB.
@@ -243,7 +238,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
- name: Echo Docker image tag to step summary
run: |
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
@@ -286,7 +281,7 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
upload-to-s3: false
dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3.


@@ -195,7 +195,6 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
@@ -223,12 +222,12 @@ jobs:
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binary
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
- name: Unzip bianry
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
@@ -276,7 +275,7 @@ jobs:
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
@@ -300,7 +299,6 @@ jobs:
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
@@ -330,9 +328,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -433,7 +431,6 @@ jobs:
needs: build-greptime-ci
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
@@ -478,9 +475,9 @@ jobs:
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
@@ -581,7 +578,6 @@ jobs:
needs: build
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest ]
mode:


@@ -124,9 +124,9 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v31
- run: nix develop --command cargo check --bin greptime
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
with:
nix_path: nixpkgs=channel:nixos-24.11
- run: nix develop --command cargo build --bin greptime
check-status:
name: Check status


@@ -88,7 +88,7 @@ env:
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
jobs:
@@ -124,7 +124,7 @@ jobs:
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version
id: create-version
@@ -388,7 +388,7 @@ jobs:
### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
@@ -444,7 +444,7 @@ jobs:
bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners, publish-github-release]
needs: [allocate-runners]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -466,8 +466,8 @@ jobs:
bump-website-version:
name: Bump website version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -487,48 +487,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump helm charts version
env:
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-helm-charts-version.sh
bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump homebrew greptime version
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-homebrew-greptme-version.sh
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team


@@ -14,9 +14,6 @@ concurrency:
jobs:
check:
runs-on: ubuntu-latest
permissions:
pull-requests: write # Add permissions to modify PRs
issues: write
timeout-minutes: 10
steps:
- uses: actions/checkout@v4

3
.gitignore vendored

@@ -58,6 +58,3 @@ tests-fuzz/corpus/
## default data home
greptimedb_data
# github
!/.github


@@ -108,7 +108,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:

236
Cargo.lock generated

@@ -173,9 +173,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.89"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
[[package]]
name = "anymap2"
@@ -1571,7 +1571,7 @@ dependencies = [
"partition",
"paste",
"prometheus",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"serde_json",
"session",
"snafu 0.8.5",
@@ -1593,9 +1593,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.1.24"
version = "1.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
dependencies = [
"jobserver",
"libc",
@@ -1852,9 +1852,8 @@ dependencies = [
"futures",
"humantime",
"meta-client",
"meta-srv",
"nu-ansi-term",
"object-store",
"opendal 0.51.2",
"query",
"rand 0.9.0",
"reqwest",
@@ -1890,7 +1889,6 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-telemetry",
"datatypes",
"enum_dispatch",
"futures",
"futures-util",
@@ -2225,7 +2223,6 @@ version = "0.15.0"
dependencies = [
"api",
"arrow-flight",
"bytes",
"common-base",
"common-error",
"common-macro",
@@ -2288,11 +2285,8 @@ dependencies = [
name = "common-mem-prof"
version = "0.15.0"
dependencies = [
"anyhow",
"common-error",
"common-macro",
"mappings",
"pprof_util",
"snafu 0.8.5",
"tempfile",
"tikv-jemalloc-ctl",
@@ -2323,10 +2317,8 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-telemetry",
"common-test-util",
"common-time",
"common-wal",
"common-workload",
"datafusion-common",
"datafusion-expr",
"datatypes",
@@ -2334,7 +2326,6 @@ dependencies = [
"deadpool-postgres",
"derive_builder 0.20.1",
"etcd-client",
"flexbuffers",
"futures",
"futures-util",
"hex",
@@ -2343,7 +2334,6 @@ dependencies = [
"itertools 0.14.0",
"lazy_static",
"moka",
"object-store",
"prometheus",
"prost 0.13.5",
"rand 0.9.0",
@@ -2542,7 +2532,6 @@ name = "common-test-util"
version = "0.15.0"
dependencies = [
"client",
"common-grpc",
"common-query",
"common-recordbatch",
"once_cell",
@@ -2601,15 +2590,6 @@ dependencies = [
"toml 0.8.19",
]
[[package]]
name = "common-workload"
version = "0.15.0"
dependencies = [
"api",
"common-telemetry",
"serde",
]
[[package]]
name = "concurrent-queue"
version = "2.5.0"
@@ -2891,9 +2871,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
version = "0.5.13"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"crossbeam-utils",
]
@@ -2950,9 +2930,9 @@ dependencies = [
[[package]]
name = "csv"
version = "1.3.0"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
dependencies = [
"csv-core",
"itoa",
@@ -3537,7 +3517,6 @@ dependencies = [
"common-time",
"common-version",
"common-wal",
"common-workload",
"dashmap",
"datafusion",
"datafusion-common",
@@ -4000,25 +3979,6 @@ dependencies = [
"syn 2.0.100",
]
[[package]]
name = "env_filter"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
dependencies = [
"log",
]
[[package]]
name = "env_logger"
version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f"
dependencies = [
"env_filter",
"log",
]
[[package]]
name = "equator"
version = "0.2.2"
@@ -4265,19 +4225,6 @@ dependencies = [
"miniz_oxide",
]
[[package]]
name = "flexbuffers"
version = "25.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "935627e7bc8f083035d9faad09ffaed9128f73fb1f74a8798f115749c43378e8"
dependencies = [
"bitflags 1.3.2",
"byteorder",
"num_enum 0.5.11",
"serde",
"serde_derive",
]
[[package]]
name = "float-cmp"
version = "0.10.0"
@@ -4372,7 +4319,6 @@ dependencies = [
"session",
"smallvec",
"snafu 0.8.5",
"sql",
"store-api",
"strum 0.27.1",
"substrait 0.15.0",
@@ -4478,7 +4424,6 @@ dependencies = [
"promql-parser",
"prost 0.13.5",
"query",
"rand 0.9.0",
"serde",
"serde_json",
"servers",
@@ -4876,7 +4821,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2dca1dc67862d7b410838aef81232274c019b3f6#2dca1dc67862d7b410838aef81232274c019b3f6"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=17a3550751c8b1e02ec16be40101d5f24dc255c3#17a3550751c8b1e02ec16be40101d5f24dc255c3"
dependencies = [
"prost 0.13.5",
"serde",
@@ -5704,28 +5649,6 @@ dependencies = [
"str_stack",
]
[[package]]
name = "inferno"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2094aecddc672e902cd773bad7071542f63641e01e9187c3bba4b43005e837e9"
dependencies = [
"ahash 0.8.11",
"clap 4.5.19",
"crossbeam-channel",
"crossbeam-utils",
"dashmap",
"env_logger",
"indexmap 2.9.0",
"itoa",
"log",
"num-format",
"once_cell",
"quick-xml 0.37.5",
"rgb",
"str_stack",
]
[[package]]
name = "influxdb_line_protocol"
version = "0.1.0"
@@ -6437,9 +6360,9 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.22"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "log-query"
@@ -6631,19 +6554,6 @@ dependencies = [
"thiserror 1.0.64",
]
[[package]]
name = "mappings"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e434981a332777c2b3062652d16a55f8e74fa78e6b1882633f0d77399c84fc2a"
dependencies = [
"anyhow",
"libc",
"once_cell",
"pprof_util",
"tracing",
]
[[package]]
name = "match_cfg"
version = "0.1.0"
@@ -6805,7 +6715,6 @@ dependencies = [
"common-time",
"common-version",
"common-wal",
"common-workload",
"dashmap",
"datatypes",
"deadpool",
@@ -6976,7 +6885,6 @@ dependencies = [
"common-decimal",
"common-error",
"common-function",
"common-grpc",
"common-macro",
"common-meta",
"common-query",
@@ -7594,34 +7502,13 @@ dependencies = [
"libc",
]
[[package]]
name = "num_enum"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
dependencies = [
"num_enum_derive 0.5.11",
]
[[package]]
name = "num_enum"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
dependencies = [
"num_enum_derive 0.7.3",
]
[[package]]
name = "num_enum_derive"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
"syn 1.0.109",
"num_enum_derive",
]
[[package]]
@@ -7675,7 +7562,7 @@ dependencies = [
"lazy_static",
"md5",
"moka",
"opendal",
"opendal 0.52.0",
"prometheus",
"tokio",
"uuid",
@@ -7714,7 +7601,7 @@ dependencies = [
"futures",
"futures-util",
"object_store",
"opendal",
"opendal 0.52.0",
"pin-project",
"tokio",
]
@@ -7740,6 +7627,35 @@ version = "11.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
[[package]]
name = "opendal"
version = "0.51.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b1063ea459fa9e94584115743b06330f437902dd1d9f692b863ef1875a20548"
dependencies = [
"anyhow",
"async-trait",
"backon",
"base64 0.22.1",
"bytes",
"chrono",
"crc32c",
"futures",
"getrandom 0.2.15",
"http 1.1.0",
"log",
"md-5",
"once_cell",
"percent-encoding",
"quick-xml 0.36.2",
"reqsign",
"reqwest",
"serde",
"serde_json",
"tokio",
"uuid",
]
[[package]]
name = "opendal"
version = "0.52.0"
@@ -8091,7 +8007,7 @@ dependencies = [
"arrow 53.4.1",
"arrow-ipc 53.4.1",
"lazy_static",
"num_enum 0.7.3",
"num_enum",
"opentelemetry-proto 0.27.0",
"paste",
"prost 0.13.5",
@@ -8428,9 +8344,9 @@ dependencies = [
[[package]]
name = "pgwire"
version = "0.30.1"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec79ee18e6cafde8698885646780b967ecc905120798b8359dd0da64f9688e89"
checksum = "a4e6fcdc2ae2173ef8ee1005b6e46453d45195ac3d97caac0db7ecf64ab4aa85"
dependencies = [
"async-trait",
"bytes",
@@ -8736,7 +8652,7 @@ dependencies = [
"cfg-if",
"criterion 0.5.1",
"findshlibs",
"inferno 0.11.21",
"inferno",
"libc",
"log",
"nix 0.26.4",
@@ -8753,21 +8669,6 @@ dependencies = [
"thiserror 1.0.64",
]
[[package]]
name = "pprof_util"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa015c78eed2130951e22c58d2095849391e73817ab2e74f71b0b9f63dd8416"
dependencies = [
"anyhow",
"backtrace",
"flate2",
"inferno 0.12.2",
"num",
"paste",
"prost 0.13.5",
]
[[package]]
name = "ppv-lite86"
version = "0.2.20"
@@ -8986,7 +8887,8 @@ dependencies = [
[[package]]
name = "promql-parser"
version = "0.5.1"
source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=0410e8b459dda7cb222ce9596f8bf3971bd07bd2#0410e8b459dda7cb222ce9596f8bf3971bd07bd2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60d851f6523a8215e2fbf86b6cef4548433f8b76092e9ffb607105de52ae63fd"
dependencies = [
"cfgrammar",
"chrono",
@@ -8996,7 +8898,6 @@ dependencies = [
"regex",
"serde",
"serde_json",
"unescaper",
]
[[package]]
@@ -9352,15 +9253,6 @@ dependencies = [
"serde",
]
[[package]]
name = "quick-xml"
version = "0.37.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb"
dependencies = [
"memchr",
]
[[package]]
name = "quinn"
version = "0.11.5"
@@ -9371,7 +9263,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"rustls",
"socket2",
"thiserror 1.0.64",
@@ -9388,7 +9280,7 @@ dependencies = [
"bytes",
"rand 0.8.5",
"ring",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"rustls",
"slab",
"thiserror 1.0.64",
@@ -9638,9 +9530,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -9822,15 +9714,14 @@ dependencies = [
[[package]]
name = "ring"
version = "0.17.8"
version = "0.17.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
dependencies = [
"cc",
"cfg-if",
"getrandom 0.2.15",
"libc",
"spin",
"untrusted",
"windows-sys 0.52.0",
]
@@ -9924,7 +9815,6 @@ dependencies = [
"hmac",
"pbkdf2",
"rand 0.8.5",
"serde",
"serde_json",
"sha2",
"stringprep",
@@ -9933,8 +9823,8 @@ dependencies = [
[[package]]
name = "rskafka"
version = "0.6.0"
source = "git+https://github.com/influxdata/rskafka.git?rev=8dbd01ed809f5a791833a594e85b144e36e45820#8dbd01ed809f5a791833a594e85b144e36e45820"
version = "0.5.0"
source = "git+https://github.com/influxdata/rskafka.git?rev=75535b5ad9bae4a5dbb582c82e44dfd81ec10105#75535b5ad9bae4a5dbb582c82e44dfd81ec10105"
dependencies = [
"bytes",
"chrono",
@@ -9944,11 +9834,11 @@ dependencies = [
"integer-encoding 4.0.2",
"lz4",
"parking_lot 0.12.3",
"rand 0.9.0",
"rand 0.8.5",
"rsasl",
"rustls",
"snap",
"thiserror 2.0.12",
"thiserror 1.0.64",
"tokio",
"tokio-rustls",
"tracing",
@@ -10103,9 +9993,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc-hash"
version = "2.0.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustc_version"


@@ -36,7 +36,6 @@ members = [
"src/common/time",
"src/common/version",
"src/common/wal",
"src/common/workload",
"src/datanode",
"src/datatypes",
"src/file-engine",
@@ -79,7 +78,6 @@ clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
clippy.large_enum_variant = "allow"
clippy.doc_overindented_list_items = "allow"
clippy.uninlined_format_args = "allow"
rust.unknown_lints = "deny"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
@@ -132,7 +130,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2dca1dc67862d7b410838aef81232274c019b3f6" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "17a3550751c8b1e02ec16be40101d5f24dc255c3" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -164,9 +162,7 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
"ser",
] }
promql-parser = { version = "0.5.1", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
@@ -179,7 +175,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.25"
@@ -260,7 +256,6 @@ common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" }
common-wal = { path = "src/common/wal" }
common-workload = { path = "src/common/workload" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }


@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
DEV_BUILDER_IMAGE_TAG ?= 2025-04-15-1a517ec8-20250428023155
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu


@@ -121,7 +121,7 @@ docker pull greptime/greptimedb
```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \
-v "$(pwd)/greptimedb_data:/greptimedb_data" \
-v "$(pwd)/greptimedb:/greptimedb_data" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
@@ -129,7 +129,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
```
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
**Troubleshooting:**
@@ -167,7 +167,7 @@ cargo run -- standalone start
## Project Status
> **Status:** Beta.
> **Status:** Beta.
> **GA (v1.0):** Targeted for mid 2025.
- Being used in production by early adopters
@@ -197,8 +197,8 @@ GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/license
## Commercial Support
Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
[Contact us](https://greptime.com/contactus) for details.
## Contributing
@@ -215,3 +215,4 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
- [etcd](https://etcd.io/) (meta service)


@@ -27,7 +27,6 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
@@ -155,7 +154,6 @@
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -190,11 +188,10 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
@@ -227,7 +224,6 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
@@ -292,12 +288,10 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
@@ -331,10 +325,6 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -372,6 +362,10 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
@@ -501,7 +495,6 @@
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -536,6 +529,10 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
@@ -588,5 +585,9 @@
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |


@@ -499,9 +499,6 @@ content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -635,6 +632,19 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]


@@ -100,6 +100,19 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.


@@ -37,12 +37,6 @@ enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not valid strings.
prom_validation_mode = "strict"
## The gRPC server options.
[grpc]
@@ -229,24 +223,18 @@ max_log_files = 720
default_ratio = 1.0
## The slow query log options.
[slow_query]
[logging.slow_query]
## Whether to enable slow query log.
enable = true
enable = false
## The record type of slow queries. It can be `system_table` or `log`.
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
record_type = "system_table"
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`.
threshold = "30s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
ttl = "30d"
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]


@@ -67,17 +67,6 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## Procedure storage options.
[procedure]
@@ -229,6 +218,19 @@ max_log_files = 720
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]


@@ -43,13 +43,6 @@ enable_cors = true
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not valid strings.
prom_validation_mode = "strict"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -597,9 +590,6 @@ content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -734,21 +724,17 @@ max_log_files = 720
default_ratio = 1.0
## The slow query log options.
[slow_query]
[logging.slow_query]
## Whether to enable slow query log.
#+ enable = false
## The record type of slow queries. It can be `system_table` or `log`.
## @toml2docs:none-default
#+ record_type = "system_table"
enable = false
## The threshold of slow query.
## @toml2docs:none-default
#+ threshold = "10s"
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
#+ sample_ratio = 1.0
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.


@@ -11,6 +11,6 @@ And database will reply with something like:
Log Level changed from Some("info") to "trace,flow=debug"%
```
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follow the same rule of `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
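As a small, hedged illustration of that `RUST_LOG`-style syntax (this is not GreptimeDB code; it assumes the `tracing` and `tracing-subscriber` crates with the `env-filter` feature enabled), the filter string from the example reply above can be parsed like this:

```rust
// Sketch only: parse a `global_level,module1=level1,...` filter string.
// Assumes dependencies `tracing` and `tracing-subscriber` (feature "env-filter").
use tracing_subscriber::EnvFilter;

fn main() {
    // "trace,flow=debug": global level `trace`, but only `debug` for the `flow` module.
    let filter = EnvFilter::try_new("trace,flow=debug").expect("valid RUST_LOG-style filter");
    tracing_subscriber::fmt().with_env_filter(filter).init();

    tracing::info!("emitted: the global level is `trace`");
}
```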


@@ -14,7 +14,7 @@ impl SqlQueryHandler for Instance {
```
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL
will be fed into `StatementExecutor`:
will be feed into `StatementExecutor`:
```rust
// in Frontend Instance:
@@ -27,7 +27,7 @@ an example.
Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's
a `SqlStatementExecutor` field in `StatementExecutor`. Each GreptimeDB Standalone and Cluster has its own implementation
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two modes (
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two mode (
like `CREATE TABLE`), you have to implement them in their own `SqlStatementExecutor`s.
Summarize as the diagram below:
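The diagram itself is not included in this excerpt. As a rough stand-in, here is a hedged Rust sketch of the dispatch it describes; the types and signatures are deliberately simplified (the real GreptimeDB traits are async and carry much more context), so treat it as an illustration of the shape of the design rather than the actual API.

```rust
// Simplified stand-ins for the real request/response types.
#[derive(Debug)]
struct Statement(String);
#[derive(Debug)]
struct Output(String);

// Mode-specific part: Standalone and Cluster each provide their own impl.
trait SqlStatementExecutor {
    fn execute_create_table(&self, stmt: &Statement) -> Output;
}

struct StandaloneExecutor;
impl SqlStatementExecutor for StandaloneExecutor {
    fn execute_create_table(&self, stmt: &Statement) -> Output {
        Output(format!("standalone handled: {}", stmt.0))
    }
}

// Shared part: common statements are handled here; only mode-specific
// statements are delegated to the inner `SqlStatementExecutor`.
struct StatementExecutor {
    sql_stmt_executor: Box<dyn SqlStatementExecutor>,
}

impl StatementExecutor {
    fn execute_sql(&self, stmt: Statement) -> Output {
        if stmt.0.to_uppercase().starts_with("CREATE TABLE") {
            self.sql_stmt_executor.execute_create_table(&stmt)
        } else {
            Output(format!("handled by the common path: {}", stmt.0))
        }
    }
}

fn main() {
    // In the real flow, `do_query` parses the SQL and then calls into the executor.
    let executor = StatementExecutor {
        sql_stmt_executor: Box::new(StandaloneExecutor),
    };
    println!("{:?}", executor.execute_sql(Statement("CREATE TABLE t".into())));
    println!("{:?}", executor.execute_sql(Statement("SELECT 1".into())));
}
```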


@@ -1,6 +1,6 @@
# Profile memory usage of GreptimeDB
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](./memory-profile-scripts/scripts).
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](docs/how-to/memory-profile-scripts).
## Prerequisites
### jemalloc
@@ -44,10 +44,6 @@ Dump memory profiling data through HTTP API:
```bash
curl -X POST localhost:4000/debug/prof/mem > greptime.hprof
# or output flamegraph directly
curl -X POST "localhost:4000/debug/prof/mem?output=flamegraph" > greptime.svg
# or output pprof format
curl -X POST "localhost:4000/debug/prof/mem?output=proto" > greptime.pprof
```
You can periodically dump profiling data and compare them to find the delta memory usage.


@@ -1,8 +1,8 @@
Currently, our query engine is based on DataFusion, so all aggregate function is executed by DataFusion, through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered in a struct called "Accumulator" to accumulates states along the way in aggregation.
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's preferred way to write UDAF.)
However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's prefer way to write UDAF.)
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we find a way to bypass DataFusion's restriction, a restriction that DataFusion simply doesn't pass the input data's type when creating an Accumulator.
So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we found a way to bypassing DataFusion's restriction, a restriction that DataFusion simply don't pass the input data's type when creating an Accumulator.
> There's an example in `my_sum_udaf_example.rs`, take that as quick start.
@@ -16,7 +16,7 @@ You must first define a struct that will be used to create your accumulator. For
struct MySumAccumulatorCreator {}
```
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
Attribute macro `#[as_aggr_func_creator]` and derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both annotated on the struct. They work together to provide a storage of aggregate function's input data types, which are needed for creating generic accumulator later.
> Note that the `as_aggr_func_creator` macro will add fields to the struct, so the struct cannot be defined as an empty struct without field like `struct Foo;`, neither as a new type like `struct Foo(bar)`.
@@ -32,11 +32,11 @@ pub trait AggregateFunctionCreator: Send + Sync + Debug {
You can use the input data's types in the methods that return the output type and state types (just invoke `input_types()`).
The output type is the aggregate function's output data's type. For example, the `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are the accumulator's internal states' types. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for the sum) and `u64` (for the count).
The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's types as arguments. With the input datatypes known, you can create the accumulator generically.
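To make the shape concrete, here is a small, self-contained sketch. It is not GreptimeDB's real `AggregateFunctionCreator` trait: only the method names (`input_types()`, `output_type()`, `state_types()`) and the `MySumAccumulatorCreator` name are taken from this document, while the `DataType` enum and all signatures are simplified stand-ins, and the `creator()` factory method is omitted for brevity.

```rust
// Simplified stand-ins for illustration; in GreptimeDB the input-type storage
// is generated by `#[as_aggr_func_creator]` + `#[derive(AggrFuncTypeStore)]`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum DataType {
    UInt32,
    UInt64,
    Int32,
    Int64,
}

trait AggrFuncTypeStore {
    /// The input column types recorded for the current query.
    fn input_types(&self) -> Vec<DataType>;
}

trait AggregateFunctionCreator: AggrFuncTypeStore {
    fn output_type(&self) -> DataType;
    fn state_types(&self) -> Vec<DataType>;
}

/// Hand-written stand-in for what the macros would store for us.
struct MySumAccumulatorCreator {
    input_types: Vec<DataType>,
}

impl AggrFuncTypeStore for MySumAccumulatorCreator {
    fn input_types(&self) -> Vec<DataType> {
        self.input_types.clone()
    }
}

impl AggregateFunctionCreator for MySumAccumulatorCreator {
    fn output_type(&self) -> DataType {
        // Derive the output type from the stored input type, as described above.
        match self.input_types()[0] {
            DataType::UInt32 | DataType::UInt64 => DataType::UInt64,
            DataType::Int32 | DataType::Int64 => DataType::Int64,
        }
    }

    fn state_types(&self) -> Vec<DataType> {
        // A plain SUM only needs its running sum as internal state.
        vec![self.output_type()]
    }
}

fn main() {
    let creator = MySumAccumulatorCreator {
        input_types: vec![DataType::UInt32],
    };
    assert_eq!(creator.output_type(), DataType::UInt64);
    assert_eq!(creator.state_types(), vec![DataType::UInt64]);

    let creator = MySumAccumulatorCreator {
        input_types: vec![DataType::Int32],
    };
    assert_eq!(creator.output_type(), DataType::Int64);
}
```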
# 2. Impl `Accumulator` trait for your accumulator.
The accumulator is where you store the aggregate calculation states and evaluate the result. You must impl the `Accumulator` trait for it. The trait's definition is:
@@ -49,7 +49,7 @@ pub trait Accumulator: Send + Sync + Debug {
}
```
DataFusion basically executes an aggregate like this:
1. Partition all input data for the aggregate. Create an accumulator for each partition.
2. Call `update_batch` on each accumulator with its partition's data, to let you update your aggregate calculation.
@@ -57,16 +57,16 @@ The DataFusion basically executes aggregate like this:
4. Call `merge_batch` to merge all accumulators' internal states into one.
5. Execute `evaluate` on the chosen one to get the final calculation result.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to the `Median` accumulator or the `SUM` accumulator defined in `my_sum_udaf_example.rs` for more details.
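The self-contained sketch below mirrors those steps with simplified stand-ins. The method names (`update_batch`, `merge_batch`, `evaluate`, plus an assumed `state`) come from this document; the exact signatures and the plain `i64` value type are illustrative assumptions, not GreptimeDB's actual `Accumulator` trait.

```rust
// Simplified stand-in trait; the real one works on GreptimeDB vector types.
trait Accumulator {
    fn update_batch(&mut self, values: &[i64]);
    fn state(&self) -> Vec<i64>;
    fn merge_batch(&mut self, states: &[Vec<i64>]);
    fn evaluate(&self) -> i64;
}

#[derive(Default)]
struct MySum {
    sum: i64,
}

impl Accumulator for MySum {
    fn update_batch(&mut self, values: &[i64]) {
        self.sum += values.iter().sum::<i64>();
    }

    // The internal state of this accumulator is just the running sum.
    fn state(&self) -> Vec<i64> {
        vec![self.sum]
    }

    // Fold the states reported by the other partitions' accumulators into this one.
    fn merge_batch(&mut self, states: &[Vec<i64>]) {
        for s in states {
            self.sum += s[0];
        }
    }

    fn evaluate(&self) -> i64 {
        self.sum
    }
}

fn main() {
    // 1. Partition the input and create one accumulator per partition.
    let partitions = [vec![1_i64, 2, 3], vec![4, 5], vec![6]];
    let mut accs: Vec<MySum> = partitions.iter().map(|_| MySum::default()).collect();

    // 2. Update each accumulator with its partition's data.
    for (acc, part) in accs.iter_mut().zip(&partitions) {
        acc.update_batch(part);
    }

    // 3./4. Collect the other accumulators' states and merge them into a chosen one.
    let states: Vec<Vec<i64>> = accs.iter().skip(1).map(|a| a.state()).collect();
    let mut chosen = accs.swap_remove(0);
    chosen.merge_batch(&states);

    // 5. Evaluate the final result on the chosen accumulator.
    assert_eq!(chosen.evaluate(), 21);
}
```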
# 3. Register your aggregate function in our query engine.
You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields. The first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction, and we strongly recommend using a lowercase name. If you have to use an uppercase name, wrap your aggregate function in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
The second field is `arg_counts`, the count of the arguments. Take the `percentile` accumulator, which calculates the p_number of a column, for example: we need to input the value of the column and the value of p to calculate it, so the count of the arguments is two.
The third field is a function describing how to create the accumulator creator that you defined in step 1 above. Creating a creator is a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good place to start reading for the details is the `get_aggregate_meta` method of our `DfContextProviderAdapter` struct.
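As a rough, self-contained sketch of that registration shape (only the names `register_aggregate_function`, `AggregateFunctionMeta` and `arg_counts` come from this document; the field names, types, signatures, and the toy registry are assumptions, not GreptimeDB's real API):

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Stand-in for the creator type from step 1.
trait AggregateFunctionCreator: Send + Sync {}
type AggregateFunctionCreatorRef = Arc<dyn AggregateFunctionCreator>;

/// Simplified version of the metadata handed to the query engine.
#[allow(dead_code)]
struct AggregateFunctionMeta {
    /// Case-sensitive function name; prefer lowercase.
    name: String,
    /// Number of arguments, e.g. 2 for `percentile(column, p)`.
    arg_counts: usize,
    /// Builds a *fresh* creator for every SQL execution, so stored input
    /// types never leak between queries.
    create_creator: Box<dyn Fn() -> AggregateFunctionCreatorRef + Send + Sync>,
}

#[derive(Default)]
struct QueryEngine {
    aggregates: HashMap<String, AggregateFunctionMeta>,
}

impl QueryEngine {
    fn register_aggregate_function(&mut self, meta: AggregateFunctionMeta) {
        self.aggregates.insert(meta.name.clone(), meta);
    }
}

struct MyAggrCreator;
impl AggregateFunctionCreator for MyAggrCreator {}

fn main() {
    let mut engine = QueryEngine::default();
    engine.register_aggregate_function(AggregateFunctionMeta {
        name: "my_aggr".to_string(),
        arg_counts: 1,
        create_creator: Box::new(|| -> AggregateFunctionCreatorRef { Arc::new(MyAggrCreator) }),
    });
    assert!(engine.aggregates.contains_key("my_aggr"));
}
```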
# (Optional) 4. Make your aggregate function automatically registered.
If you've written a great aggregate function and want to let everyone use it, you can make it automatically register to our query engine at start time. It's quick and simple; just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.

View File

@@ -3,7 +3,7 @@
This document introduces how to write fuzz tests in GreptimeDB.
## What is a fuzz test
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or other unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
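For orientation, a minimal cargo-fuzz target looks roughly like the sketch below. It is a generic illustration rather than one of GreptimeDB's actual targets, and `parse_sql` is a hypothetical stand-in for the code under test; real targets usually build structured inputs (for example via the `arbitrary` crate) from the fuzzer's deterministic random bytes.

```rust
// fuzz/fuzz_targets/fuzz_example.rs -- illustrative only.
#![no_main]

use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // Feed the fuzzer-provided bytes to the code under test; the target
    // "passes" as long as nothing panics, crashes, or misbehaves.
    if let Ok(input) = std::str::from_utf8(data) {
        let _ = parse_sql(input);
    }
});

// Hypothetical stand-in for the function being fuzzed.
fn parse_sql(input: &str) -> Result<(), String> {
    if input.trim().is_empty() {
        return Err("empty statement".to_string());
    }
    Ok(())
}
```

Such a target typically lives under `fuzz/fuzz_targets/` and is run with `cargo fuzz run <target-name>`.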
## Why we need them
- Find bugs by leveraging random generation

8
flake.lock generated
View File

@@ -41,16 +41,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1748162331,
"narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
"lastModified": 1745487689,
"narHash": "sha256-FQoi3R0NjQeBAsEOo49b5tbDPcJSMWc3QhhaIi9eddw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
"rev": "5630cf13cceac06cefe9fc607e8dfa8fb342dde3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}

View File

@@ -2,7 +2,7 @@
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
@@ -21,7 +21,7 @@
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-tJJr8oqX3YD+ohhPK7jlt/7kvKBnBqJVjYtoFr520d4=";
sha256 = "sha256-arzEYlWLGGYeOhECHpBxQd2joZ4rPKV3qLNnZ+eql6A=";
};
in
{
@@ -51,7 +51,6 @@
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
NIX_HARDENING_ENABLE = "";
};
});
}

File diff suppressed because it is too large Load Diff

View File

@@ -46,7 +46,6 @@
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -60,7 +59,7 @@
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -68,9 +67,6 @@
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

View File

@@ -371,21 +371,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: 'Frontend Handle Bulk Insert Elapsed Time '
type: timeseries
description: Per-stage time for frontend to handle bulk insert requests
unit: s
queries:
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- title: Mito Engine
panels:
- title: Request OPS per Instance
@@ -487,7 +472,7 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction Elapsed Time per Instance by Stage
- title: Compaction P99 per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
@@ -497,11 +482,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{instance=~"$datanode"}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
@@ -582,51 +562,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Compaction Input/Output Bytes
type: timeseries
description: Compaction input/output bytes
unit: bytes
queries:
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-input'
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-output'
- title: Region Worker Handle Bulk Insert Requests
type: timeseries
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Region Worker Convert Requests
type: timeseries
description: Per-stage elapsed time for region worker to decode requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: OpenDAL
panels:
- title: QPS per Instance

File diff suppressed because it is too large Load Diff

View File

@@ -46,7 +46,6 @@
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
| Frontend Handle Bulk Insert Elapsed Time | `sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))`<br/>`histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))` | `timeseries` | Per-stage time for frontend to handle bulk insert requests | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG` |
# Mito Engine
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -60,7 +59,7 @@
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
| Compaction Elapsed Time per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))`<br/>`sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
@@ -68,9 +67,6 @@
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
| Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction input/output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

View File

@@ -371,21 +371,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{request_type}}]'
- title: 'Frontend Handle Bulk Insert Elapsed Time '
type: timeseries
description: Per-stage time for frontend to handle bulk insert requests
unit: s
queries:
- expr: sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_sum[$__rate_interval]))/sum by(instance, pod, stage) (rate(greptime_table_operator_handle_bulk_insert_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- expr: histogram_quantile(0.99, sum by(instance, pod, stage, le) (rate(greptime_table_operator_handle_bulk_insert_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- title: Mito Engine
panels:
- title: Request OPS per Instance
@@ -487,7 +472,7 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{pod}}]'
- title: Compaction Elapsed Time per Instance by Stage
- title: Compaction P99 per Instance by Stage
type: timeseries
description: Compaction latency by stage
unit: s
@@ -497,11 +482,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-p99'
- expr: sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_sum{}[$__rate_interval]))/sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_count{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-avg'
- title: Compaction P99 per Instance
type: timeseries
description: Compaction P99 per Instance.
@@ -582,51 +562,6 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]'
- title: Compaction Input/Output Bytes
type: timeseries
description: Compaction input/output bytes
unit: bytes
queries:
- expr: sum by(instance, pod) (greptime_mito_compaction_input_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-input'
- expr: sum by(instance, pod) (greptime_mito_compaction_output_bytes)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-output'
- title: Region Worker Handle Bulk Insert Requests
type: timeseries
description: Per-stage elapsed time for region worker to handle bulk insert region requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Region Worker Convert Requests
type: timeseries
description: Per-stage elapsed time for region worker to decode requests.
unit: s
queries:
- expr: histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-P95'
- expr: sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: OpenDAL
panels:
- title: QPS per Instance

View File

@@ -6,7 +6,7 @@ DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35
remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
sed -E 's/instance=~\\"(\$datanode|\$frontend|\$metasrv|\$flownode)\\",?//g' "$CLUSTER_DASHBOARD_DIR/dashboard.json" > "$STANDALONE_DASHBOARD_DIR/dashboard.json"
sed 's/instance=~\\"$datanode\\",//; s/instance=~\\"$datanode\\"//; s/instance=~\\"$frontend\\",//; s/instance=~\\"$frontend\\"//; s/instance=~\\"$metasrv\\",//; s/instance=~\\"$metasrv\\"//; s/instance=~\\"$flownode\\",//; s/instance=~\\"$flownode\\"//;' $CLUSTER_DASHBOARD_DIR/dashboard.json > $STANDALONE_DASHBOARD_DIR/dashboard.json
}
generate_intermediate_dashboards_and_docs() {

View File

@@ -26,13 +26,6 @@ excludes = [
"src/common/base/src/secrets.rs",
"src/servers/src/repeated_field.rs",
"src/servers/src/http/test_helpers.rs",
# enterprise
"src/common/meta/src/rpc/ddl/trigger.rs",
"src/operator/src/expr_helper/trigger.rs",
"src/sql/src/statements/create/trigger.rs",
"src/sql/src/statements/show/trigger.rs",
"src/sql/src/parsers/create_parser/trigger.rs",
"src/sql/src/parsers/show_parser/trigger.rs",
]
[properties]

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2025-05-19"
channel = "nightly-2025-04-15"

View File

@@ -5,12 +5,8 @@ edition.workspace = true
license.workspace = true
[features]
default = [
"pg_kvbackend",
"mysql_kvbackend",
]
pg_kvbackend = ["common-meta/pg_kvbackend", "meta-srv/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
pg_kvbackend = ["common-meta/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend"]
[lints]
workspace = true
@@ -47,9 +43,11 @@ etcd-client.workspace = true
futures.workspace = true
humantime.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
nu-ansi-term = "0.46"
object-store.workspace = true
opendal = { version = "0.51.1", features = [
"services-fs",
"services-s3",
] }
query.workspace = true
rand.workspace = true
reqwest.workspace = true

View File

@@ -17,7 +17,6 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use object_store::Error as ObjectStoreError;
use snafu::{Location, Snafu};
#[derive(Snafu)]
@@ -226,7 +225,7 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: ObjectStoreError,
error: opendal::Error,
},
#[snafu(display("S3 config need be set"))]
S3ConfigNotSet {
@@ -238,12 +237,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("KV backend not set: {}", backend))]
KvBackendNotSet {
backend: String,
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -280,9 +273,8 @@ impl ErrorExt for Error {
Error::Other { source, .. } => source.status_code(),
Error::OpenDal { .. } => StatusCode::Internal,
Error::S3ConfigNotSet { .. }
| Error::OutputDirNotSet { .. }
| Error::KvBackendNotSet { .. } => StatusCode::InvalidArguments,
Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
Error::BuildRuntime { source, .. } => source.status_code(),

View File

@@ -21,8 +21,8 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use object_store::layers::LoggingLayer;
use object_store::{services, ObjectStore};
use opendal::layers::LoggingLayer;
use opendal::{services, Operator};
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
@@ -110,26 +110,11 @@ pub struct ExportCommand {
#[clap(long)]
s3: bool,
/// if both `s3_ddl_local_dir` and `s3` are set, `s3_ddl_local_dir` will be only used for
/// exported SQL files, and the data will be exported to s3.
///
/// Note that `s3_ddl_local_dir` exports SQL files to the **LOCAL** file system; this is useful if the export client doesn't have
/// direct access to s3.
///
/// if `s3` is set but `s3_ddl_local_dir` is not set, both SQL&data will be exported to s3.
#[clap(long)]
s3_ddl_local_dir: Option<String>,
/// The s3 bucket name
/// if s3 is set, this is required
#[clap(long)]
s3_bucket: Option<String>,
// The s3 root path
/// if s3 is set, this is required
#[clap(long)]
s3_root: Option<String>,
/// The s3 endpoint
/// if s3 is set, this is required
#[clap(long)]
@@ -187,9 +172,7 @@ impl ExportCommand {
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
s3: self.s3,
s3_ddl_local_dir: self.s3_ddl_local_dir.clone(),
s3_bucket: self.s3_bucket.clone(),
s3_root: self.s3_root.clone(),
s3_endpoint: self.s3_endpoint.clone(),
s3_access_key: self.s3_access_key.clone(),
s3_secret_key: self.s3_secret_key.clone(),
@@ -209,9 +192,7 @@ pub struct Export {
start_time: Option<String>,
end_time: Option<String>,
s3: bool,
s3_ddl_local_dir: Option<String>,
s3_bucket: Option<String>,
s3_root: Option<String>,
s3_endpoint: Option<String>,
s3_access_key: Option<String>,
s3_secret_key: Option<String>,
@@ -383,7 +364,7 @@ impl Export {
let timer = Instant::now();
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = self.build_prefer_fs_operator().await?;
let operator = self.build_operator().await?;
for schema in db_names {
let create_database = self
@@ -413,7 +394,7 @@ impl Export {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = Arc::new(self.build_prefer_fs_operator().await?);
let operator = Arc::new(self.build_operator().await?);
let mut tasks = Vec::with_capacity(db_names.len());
for schema in db_names {
@@ -470,7 +451,7 @@ impl Export {
Ok(())
}
async fn build_operator(&self) -> Result<ObjectStore> {
async fn build_operator(&self) -> Result<Operator> {
if self.s3 {
self.build_s3_operator().await
} else {
@@ -478,34 +459,13 @@ impl Export {
}
}
/// build operator with preference for file system
async fn build_prefer_fs_operator(&self) -> Result<ObjectStore> {
// is under s3 mode and s3_ddl_dir is set, use it as root
if self.s3 && self.s3_ddl_local_dir.is_some() {
let root = self.s3_ddl_local_dir.as_ref().unwrap().clone();
let op = ObjectStore::new(services::Fs::default().root(&root))
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
Ok(op)
} else if self.s3 {
self.build_s3_operator().await
} else {
self.build_fs_operator().await
}
}
async fn build_s3_operator(&self) -> Result<ObjectStore> {
let mut builder = services::S3::default().bucket(
async fn build_s3_operator(&self) -> Result<Operator> {
let mut builder = services::S3::default().root("").bucket(
self.s3_bucket
.as_ref()
.expect("s3_bucket must be provided when s3 is enabled"),
);
if let Some(root) = self.s3_root.as_ref() {
builder = builder.root(root);
}
if let Some(endpoint) = self.s3_endpoint.as_ref() {
builder = builder.endpoint(endpoint);
}
@@ -522,20 +482,20 @@ impl Export {
builder = builder.secret_access_key(secret_key);
}
let op = ObjectStore::new(builder)
let op = Operator::new(builder)
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
Ok(op)
}
async fn build_fs_operator(&self) -> Result<ObjectStore> {
async fn build_fs_operator(&self) -> Result<Operator> {
let root = self
.output_dir
.as_ref()
.context(OutputDirNotSetSnafu)?
.clone();
let op = ObjectStore::new(services::Fs::default().root(&root))
let op = Operator::new(services::Fs::default().root(&root))
.context(OpenDalSnafu)?
.layer(LoggingLayer::default())
.finish();
@@ -549,7 +509,6 @@ impl Export {
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
let operator = Arc::new(self.build_operator().await?);
let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
let with_options = build_with_options(&self.start_time, &self.end_time);
for schema in db_names {
@@ -557,7 +516,6 @@ impl Export {
let export_self = self.clone();
let with_options_clone = with_options.clone();
let operator = operator.clone();
let fs_first_operator = fs_first_operator.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
@@ -591,7 +549,7 @@ impl Export {
let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
export_self
.write_to_storage(
&fs_first_operator,
&operator,
&copy_from_path,
copy_database_from_sql.into_bytes(),
)
@@ -622,13 +580,8 @@ impl Export {
fn format_output_path(&self, file_path: &str) -> String {
if self.s3 {
format!(
"s3://{}{}/{}",
"s3://{}/{}",
self.s3_bucket.as_ref().unwrap_or(&String::new()),
if let Some(root) = &self.s3_root {
format!("/{}", root)
} else {
String::new()
},
file_path
)
} else {
@@ -642,27 +595,19 @@ impl Export {
async fn write_to_storage(
&self,
op: &ObjectStore,
op: &Operator,
file_path: &str,
content: Vec<u8>,
) -> Result<()> {
op.write(file_path, content)
.await
.context(OpenDalSnafu)
.map(|_| ())
op.write(file_path, content).await.context(OpenDalSnafu)
}
fn get_storage_params(&self, schema: &str) -> (String, String) {
if self.s3 {
let s3_path = format!(
"s3://{}{}/{}/{}/",
"s3://{}/{}/{}/",
// Safety: s3_bucket is required when s3 is enabled
self.s3_bucket.as_ref().unwrap(),
if let Some(root) = &self.s3_root {
format!("/{}", root)
} else {
String::new()
},
self.catalog,
schema
);

View File

@@ -17,7 +17,6 @@ mod database;
pub mod error;
mod export;
mod import;
mod meta_snapshot;
use async_trait::async_trait;
use clap::Parser;
@@ -28,7 +27,6 @@ use error::Result;
pub use crate::bench::BenchTableMetadataCommand;
pub use crate::export::ExportCommand;
pub use crate::import::ImportCommand;
pub use crate::meta_snapshot::{MetaRestoreCommand, MetaSnapshotCommand};
#[async_trait]
pub trait Tool: Send + Sync {

View File

@@ -1,329 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use clap::Parser;
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::snapshot::MetadataSnapshotManager;
use meta_srv::bootstrap::create_etcd_client;
use meta_srv::metasrv::BackendImpl;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
use snafu::ResultExt;
use crate::error::{KvBackendNotSetSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
use crate::Tool;
#[derive(Debug, Default, Parser)]
struct MetaConnection {
/// The endpoint of store. one of etcd, pg or mysql.
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
store_addrs: Vec<String>,
/// The database backend.
#[clap(long, value_enum)]
backend: Option<BackendImpl>,
#[clap(long, default_value = "")]
store_key_prefix: String,
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
#[clap(long,default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
meta_table_name: String,
#[clap(long, default_value = "128")]
max_txn_ops: usize,
}
impl MetaConnection {
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
let max_txn_ops = self.max_txn_ops;
let store_addrs = &self.store_addrs;
if store_addrs.is_empty() {
KvBackendNotSetSnafu { backend: "all" }
.fail()
.map_err(BoxedError::new)
} else {
let kvbackend = match self.backend {
Some(BackendImpl::EtcdStore) => {
let etcd_client = create_etcd_client(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
}
#[cfg(feature = "pg_kvbackend")]
Some(BackendImpl::PostgresStore) => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
pool,
table_name,
max_txn_ops,
)
.await
.map_err(BoxedError::new)?)
}
#[cfg(feature = "mysql_kvbackend")]
Some(BackendImpl::MysqlStore) => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
.await
.map_err(BoxedError::new)?;
Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
pool,
table_name,
max_txn_ops,
)
.await
.map_err(BoxedError::new)?)
}
_ => KvBackendNotSetSnafu { backend: "all" }
.fail()
.map_err(BoxedError::new),
};
if self.store_key_prefix.is_empty() {
kvbackend
} else {
let chroot_kvbackend =
ChrootKvBackend::new(self.store_key_prefix.as_bytes().to_vec(), kvbackend?);
Ok(Arc::new(chroot_kvbackend))
}
}
}
}
// TODO(qtang): Abstract a generic s3 config for export import meta snapshot restore
#[derive(Debug, Default, Parser)]
struct S3Config {
/// whether to use s3 as the output directory. default is false.
#[clap(long, default_value = "false")]
s3: bool,
/// The s3 bucket name.
#[clap(long)]
s3_bucket: Option<String>,
/// The s3 region.
#[clap(long)]
s3_region: Option<String>,
/// The s3 access key.
#[clap(long)]
s3_access_key: Option<SecretString>,
/// The s3 secret key.
#[clap(long)]
s3_secret_key: Option<SecretString>,
/// The s3 endpoint. we will automatically use the default s3 decided by the region if not set.
#[clap(long)]
s3_endpoint: Option<String>,
}
impl S3Config {
pub fn build(&self, root: &str) -> Result<Option<ObjectStore>, BoxedError> {
if !self.s3 {
Ok(None)
} else {
if self.s3_region.is_none()
|| self.s3_access_key.is_none()
|| self.s3_secret_key.is_none()
|| self.s3_bucket.is_none()
{
return S3ConfigNotSetSnafu.fail().map_err(BoxedError::new);
}
// Safety, unwrap is safe because we have checked the options above.
let mut config = S3::default()
.bucket(self.s3_bucket.as_ref().unwrap())
.region(self.s3_region.as_ref().unwrap())
.access_key_id(self.s3_access_key.as_ref().unwrap().expose_secret())
.secret_access_key(self.s3_secret_key.as_ref().unwrap().expose_secret());
if !root.is_empty() && root != "." {
config = config.root(root);
}
if let Some(endpoint) = &self.s3_endpoint {
config = config.endpoint(endpoint);
}
Ok(Some(
ObjectStore::new(config)
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish(),
))
}
}
}
/// Export metadata snapshot tool.
/// This tool is used to export metadata snapshot from etcd, pg or mysql.
/// It will dump the metadata snapshot to local file or s3 bucket.
/// The snapshot file will be in binary format.
#[derive(Debug, Default, Parser)]
pub struct MetaSnapshotCommand {
/// The connection to the metadata store.
#[clap(flatten)]
connection: MetaConnection,
/// The s3 config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file. we will add the file extension automatically.
#[clap(long, default_value = "metadata_snapshot")]
file_name: String,
/// The directory to store the snapshot file.
/// if target output is s3 bucket, this is the root directory in the bucket.
/// if target output is local file, this is the local directory.
#[clap(long, default_value = "")]
output_dir: String,
}
fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError> {
let root = if root.is_empty() { "." } else { root };
let object_store = ObjectStore::new(Fs::default().root(root))
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish();
Ok(object_store)
}
impl MetaSnapshotCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kvbackend = self.connection.build().await?;
let output_dir = &self.output_dir;
let object_store = self.s3_config.build(output_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(output_dir)?;
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, object_store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
}
}
}
pub struct MetaSnapshotTool {
inner: MetadataSnapshotManager,
target_file: String,
}
#[async_trait]
impl Tool for MetaSnapshotTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
self.inner
.dump("", &self.target_file)
.await
.map_err(BoxedError::new)?;
Ok(())
}
}
/// Restore metadata snapshot tool.
/// This tool is used to restore metadata snapshot from etcd, pg or mysql.
/// It will restore the metadata snapshot from local file or s3 bucket.
#[derive(Debug, Default, Parser)]
pub struct MetaRestoreCommand {
/// The connection to the metadata store.
#[clap(flatten)]
connection: MetaConnection,
/// The s3 config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file.
#[clap(long, default_value = "metadata_snapshot.metadata.fb")]
file_name: String,
/// The directory to store the snapshot file.
#[clap(long, default_value = ".")]
input_dir: String,
#[clap(long, default_value = "false")]
force: bool,
}
impl MetaRestoreCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let kvbackend = self.connection.build().await?;
let input_dir = &self.input_dir;
let object_store = self.s3_config.build(input_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(input_dir)?;
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, object_store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
}
}
}
pub struct MetaRestoreTool {
inner: MetadataSnapshotManager,
source_file: String,
force: bool,
}
impl MetaRestoreTool {
pub fn new(inner: MetadataSnapshotManager, source_file: String, force: bool) -> Self {
Self {
inner,
source_file,
force,
}
}
}
#[async_trait]
impl Tool for MetaRestoreTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
let clean = self
.inner
.check_target_source_clean()
.await
.map_err(BoxedError::new)?;
if clean {
common_telemetry::info!(
"The target source is clean, we will restore the metadata snapshot."
);
self.inner
.restore(&self.source_file)
.await
.map_err(BoxedError::new)?;
Ok(())
} else if !self.force {
common_telemetry::warn!(
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
);
Ok(())
} else {
common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force.");
self.inner
.restore(&self.source_file)
.await
.map_err(BoxedError::new)?;
Ok(())
}
}
}

View File

@@ -25,7 +25,6 @@ common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
datatypes.workspace = true
enum_dispatch = "0.3"
futures.workspace = true
futures-util.workspace = true

View File

@@ -14,7 +14,6 @@
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
@@ -36,7 +35,7 @@ use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper};
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
@@ -50,7 +49,7 @@ use crate::error::{
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
InvalidTonicMetadataValueSnafu, ServerSnafu,
};
use crate::{error, from_grpc_response, Client, Result};
use crate::{from_grpc_response, Client, Result};
type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;
@@ -316,7 +315,7 @@ impl Database {
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
flight_data
.map_err(Error::from)
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
.and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
});
let Some(first_flight_message) = flight_message_stream.next().await else {
@@ -338,30 +337,20 @@ impl Database {
);
Ok(Output::new_with_affected_rows(rows))
}
FlightMessage::RecordBatch(_) | FlightMessage::Metrics(_) => {
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
IllegalFlightMessagesSnafu {
reason: "The first flight message cannot be a RecordBatch or Metrics message",
}
.fail()
}
FlightMessage::Schema(schema) => {
let schema = Arc::new(
datatypes::schema::Schema::try_from(schema)
.context(error::ConvertSchemaSnafu)?,
);
let schema_cloned = schema.clone();
let stream = Box::pin(stream!({
while let Some(flight_message) = flight_message_stream.next().await {
let flight_message = flight_message
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::RecordBatch(arrow_batch) => {
yield RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
arrow_batch,
)
}
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::Metrics(_) => {}
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
yield IllegalFlightMessagesSnafu {reason: format!("A Schema message must be succeeded exclusively by a set of RecordBatch messages, flight_message: {:?}", flight_message)}

View File

@@ -117,13 +117,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to convert Schema"))]
ConvertSchema {
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -144,7 +137,6 @@ impl ErrorExt for Error {
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::InvalidTonicMetadataValue { .. } => StatusCode::InvalidArguments,
Error::ConvertSchema { source, .. } => source.status_code(),
}
}

View File

@@ -28,7 +28,7 @@ use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::node_manager::Datanode;
use common_query::request::QueryRequest;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
@@ -55,7 +55,6 @@ impl Datanode for RegionRequester {
if err.should_retry() {
meta_error::Error::RetryLater {
source: BoxedError::new(err),
clean_poisons: false,
}
} else {
meta_error::Error::External {
@@ -126,7 +125,7 @@ impl RegionRequester {
let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
flight_data
.map_err(Error::from)
.and_then(|data| decoder.try_decode(&data).context(ConvertFlightDataSnafu))
.and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
});
let Some(first_flight_message) = flight_message_stream.next().await else {
@@ -147,10 +146,6 @@ impl RegionRequester {
let tracing_context = TracingContext::from_current_span();
let schema = Arc::new(
datatypes::schema::Schema::try_from(schema).context(error::ConvertSchemaSnafu)?,
);
let schema_cloned = schema.clone();
let stream = Box::pin(stream!({
let _span = tracing_context.attach(common_telemetry::tracing::info_span!(
"poll_flight_data_stream"
@@ -161,12 +156,7 @@ impl RegionRequester {
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::RecordBatch(record_batch) => {
yield RecordBatch::try_from_df_record_batch(
schema_cloned.clone(),
record_batch,
)
}
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::Metrics(s) => {
let m = serde_json::from_str(&s).ok().map(Arc::new);
metrics_ref.swap(m);

View File

@@ -10,13 +10,7 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = [
"servers/pprof",
"servers/mem-prof",
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
default = ["servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
[lints]

View File

@@ -76,7 +76,6 @@ impl Command {
&opts,
&TracingOptions::default(),
None,
None,
);
let tool = self.cmd.build().await.context(error::BuildCliSnafu)?;

View File

@@ -156,7 +156,6 @@ impl StartCommand {
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
opts.component.sanitize();
Ok(opts)
}

View File

@@ -64,7 +64,6 @@ impl InstanceBuilder {
&dn_opts.logging,
&dn_opts.tracing,
dn_opts.node_id.map(|x| x.to_string()),
None,
);
log_versions(version(), short_version(), APP_NAME);

View File

@@ -244,7 +244,6 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
None,
);
log_versions(version(), short_version(), APP_NAME);

View File

@@ -37,6 +37,7 @@ use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use query::stats::StatementStatistics;
use servers::export_metrics::ExportMetricsTask;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
@@ -268,7 +269,6 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.clone(),
opts.component.slow_query.as_ref(),
);
log_versions(version(), short_version(), APP_NAME);
@@ -368,6 +368,7 @@ impl StartCommand {
catalog_manager,
Arc::new(client),
meta_client,
StatementStatistics::new(opts.logging.slow_query.clone()),
)
.with_plugin(plugins.clone())
.with_local_cache_invalidator(layered_cache_registry)

View File

@@ -300,7 +300,6 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
None,
None,
);
log_versions(version(), short_version(), APP_NAME);

View File

@@ -35,8 +35,6 @@ use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRe
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
#[cfg(feature = "enterprise")]
use common_meta::ddl_manager::TriggerDdlManagerRef;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
@@ -49,7 +47,7 @@ use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions};
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
@@ -71,7 +69,7 @@ use frontend::service_config::{
};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use query::stats::StatementStatistics;
use serde::{Deserialize, Serialize};
use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
@@ -155,8 +153,6 @@ pub struct StandaloneOptions {
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
pub max_in_flight_write_bytes: Option<ReadableSize>,
pub slow_query: Option<SlowQueryOptions>,
pub query: QueryOptions,
}
impl Default for StandaloneOptions {
@@ -188,8 +184,6 @@ impl Default for StandaloneOptions {
init_regions_in_background: false,
init_regions_parallelism: 16,
max_in_flight_write_bytes: None,
slow_query: Some(SlowQueryOptions::default()),
query: QueryOptions::default(),
}
}
}
@@ -229,7 +223,6 @@ impl StandaloneOptions {
// Handle the export metrics task run by standalone to frontend for execution
export_metrics: cloned_opts.export_metrics,
max_in_flight_write_bytes: cloned_opts.max_in_flight_write_bytes,
slow_query: cloned_opts.slow_query,
..Default::default()
}
}
@@ -245,7 +238,6 @@ impl StandaloneOptions {
grpc: cloned_opts.grpc,
init_regions_in_background: cloned_opts.init_regions_in_background,
init_regions_parallelism: cloned_opts.init_regions_parallelism,
query: cloned_opts.query,
..Default::default()
}
}
@@ -455,7 +447,6 @@ impl StartCommand {
&opts.component.logging,
&opts.component.tracing,
None,
opts.component.slow_query.as_ref(),
);
log_versions(version(), short_version(), APP_NAME);
@@ -585,8 +576,6 @@ impl StartCommand {
flow_id_sequence,
));
#[cfg(feature = "enterprise")]
let trigger_ddl_manager: Option<TriggerDdlManagerRef> = plugins.get();
let ddl_task_executor = Self::create_ddl_task_executor(
procedure_manager.clone(),
node_manager.clone(),
@@ -595,8 +584,6 @@ impl StartCommand {
table_meta_allocator,
flow_metadata_manager,
flow_meta_allocator,
#[cfg(feature = "enterprise")]
trigger_ddl_manager,
)
.await?;
@@ -607,6 +594,7 @@ impl StartCommand {
catalog_manager.clone(),
node_manager.clone(),
ddl_task_executor.clone(),
StatementStatistics::new(opts.logging.slow_query.clone()),
)
.with_plugin(plugins.clone())
.try_build()
@@ -661,7 +649,6 @@ impl StartCommand {
})
}
#[allow(clippy::too_many_arguments)]
pub async fn create_ddl_task_executor(
procedure_manager: ProcedureManagerRef,
node_manager: NodeManagerRef,
@@ -670,7 +657,6 @@ impl StartCommand {
table_metadata_allocator: TableMetadataAllocatorRef,
flow_metadata_manager: FlowMetadataManagerRef,
flow_metadata_allocator: FlowMetadataAllocatorRef,
#[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
@@ -687,8 +673,6 @@ impl StartCommand {
},
procedure_manager,
true,
#[cfg(feature = "enterprise")]
trigger_ddl_manager,
)
.context(error::InitDdlManagerSnafu)?,
);

View File

@@ -18,7 +18,7 @@ use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
use common_config::Configurable;
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
@@ -167,6 +167,11 @@ fn test_load_metasrv_example_config() {
level: Some("info".to_string()),
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
tracing_sample_ratio: Some(Default::default()),
slow_query: SlowQueryOptions {
enable: false,
threshold: None,
sample_ratio: None,
},
..Default::default()
},
datanode: DatanodeClientOptions {

View File

@@ -1,90 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
use snafu::ensure;
use store_api::storage::ConcreteDataType;
use crate::handlers::FlowServiceHandlerRef;
use crate::helper::parse_catalog_flow;
fn adjust_signature() -> Signature {
Signature::exact(
vec![
ConcreteDataType::string_datatype(), // flow name
ConcreteDataType::uint64_datatype(), // min_run_interval in seconds
ConcreteDataType::uint64_datatype(), // max filter number per query
],
Volatility::Immutable,
)
}
#[admin_fn(
name = AdjustFlowFunction,
display_name = adjust_flow,
sig_fn = adjust_signature,
ret = uint64
)]
pub(crate) async fn adjust_flow(
flow_service_handler: &FlowServiceHandlerRef,
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
ensure!(
params.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect 3, have: {}",
params.len()
),
}
);
let (flow_name, min_run_interval, max_filter_num) = match (params[0], params[1], params[2]) {
(
ValueRef::String(flow_name),
ValueRef::UInt64(min_run_interval),
ValueRef::UInt64(max_filter_num),
) => (flow_name, min_run_interval, max_filter_num),
_ => {
return UnsupportedInputDataTypeSnafu {
function: "adjust_flow",
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
}
.fail();
}
};
let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
let res = flow_service_handler
.adjust(
&catalog_name,
&flow_name,
min_run_interval,
max_filter_num as usize,
query_ctx.clone(),
)
.await?;
let affected_rows = res.affected_rows;
Ok(Value::from(affected_rows))
}

View File

@@ -26,7 +26,6 @@ use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;
use remove_region_follower::RemoveRegionFollowerFunction;
use crate::adjust_flow::AdjustFlowFunction;
use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;
@@ -44,6 +43,5 @@ impl AdminFunction {
registry.register_async(Arc::new(FlushTableFunction));
registry.register_async(Arc::new(CompactTableFunction));
registry.register_async(Arc::new(FlushFlowFunction));
registry.register_async(Arc::new(AdjustFlowFunction));
}
}

View File

@@ -163,7 +163,7 @@ impl DfAccumulator for UddSketchState {
}
}
// i.e., when instantiated as `uddsketch_merge`
DataType::Binary => self.merge_batch(std::slice::from_ref(array))?,
DataType::Binary => self.merge_batch(&[array.clone()])?,
_ => {
return not_impl_err!(
"UDDSketch functions do not support data type: {}",

View File

@@ -12,19 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
use snafu::ensure;
use snafu::{ensure, ResultExt};
use sql::parser::ParserContext;
use store_api::storage::ConcreteDataType;
use crate::handlers::FlowServiceHandlerRef;
use crate::helper::parse_catalog_flow;
fn flush_signature() -> Signature {
Signature::uniform(
@@ -45,6 +47,20 @@ pub(crate) async fn flush_flow(
query_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;
let res = flow_service_handler
.flush(&catalog_name, &flow_name, query_ctx.clone())
.await?;
let affected_rows = res.affected_rows;
Ok(Value::from(affected_rows))
}
fn parse_flush_flow(
params: &[ValueRef<'_>],
query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
ensure!(
params.len() == 1,
InvalidFuncArgsSnafu {
@@ -54,6 +70,7 @@ pub(crate) async fn flush_flow(
),
}
);
let ValueRef::String(flow_name) = params[0] else {
return UnsupportedInputDataTypeSnafu {
function: "flush_flow",
@@ -61,14 +78,27 @@ pub(crate) async fn flush_flow(
}
.fail();
};
let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
.map_err(BoxedError::new)
.context(ExecuteSnafu)?;
let res = flow_service_handler
.flush(&catalog_name, &flow_name, query_ctx.clone())
.await?;
let affected_rows = res.affected_rows;
Ok(Value::from(affected_rows))
let (catalog_name, flow_name) = match &obj_name.0[..] {
[flow_name] => (
query_ctx.current_catalog().to_string(),
flow_name.value.clone(),
),
[catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
_ => {
return InvalidFuncArgsSnafu {
err_msg: format!(
"expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
obj_name
),
}
.fail()
}
};
Ok((catalog_name, flow_name))
}
#[cfg(test)]
@@ -124,7 +154,10 @@ mod test {
("catalog.flow_name", ("catalog", "flow_name")),
];
for (input, expected) in testcases.iter() {
let result = parse_catalog_flow(input, &QueryContext::arc()).unwrap();
let args = vec![*input];
let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();
let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
}
}
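A simplified, dependency-free sketch of the name-resolution rule that parse_flush_flow (and the parse_catalog_flow helper on the other side of this hunk) implements: a bare name resolves against the session's current catalog, <catalog>.<flow-name> is split explicitly, and anything longer is rejected. The real code parses the identifier through ParserContext with the session's SQL dialect; the plain split('.') below is only an illustration and does not handle quoted identifiers.

fn parse_flow_name(input: &str, current_catalog: &str) -> Result<(String, String), String> {
    // The real implementation parses with the SQL dialect; splitting on '.' is a simplification.
    let parts: Vec<&str> = input.split('.').collect();
    match parts.as_slice() {
        [flow] => Ok((current_catalog.to_string(), flow.to_string())),
        [catalog, flow] => Ok((catalog.to_string(), flow.to_string())),
        _ => Err(format!(
            "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {input}"
        )),
    }
}

fn main() {
    // "greptime" here is just an example value for the session's current catalog.
    assert_eq!(
        parse_flow_name("flow_name", "greptime").unwrap(),
        ("greptime".to_string(), "flow_name".to_string())
    );
    assert_eq!(
        parse_flow_name("catalog.flow_name", "greptime").unwrap(),
        ("catalog".to_string(), "flow_name".to_string())
    );
    assert!(parse_flow_name("a.b.c", "greptime").is_err());
}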

View File

@@ -87,15 +87,6 @@ pub trait FlowServiceHandler: Send + Sync {
flow: &str,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse>;
async fn adjust(
&self,
catalog: &str,
flow: &str,
min_run_interval_secs: u64,
max_filter_num_per_query: usize,
ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse>;
}
pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

View File

@@ -12,15 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::BoxedError;
use common_query::error::{ExecuteSnafu, InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result};
use common_query::error::{InvalidInputTypeSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::parser::ParserContext;
/// Create a function signature with oneof signatures of interleaving two arguments.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -46,30 +43,3 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
})
.map(|v| v.as_u64())
}
pub fn parse_catalog_flow(
flow_name: &str,
query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
.map_err(BoxedError::new)
.context(ExecuteSnafu)?;
let (catalog_name, flow_name) = match &obj_name.0[..] {
[flow_name] => (
query_ctx.current_catalog().to_string(),
flow_name.value.clone(),
),
[catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
_ => {
return InvalidFuncArgsSnafu {
err_msg: format!(
"expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
obj_name
),
}
.fail()
}
};
Ok((catalog_name, flow_name))
}

View File

@@ -15,7 +15,6 @@
#![feature(let_chains)]
#![feature(try_blocks)]
mod adjust_flow;
mod admin;
mod flush_flow;
mod macros;

View File

@@ -468,8 +468,8 @@ mod tests {
let empty_values = vec![""];
let empty_input = Arc::new(StringVector::from_slice(&empty_values)) as VectorRef;
let ipv4_result = ipv4_func.eval(&ctx, std::slice::from_ref(&empty_input));
let ipv6_result = ipv6_func.eval(&ctx, std::slice::from_ref(&empty_input));
let ipv4_result = ipv4_func.eval(&ctx, &[empty_input.clone()]);
let ipv6_result = ipv6_func.eval(&ctx, &[empty_input.clone()]);
assert!(ipv4_result.is_err());
assert!(ipv6_result.is_err());
@@ -478,7 +478,7 @@ mod tests {
let invalid_values = vec!["not an ip", "192.168.1.256", "zzzz::ffff"];
let invalid_input = Arc::new(StringVector::from_slice(&invalid_values)) as VectorRef;
let ipv4_result = ipv4_func.eval(&ctx, std::slice::from_ref(&invalid_input));
let ipv4_result = ipv4_func.eval(&ctx, &[invalid_input.clone()]);
assert!(ipv4_result.is_err());
}
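Both this hunk and the UDDSketch hunk above toggle between &[x.clone()] and std::slice::from_ref(&x) when an API only needs a one-element slice. A minimal, self-contained sketch (the consume function and payload type are made up for illustration) of why the two forms give the same result, while from_ref avoids the extra clone/refcount bump:

use std::sync::Arc;

// Stand-in for an API such as eval or merge_batch that accepts a slice of values.
fn consume(values: &[Arc<Vec<u8>>]) -> usize {
    values.iter().map(|v| v.len()).sum()
}

fn main() {
    let payload = Arc::new(vec![1u8, 2, 3]);

    // Clones the Arc (bumps the refcount) just to build a temporary one-element slice.
    let with_clone = consume(&[payload.clone()]);

    // Borrows the existing value as a one-element slice; nothing is cloned.
    let with_from_ref = consume(std::slice::from_ref(&payload));

    assert_eq!(with_clone, with_from_ref);
}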

View File

@@ -294,7 +294,7 @@ mod tests {
let input = Arc::new(StringVector::from_slice(&values)) as VectorRef;
// Convert IPv6 addresses to binary
let binary_result = to_num.eval(&ctx, std::slice::from_ref(&input)).unwrap();
let binary_result = to_num.eval(&ctx, &[input.clone()]).unwrap();
// Convert binary to hex string representation (for ipv6_num_to_string)
let mut hex_strings = Vec::new();

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod clamp;
mod clamp;
mod modulo;
mod pow;
mod rate;
@@ -20,7 +20,7 @@ mod rate;
use std::fmt;
use std::sync::Arc;
pub use clamp::{ClampFunction, ClampMaxFunction, ClampMinFunction};
pub use clamp::ClampFunction;
use common_query::error::{GeneralDataFusionSnafu, Result};
use common_query::prelude::Signature;
use datafusion::error::DataFusionError;
@@ -44,8 +44,6 @@ impl MathFunction {
registry.register(Arc::new(RateFunction));
registry.register(Arc::new(RangeFunction));
registry.register(Arc::new(ClampFunction));
registry.register(Arc::new(ClampMinFunction));
registry.register(Arc::new(ClampMaxFunction));
}
}

View File

@@ -155,182 +155,6 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
Ok(Arc::new(PrimitiveVector::<T>::from(result)))
}
#[derive(Clone, Debug, Default)]
pub struct ClampMinFunction;
const CLAMP_MIN_NAME: &str = "clamp_min";
impl Function for ClampMinFunction {
fn name(&self) -> &str {
CLAMP_MIN_NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(input_types[0].clone())
}
fn signature(&self) -> Signature {
// input, min
Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
}
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly 2, have: {}",
columns.len()
),
}
);
ensure!(
columns[0].data_type().is_numeric(),
InvalidFuncArgsSnafu {
err_msg: format!(
"The first arg's type is not numeric, have: {}",
columns[0].data_type()
),
}
);
ensure!(
columns[0].data_type() == columns[1].data_type(),
InvalidFuncArgsSnafu {
err_msg: format!(
"Arguments don't have identical types: {}, {}",
columns[0].data_type(),
columns[1].data_type()
),
}
);
ensure!(
columns[1].len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The second arg (min) should be scalar, have: {:?}",
columns[1]
),
}
);
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
let input_array = columns[0].to_arrow_array();
let input = input_array
.as_any()
.downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
.unwrap();
let min = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
.with_context(|| {
InvalidFuncArgsSnafu {
err_msg: "The second arg (min) should not be none",
}
})?;
// For clamp_min, max is effectively infinity, so we don't use it in the clamp_impl logic.
// We pass a default/dummy value for max.
let max_dummy = <$S as LogicalPrimitiveType>::Native::default();
clamp_impl::<$S, true, false>(input, min, max_dummy)
},{
unreachable!()
})
}
}
impl Display for ClampMinFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", CLAMP_MIN_NAME.to_ascii_uppercase())
}
}
#[derive(Clone, Debug, Default)]
pub struct ClampMaxFunction;
const CLAMP_MAX_NAME: &str = "clamp_max";
impl Function for ClampMaxFunction {
fn name(&self) -> &str {
CLAMP_MAX_NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(input_types[0].clone())
}
fn signature(&self) -> Signature {
// input, max
Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
}
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly 2, have: {}",
columns.len()
),
}
);
ensure!(
columns[0].data_type().is_numeric(),
InvalidFuncArgsSnafu {
err_msg: format!(
"The first arg's type is not numeric, have: {}",
columns[0].data_type()
),
}
);
ensure!(
columns[0].data_type() == columns[1].data_type(),
InvalidFuncArgsSnafu {
err_msg: format!(
"Arguments don't have identical types: {}, {}",
columns[0].data_type(),
columns[1].data_type()
),
}
);
ensure!(
columns[1].len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The second arg (max) should be scalar, have: {:?}",
columns[1]
),
}
);
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
let input_array = columns[0].to_arrow_array();
let input = input_array
.as_any()
.downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
.unwrap();
let max = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
.with_context(|| {
InvalidFuncArgsSnafu {
err_msg: "The second arg (max) should not be none",
}
})?;
// For clamp_max, min is effectively -infinity, so we don't use it in the clamp_impl logic.
// We pass a default/dummy value for min.
let min_dummy = <$S as LogicalPrimitiveType>::Native::default();
clamp_impl::<$S, false, true>(input, min_dummy, max)
},{
unreachable!()
})
}
}
impl Display for ClampMaxFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", CLAMP_MAX_NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod test {
@@ -570,134 +394,4 @@ mod test {
let result = func.eval(&FunctionContext::default(), args.as_slice());
assert!(result.is_err());
}
#[test]
fn clamp_min_i64() {
let inputs = [
(
vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
-1,
vec![Some(-1), Some(-1), Some(-1), Some(0), Some(1), Some(2)],
),
(
vec![Some(-3), None, Some(-1), None, None, Some(2)],
-2,
vec![Some(-2), None, Some(-1), None, None, Some(2)],
),
];
let func = ClampMinFunction;
for (in_data, min, expected) in inputs {
let args = [
Arc::new(Int64Vector::from(in_data)) as _,
Arc::new(Int64Vector::from_vec(vec![min])) as _,
];
let result = func
.eval(&FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(Int64Vector::from(expected));
assert_eq!(expected, result);
}
}
#[test]
fn clamp_max_i64() {
let inputs = [
(
vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
1,
vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(1)],
),
(
vec![Some(-3), None, Some(-1), None, None, Some(2)],
0,
vec![Some(-3), None, Some(-1), None, None, Some(0)],
),
];
let func = ClampMaxFunction;
for (in_data, max, expected) in inputs {
let args = [
Arc::new(Int64Vector::from(in_data)) as _,
Arc::new(Int64Vector::from_vec(vec![max])) as _,
];
let result = func
.eval(&FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(Int64Vector::from(expected));
assert_eq!(expected, result);
}
}
#[test]
fn clamp_min_f64() {
let inputs = [(
vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
-1.0,
vec![Some(-1.0), Some(-1.0), Some(-1.0), Some(0.0), Some(1.0)],
)];
let func = ClampMinFunction;
for (in_data, min, expected) in inputs {
let args = [
Arc::new(Float64Vector::from(in_data)) as _,
Arc::new(Float64Vector::from_vec(vec![min])) as _,
];
let result = func
.eval(&FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(Float64Vector::from(expected));
assert_eq!(expected, result);
}
}
#[test]
fn clamp_max_f64() {
let inputs = [(
vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
0.0,
vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(0.0)],
)];
let func = ClampMaxFunction;
for (in_data, max, expected) in inputs {
let args = [
Arc::new(Float64Vector::from(in_data)) as _,
Arc::new(Float64Vector::from_vec(vec![max])) as _,
];
let result = func
.eval(&FunctionContext::default(), args.as_slice())
.unwrap();
let expected: VectorRef = Arc::new(Float64Vector::from(expected));
assert_eq!(expected, result);
}
}
#[test]
fn clamp_min_type_not_match() {
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
let min = -1;
let func = ClampMinFunction;
let args = [
Arc::new(Float64Vector::from(input)) as _,
Arc::new(Int64Vector::from_vec(vec![min])) as _,
];
let result = func.eval(&FunctionContext::default(), args.as_slice());
assert!(result.is_err());
}
#[test]
fn clamp_max_type_not_match() {
let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
let max = 1;
let func = ClampMaxFunction;
let args = [
Arc::new(Float64Vector::from(input)) as _,
Arc::new(Int64Vector::from_vec(vec![max])) as _,
];
let result = func.eval(&FunctionContext::default(), args.as_slice());
assert!(result.is_err());
}
}
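A dependency-free sketch of the clamp_min / clamp_max semantics exercised by the tests in this hunk: clamp_min raises every value to at least min, clamp_max lowers every value to at most max, and nulls pass through untouched. The vector types, signature checks, and registry wiring above are deliberately left out; this is only the core arithmetic, using the same i64 inputs as the tests.

fn clamp_min(values: &[Option<i64>], min: i64) -> Vec<Option<i64>> {
    values.iter().map(|v| v.map(|x| x.max(min))).collect()
}

fn clamp_max(values: &[Option<i64>], max: i64) -> Vec<Option<i64>> {
    values.iter().map(|v| v.map(|x| x.min(max))).collect()
}

fn main() {
    let input = vec![Some(-3), None, Some(-1), None, None, Some(2)];
    // Mirrors the second i64 test case of each function: min = -2 and max = 0.
    assert_eq!(
        clamp_min(&input, -2),
        vec![Some(-2), None, Some(-1), None, None, Some(2)]
    );
    assert_eq!(
        clamp_max(&input, 0),
        vec![Some(-3), None, Some(-1), None, None, Some(0)]
    );
}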

View File

@@ -37,7 +37,7 @@ impl fmt::Display for RateFunction {
impl Function for RateFunction {
fn name(&self) -> &str {
"rate"
"prom_rate"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
@@ -82,7 +82,7 @@ mod tests {
#[test]
fn test_rate_function() {
let rate = RateFunction;
assert_eq!("rate", rate.name());
assert_eq!("prom_rate", rate.name());
assert_eq!(
ConcreteDataType::float64_datatype(),
rate.return_type(&[]).unwrap()
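The hunk above only renames the registered function (rate vs prom_rate); it does not show the computation itself, and details such as windowing or counter-reset handling are not visible here. As a rough, hedged illustration of what a rate over timestamped samples generally means, here is a standalone consecutive-sample rate with made-up inputs (not the actual RateFunction implementation):

/// Rate between consecutive samples: (v[i] - v[i-1]) / (t[i] - t[i-1]), timestamps in seconds.
fn rate(values: &[f64], timestamps_s: &[f64]) -> Vec<f64> {
    values
        .windows(2)
        .zip(timestamps_s.windows(2))
        .map(|(v, t)| (v[1] - v[0]) / (t[1] - t[0]))
        .collect()
}

fn main() {
    let values = [0.0, 10.0, 30.0];
    let timestamps = [0.0, 5.0, 10.0];
    // 10 units over 5 s, then 20 units over 5 s.
    assert_eq!(rate(&values, &timestamps), vec![2.0, 4.0]);
}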

View File

@@ -148,17 +148,6 @@ impl FunctionState {
) -> Result<api::v1::flow::FlowResponse> {
todo!()
}
async fn adjust(
&self,
_catalog: &str,
_flow: &str,
_min_run_interval_secs: u64,
_max_filter_num_per_query: usize,
_ctx: QueryContextRef,
) -> Result<api::v1::flow::FlowResponse> {
todo!()
}
}
Self {

View File

@@ -10,7 +10,6 @@ workspace = true
[dependencies]
api.workspace = true
arrow-flight.workspace = true
bytes.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true

View File

@@ -1,146 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use arrow_flight::FlightData;
use bytes::Bytes;
use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage};
use common_recordbatch::DfRecordBatch;
use criterion::{criterion_group, criterion_main, Criterion};
use datatypes::arrow;
use datatypes::arrow::array::{ArrayRef, Int64Array, StringArray, TimestampMillisecondArray};
use datatypes::arrow::datatypes::DataType;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use prost::Message;
fn schema() -> arrow::datatypes::SchemaRef {
let schema = Schema::new(vec![
ColumnSchema::new("k0", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("k1", ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
),
ColumnSchema::new("v0", ConcreteDataType::int64_datatype(), false),
ColumnSchema::new("v1", ConcreteDataType::int64_datatype(), false),
]);
schema.arrow_schema().clone()
}
/// Generate a record batch according to the provided schema and number of rows.
fn prepare_random_record_batch(
schema: arrow::datatypes::SchemaRef,
num_rows: usize,
) -> DfRecordBatch {
let tag_candidates = (0..10000).map(|i| i.to_string()).collect::<Vec<_>>();
let columns: Vec<ArrayRef> = schema
.fields
.iter()
.map(|col| match col.data_type() {
DataType::Utf8 => {
let array = StringArray::from(
(0..num_rows)
.map(|_| {
let idx: usize = rand::random_range(0..10000);
format!("tag-{}", tag_candidates[idx])
})
.collect::<Vec<_>>(),
);
Arc::new(array) as ArrayRef
}
DataType::Timestamp(_, _) => {
let now = common_time::util::current_time_millis();
let array = TimestampMillisecondArray::from(
(0..num_rows).map(|i| now + i as i64).collect::<Vec<_>>(),
);
Arc::new(array) as ArrayRef
}
DataType::Int64 => {
let array = Int64Array::from((0..num_rows).map(|i| i as i64).collect::<Vec<_>>());
Arc::new(array) as ArrayRef
}
_ => unreachable!(),
})
.collect();
DfRecordBatch::try_new(schema, columns).unwrap()
}
fn prepare_flight_data(num_rows: usize) -> (FlightData, FlightData) {
let schema = schema();
let mut encoder = FlightEncoder::default();
let schema_data = encoder.encode(FlightMessage::Schema(schema.clone()));
let rb = prepare_random_record_batch(schema, num_rows);
let rb_data = encoder.encode(FlightMessage::RecordBatch(rb));
(schema_data, rb_data)
}
fn decode_flight_data_from_protobuf(schema: &Bytes, payload: &Bytes) -> DfRecordBatch {
let schema = FlightData::decode(&schema[..]).unwrap();
let payload = FlightData::decode(&payload[..]).unwrap();
let mut decoder = FlightDecoder::default();
let _schema = decoder.try_decode(&schema).unwrap();
let message = decoder.try_decode(&payload).unwrap();
let FlightMessage::RecordBatch(batch) = message else {
unreachable!("unexpected message");
};
batch
}
fn decode_flight_data_from_header_and_body(
schema: &Bytes,
data_header: &Bytes,
data_body: &Bytes,
) -> DfRecordBatch {
let mut decoder = FlightDecoder::try_from_schema_bytes(schema).unwrap();
decoder
.try_decode_record_batch(data_header, data_body)
.unwrap()
}
fn bench_decode_flight_data(c: &mut Criterion) {
let row_counts = [100000, 200000, 1000000];
for row_count in row_counts {
let (schema, payload) = prepare_flight_data(row_count);
// arguments for decode_flight_data_from_protobuf
let schema_bytes = Bytes::from(schema.encode_to_vec());
let payload_bytes = Bytes::from(payload.encode_to_vec());
let mut group = c.benchmark_group(format!("flight_decoder_{}_rows", row_count));
group.bench_function("decode_from_protobuf", |b| {
b.iter(|| decode_flight_data_from_protobuf(&schema_bytes, &payload_bytes));
});
group.bench_function("decode_from_header_and_body", |b| {
b.iter(|| {
decode_flight_data_from_header_and_body(
&schema.data_header,
&payload.data_header,
&payload.data_body,
)
});
});
group.finish();
}
}
criterion_group!(benches, bench_decode_flight_data);
criterion_main!(benches);

View File

@@ -14,10 +14,8 @@
use criterion::criterion_main;
mod bench_flight_decoder;
mod channel_manager;
criterion_main! {
channel_manager::benches,
bench_flight_decoder::benches
channel_manager::benches
}

View File

@@ -18,7 +18,6 @@ use std::io;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::arrow::error::ArrowError;
use snafu::{Location, Snafu};
pub type Result<T> = std::result::Result<T, Error>;
@@ -60,6 +59,13 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to create RecordBatch"))]
CreateRecordBatch {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to convert Arrow type: {}", from))]
Conversion {
from: String,
@@ -82,6 +88,13 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to convert Arrow Schema"))]
ConvertArrowSchema {
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
},
#[snafu(display("Not supported: {}", feat))]
NotSupported { feat: String },
@@ -92,14 +105,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed arrow operation"))]
Arrow {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: ArrowError,
},
}
impl ErrorExt for Error {
@@ -116,7 +121,8 @@ impl ErrorExt for Error {
| Error::DecodeFlightData { .. }
| Error::SerdeJson { .. } => StatusCode::Internal,
Error::Arrow { .. } => StatusCode::Internal,
Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::ConvertArrowSchema { source, .. } => source.status_code(),
}
}

View File

@@ -21,24 +21,25 @@ use api::v1::{AffectedRows, FlightMetadata, Metrics};
use arrow_flight::utils::flight_data_to_arrow_batch;
use arrow_flight::{FlightData, SchemaAsIpc};
use common_base::bytes::Bytes;
use common_recordbatch::DfRecordBatch;
use common_recordbatch::{RecordBatch, RecordBatches};
use datatypes::arrow;
use datatypes::arrow::buffer::Buffer;
use datatypes::arrow::datatypes::{Schema as ArrowSchema, SchemaRef};
use datatypes::arrow::error::ArrowError;
use datatypes::arrow::ipc::{convert, reader, root_as_message, writer, MessageHeader};
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::arrow::ipc::{root_as_message, writer, MessageHeader};
use datatypes::schema::{Schema, SchemaRef};
use flatbuffers::FlatBufferBuilder;
use prost::bytes::Bytes as ProstBytes;
use prost::Message;
use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{DecodeFlightDataSnafu, InvalidFlightDataSnafu, Result};
use crate::error::{
ConvertArrowSchemaSnafu, CreateRecordBatchSnafu, DecodeFlightDataSnafu, InvalidFlightDataSnafu,
Result,
};
#[derive(Debug, Clone)]
pub enum FlightMessage {
Schema(SchemaRef),
RecordBatch(DfRecordBatch),
Recordbatch(RecordBatch),
AffectedRows(usize),
Metrics(String),
}
@@ -66,12 +67,14 @@ impl Default for FlightEncoder {
impl FlightEncoder {
pub fn encode(&mut self, flight_message: FlightMessage) -> FlightData {
match flight_message {
FlightMessage::Schema(schema) => SchemaAsIpc::new(&schema, &self.write_options).into(),
FlightMessage::RecordBatch(record_batch) => {
FlightMessage::Schema(schema) => {
SchemaAsIpc::new(schema.arrow_schema(), &self.write_options).into()
}
FlightMessage::Recordbatch(recordbatch) => {
let (encoded_dictionaries, encoded_batch) = self
.data_gen
.encoded_batch(
&record_batch,
recordbatch.df_record_batch(),
&mut self.dictionary_tracker,
&self.write_options,
)
@@ -121,59 +124,10 @@ impl FlightEncoder {
#[derive(Default)]
pub struct FlightDecoder {
schema: Option<SchemaRef>,
schema_bytes: Option<bytes::Bytes>,
}
impl FlightDecoder {
/// Build a [FlightDecoder] instance from provided schema bytes.
pub fn try_from_schema_bytes(schema_bytes: &bytes::Bytes) -> Result<Self> {
let arrow_schema = convert::try_schema_from_flatbuffer_bytes(&schema_bytes[..])
.context(error::ArrowSnafu)?;
Ok(Self {
schema: Some(Arc::new(arrow_schema)),
schema_bytes: Some(schema_bytes.clone()),
})
}
pub fn try_decode_record_batch(
&mut self,
data_header: &bytes::Bytes,
data_body: &bytes::Bytes,
) -> Result<DfRecordBatch> {
let schema = self
.schema
.as_ref()
.context(InvalidFlightDataSnafu {
reason: "Should have decoded schema first!",
})?
.clone();
let message = root_as_message(&data_header[..])
.map_err(|err| {
ArrowError::ParseError(format!("Unable to get root as message: {err:?}"))
})
.context(error::ArrowSnafu)?;
let result = message
.header_as_record_batch()
.ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert flight data header to a record batch".to_string(),
)
})
.and_then(|batch| {
reader::read_record_batch(
&Buffer::from(data_body.as_ref()),
batch,
schema,
&HashMap::new(),
None,
&message.version(),
)
})
.context(error::ArrowSnafu)?;
Ok(result)
}
pub fn try_decode(&mut self, flight_data: &FlightData) -> Result<FlightMessage> {
pub fn try_decode(&mut self, flight_data: FlightData) -> Result<FlightMessage> {
let message = root_as_message(&flight_data.data_header).map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
@@ -182,7 +136,7 @@ impl FlightDecoder {
})?;
match message.header_type() {
MessageHeader::NONE => {
let metadata = FlightMetadata::decode(flight_data.app_metadata.clone())
let metadata = FlightMetadata::decode(flight_data.app_metadata)
.context(DecodeFlightDataSnafu)?;
if let Some(AffectedRows { value }) = metadata.affected_rows {
return Ok(FlightMessage::AffectedRows(value as _));
@@ -198,29 +152,36 @@ impl FlightDecoder {
.fail()
}
MessageHeader::Schema => {
let arrow_schema = Arc::new(ArrowSchema::try_from(flight_data).map_err(|e| {
let arrow_schema = ArrowSchema::try_from(&flight_data).map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
}
.build()
})?);
self.schema = Some(arrow_schema.clone());
self.schema_bytes = Some(flight_data.data_header.clone());
Ok(FlightMessage::Schema(arrow_schema))
})?;
let schema =
Arc::new(Schema::try_from(arrow_schema).context(ConvertArrowSchemaSnafu)?);
self.schema = Some(schema.clone());
Ok(FlightMessage::Schema(schema))
}
MessageHeader::RecordBatch => {
let schema = self.schema.clone().context(InvalidFlightDataSnafu {
reason: "Should have decoded schema first!",
})?;
let arrow_schema = schema.arrow_schema().clone();
let arrow_batch =
flight_data_to_arrow_batch(flight_data, schema.clone(), &HashMap::new())
flight_data_to_arrow_batch(&flight_data, arrow_schema, &HashMap::new())
.map_err(|e| {
InvalidFlightDataSnafu {
reason: e.to_string(),
}
.build()
})?;
Ok(FlightMessage::RecordBatch(arrow_batch))
let recordbatch = RecordBatch::try_from_df_record_batch(schema, arrow_batch)
.context(CreateRecordBatchSnafu)?;
Ok(FlightMessage::Recordbatch(recordbatch))
}
other => {
let name = other.variant_name().unwrap_or("UNKNOWN");
@@ -235,22 +196,16 @@ impl FlightDecoder {
pub fn schema(&self) -> Option<&SchemaRef> {
self.schema.as_ref()
}
pub fn schema_bytes(&self) -> Option<bytes::Bytes> {
self.schema_bytes.clone()
}
}
pub fn flight_messages_to_recordbatches(
messages: Vec<FlightMessage>,
) -> Result<Vec<DfRecordBatch>> {
pub fn flight_messages_to_recordbatches(messages: Vec<FlightMessage>) -> Result<RecordBatches> {
if messages.is_empty() {
Ok(vec![])
Ok(RecordBatches::empty())
} else {
let mut recordbatches = Vec::with_capacity(messages.len() - 1);
match &messages[0] {
FlightMessage::Schema(_schema) => {}
let schema = match &messages[0] {
FlightMessage::Schema(schema) => schema.clone(),
_ => {
return InvalidFlightDataSnafu {
reason: "First Flight Message must be schema!",
@@ -261,7 +216,7 @@ pub fn flight_messages_to_recordbatches(
for message in messages.into_iter().skip(1) {
match message {
FlightMessage::RecordBatch(recordbatch) => recordbatches.push(recordbatch),
FlightMessage::Recordbatch(recordbatch) => recordbatches.push(recordbatch),
_ => {
return InvalidFlightDataSnafu {
reason: "Expect the following Flight Messages are all Recordbatches!",
@@ -271,7 +226,7 @@ pub fn flight_messages_to_recordbatches(
}
}
Ok(recordbatches)
RecordBatches::try_new(schema, recordbatches).context(CreateRecordBatchSnafu)
}
}
@@ -292,33 +247,38 @@ fn build_none_flight_msg() -> Bytes {
#[cfg(test)]
mod test {
use arrow_flight::utils::batches_to_flight_data;
use datatypes::arrow::array::Int32Array;
use datatypes::arrow::datatypes::{DataType, Field, Schema};
use datatypes::arrow::datatypes::{DataType, Field};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::vectors::Int32Vector;
use super::*;
use crate::Error;
#[test]
fn test_try_decode() {
let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"n",
DataType::Int32,
true,
)]));
let arrow_schema = ArrowSchema::new(vec![Field::new("n", DataType::Int32, true)]);
let schema = Arc::new(Schema::try_from(arrow_schema.clone()).unwrap());
let batch1 = DfRecordBatch::try_new(
let batch1 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![Some(1), None, Some(3)])) as _],
vec![Arc::new(Int32Vector::from(vec![Some(1), None, Some(3)])) as _],
)
.unwrap();
let batch2 = DfRecordBatch::try_new(
let batch2 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![None, Some(5)])) as _],
vec![Arc::new(Int32Vector::from(vec![None, Some(5)])) as _],
)
.unwrap();
let flight_data =
batches_to_flight_data(&schema, vec![batch1.clone(), batch2.clone()]).unwrap();
let flight_data = batches_to_flight_data(
&arrow_schema,
vec![
batch1.clone().into_df_record_batch(),
batch2.clone().into_df_record_batch(),
],
)
.unwrap();
assert_eq!(flight_data.len(), 3);
let [d1, d2, d3] = flight_data.as_slice() else {
unreachable!()
@@ -327,14 +287,14 @@ mod test {
let decoder = &mut FlightDecoder::default();
assert!(decoder.schema.is_none());
let result = decoder.try_decode(d2);
let result = decoder.try_decode(d2.clone());
assert!(matches!(result, Err(Error::InvalidFlightData { .. })));
assert!(result
.unwrap_err()
.to_string()
.contains("Should have decoded schema first!"));
let message = decoder.try_decode(d1).unwrap();
let message = decoder.try_decode(d1.clone()).unwrap();
assert!(matches!(message, FlightMessage::Schema(_)));
let FlightMessage::Schema(decoded_schema) = message else {
unreachable!()
@@ -343,16 +303,16 @@ mod test {
let _ = decoder.schema.as_ref().unwrap();
let message = decoder.try_decode(d2).unwrap();
assert!(matches!(message, FlightMessage::RecordBatch(_)));
let FlightMessage::RecordBatch(actual_batch) = message else {
let message = decoder.try_decode(d2.clone()).unwrap();
assert!(matches!(message, FlightMessage::Recordbatch(_)));
let FlightMessage::Recordbatch(actual_batch) = message else {
unreachable!()
};
assert_eq!(actual_batch, batch1);
let message = decoder.try_decode(d3).unwrap();
assert!(matches!(message, FlightMessage::RecordBatch(_)));
let FlightMessage::RecordBatch(actual_batch) = message else {
let message = decoder.try_decode(d3.clone()).unwrap();
assert!(matches!(message, FlightMessage::Recordbatch(_)));
let FlightMessage::Recordbatch(actual_batch) = message else {
unreachable!()
};
assert_eq!(actual_batch, batch2);
@@ -360,22 +320,27 @@ mod test {
#[test]
fn test_flight_messages_to_recordbatches() {
let schema = Arc::new(Schema::new(vec![Field::new("m", DataType::Int32, true)]));
let batch1 = DfRecordBatch::try_new(
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
"m",
ConcreteDataType::int32_datatype(),
true,
)]));
let batch1 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![Some(2), None, Some(4)])) as _],
vec![Arc::new(Int32Vector::from(vec![Some(2), None, Some(4)])) as _],
)
.unwrap();
let batch2 = DfRecordBatch::try_new(
let batch2 = RecordBatch::new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![None, Some(6)])) as _],
vec![Arc::new(Int32Vector::from(vec![None, Some(6)])) as _],
)
.unwrap();
let recordbatches = vec![batch1.clone(), batch2.clone()];
let recordbatches =
RecordBatches::try_new(schema.clone(), vec![batch1.clone(), batch2.clone()]).unwrap();
let m1 = FlightMessage::Schema(schema);
let m2 = FlightMessage::RecordBatch(batch1);
let m3 = FlightMessage::RecordBatch(batch2);
let m2 = FlightMessage::Recordbatch(batch1);
let m3 = FlightMessage::Recordbatch(batch2);
let result = flight_messages_to_recordbatches(vec![m2.clone(), m1.clone(), m3.clone()]);
assert!(matches!(result, Err(Error::InvalidFlightData { .. })));

View File

@@ -8,7 +8,6 @@ license.workspace = true
workspace = true
[dependencies]
anyhow = "1"
common-error.workspace = true
common-macro.workspace = true
snafu.workspace = true
@@ -17,11 +16,6 @@ tokio.workspace = true
[target.'cfg(not(windows))'.dependencies]
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats"] }
jemalloc-pprof-utils = { version = "0.7", package = "pprof_util", features = [
"flamegraph",
"symbolize",
] } # for parsing jemalloc prof dump
jemalloc-pprof-mappings = { version = "0.7", package = "mappings" } # for getting the names of functions in the prof dump
[target.'cfg(not(windows))'.dependencies.tikv-jemalloc-sys]
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]

View File

@@ -30,25 +30,12 @@ pub enum Error {
#[snafu(display("Memory profiling is not supported"))]
ProfilingNotSupported,
#[snafu(display("Failed to parse jeheap profile: {}", err))]
ParseJeHeap {
#[snafu(source)]
err: anyhow::Error,
},
#[snafu(display("Failed to dump profile data to flamegraph: {}", err))]
Flamegraph {
#[snafu(source)]
err: anyhow::Error,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::Internal { source } => source.status_code(),
Error::ParseJeHeap { .. } | Error::Flamegraph { .. } => StatusCode::Internal,
Error::ProfilingNotSupported => StatusCode::Unsupported,
}
}

View File

@@ -15,19 +15,16 @@
mod error;
use std::ffi::{c_char, CString};
use std::io::BufReader;
use std::path::PathBuf;
use error::{
BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
ReadOptProfSnafu,
};
use jemalloc_pprof_mappings::MAPPINGS;
use jemalloc_pprof_utils::{parse_jeheap, FlamegraphOptions, StackProfile};
use snafu::{ensure, ResultExt};
use tokio::io::AsyncReadExt;
use crate::error::{FlamegraphSnafu, ParseJeHeapSnafu, Result};
use crate::error::Result;
const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";
@@ -73,26 +70,6 @@ pub async fn dump_profile() -> Result<Vec<u8>> {
Ok(buf)
}
async fn dump_profile_to_stack_profile() -> Result<StackProfile> {
let profile = dump_profile().await?;
let profile = BufReader::new(profile.as_slice());
parse_jeheap(profile, MAPPINGS.as_deref()).context(ParseJeHeapSnafu)
}
pub async fn dump_pprof() -> Result<Vec<u8>> {
let profile = dump_profile_to_stack_profile().await?;
let pprof = profile.to_pprof(("inuse_space", "bytes"), ("space", "bytes"), None);
Ok(pprof)
}
pub async fn dump_flamegraph() -> Result<Vec<u8>> {
let profile = dump_profile_to_stack_profile().await?;
let mut opts = FlamegraphOptions::default();
opts.title = "inuse_space".to_string();
opts.count_name = "bytes".to_string();
let flamegraph = profile.to_flamegraph(&mut opts).context(FlamegraphSnafu)?;
Ok(flamegraph)
}
fn is_prof_enabled() -> Result<bool> {
// safety: OPT_PROF variable, if present, is always a boolean value.
Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })

View File

@@ -17,19 +17,9 @@ pub mod error;
#[cfg(not(windows))]
mod jemalloc;
#[cfg(not(windows))]
pub use jemalloc::{dump_flamegraph, dump_pprof, dump_profile};
pub use jemalloc::dump_profile;
#[cfg(windows)]
pub async fn dump_profile() -> error::Result<Vec<u8>> {
error::ProfilingNotSupportedSnafu.fail()
}
#[cfg(windows)]
pub async fn dump_pprof() -> error::Result<Vec<u8>> {
error::ProfilingNotSupportedSnafu.fail()
}
#[cfg(windows)]
pub async fn dump_flamegraph() -> error::Result<Vec<u8>> {
error::ProfilingNotSupportedSnafu.fail()
}

View File

@@ -8,7 +8,6 @@ license.workspace = true
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon", "dep:deadpool-postgres", "dep:deadpool"]
mysql_kvbackend = ["dep:sqlx", "dep:backon"]
enterprise = []
[lints]
workspace = true
@@ -35,7 +34,6 @@ common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
common-workload.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
@@ -43,7 +41,6 @@ deadpool = { workspace = true, optional = true }
deadpool-postgres = { workspace = true, optional = true }
derive_builder.workspace = true
etcd-client.workspace = true
flexbuffers = "25.2"
futures.workspace = true
futures-util.workspace = true
hex.workspace = true
@@ -51,7 +48,6 @@ humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true
moka.workspace = true
object-store.workspace = true
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
@@ -74,7 +70,6 @@ typetag.workspace = true
[dev-dependencies]
chrono.workspace = true
common-procedure = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-wal = { workspace = true, features = ["testing"] }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }

View File

@@ -24,39 +24,21 @@ use crate::cache::{CacheContainer, Initializer};
use crate::error::Result;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::{FlowId, FlowPartitionId};
use crate::kv_backend::KvBackendRef;
use crate::peer::Peer;
use crate::FlownodeId;
/// Flow id & flow partition key
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FlowIdent {
pub flow_id: FlowId,
pub partition_id: FlowPartitionId,
}
impl FlowIdent {
pub fn new(flow_id: FlowId, partition_id: FlowPartitionId) -> Self {
Self {
flow_id,
partition_id,
}
}
}
/// Cache for TableFlowManager; the table_id part is the key of the outer cache.
/// Maps the flownode_id / flow_id / partition_id identity to the corresponding Peer.
type FlownodeFlowSet = Arc<HashMap<FlowIdent, Peer>>;
type FlownodeSet = Arc<HashMap<FlownodeId, Peer>>;
pub type TableFlownodeSetCacheRef = Arc<TableFlownodeSetCache>;
/// [TableFlownodeSetCache] caches the [TableId] to [FlownodeSet] mapping.
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeFlowSet, CacheIdent>;
pub type TableFlownodeSetCache = CacheContainer<TableId, FlownodeSet, CacheIdent>;
/// Constructs a [TableFlownodeSetCache].
pub fn new_table_flownode_set_cache(
name: String,
cache: Cache<TableId, FlownodeFlowSet>,
cache: Cache<TableId, FlownodeSet>,
kv_backend: KvBackendRef,
) -> TableFlownodeSetCache {
let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
@@ -65,7 +47,7 @@ pub fn new_table_flownode_set_cache(
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
}
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeFlowSet> {
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
Arc::new(move |&table_id| {
let table_flow_manager = table_flow_manager.clone();
Box::pin(async move {
@@ -75,12 +57,7 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
.map(|flows| {
flows
.into_iter()
.map(|(key, value)| {
(
FlowIdent::new(key.flow_id(), key.partition_id()),
value.peer,
)
})
.map(|(key, value)| (key.flownode_id(), value.peer))
.collect::<HashMap<_, _>>()
})
// We must cache the `HashSet` even if it's empty,
@@ -94,33 +71,26 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
}
async fn handle_create_flow(
cache: &Cache<TableId, FlownodeFlowSet>,
cache: &Cache<TableId, FlownodeSet>,
CreateFlow {
flow_id,
source_table_ids,
partition_to_peer_mapping: flow_part2nodes,
flownodes: flownode_peers,
}: &CreateFlow,
) {
for table_id in source_table_ids {
let entry = cache.entry(*table_id);
entry
.and_compute_with(
async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
Some(entry) => {
let mut map = entry.into_value().as_ref().clone();
map.extend(
flow_part2nodes.iter().map(|(part, peer)| {
(FlowIdent::new(*flow_id, *part), peer.clone())
}),
);
map.extend(flownode_peers.iter().map(|peer| (peer.id, peer.clone())));
Op::Put(Arc::new(map))
}
None => {
Op::Put(Arc::new(HashMap::from_iter(flow_part2nodes.iter().map(
|(part, peer)| (FlowIdent::new(*flow_id, *part), peer.clone()),
))))
}
None => Op::Put(Arc::new(HashMap::from_iter(
flownode_peers.iter().map(|peer| (peer.id, peer.clone())),
))),
},
)
.await;
@@ -128,23 +98,21 @@ async fn handle_create_flow(
}
async fn handle_drop_flow(
cache: &Cache<TableId, FlownodeFlowSet>,
cache: &Cache<TableId, FlownodeSet>,
DropFlow {
flow_id,
source_table_ids,
flow_part2node_id,
flownode_ids,
}: &DropFlow,
) {
for table_id in source_table_ids {
let entry = cache.entry(*table_id);
entry
.and_compute_with(
async |entry: Option<moka::Entry<u32, FlownodeFlowSet>>| match entry {
async |entry: Option<moka::Entry<u32, Arc<HashMap<u64, _>>>>| match entry {
Some(entry) => {
let mut set = entry.into_value().as_ref().clone();
for (part, _node) in flow_part2node_id {
let key = FlowIdent::new(*flow_id, *part);
set.remove(&key);
for flownode_id in flownode_ids {
set.remove(flownode_id);
}
Op::Put(Arc::new(set))
@@ -160,7 +128,7 @@ async fn handle_drop_flow(
}
fn invalidator<'a>(
cache: &'a Cache<TableId, FlownodeFlowSet>,
cache: &'a Cache<TableId, FlownodeSet>,
ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
Box::pin(async move {
@@ -186,7 +154,7 @@ mod tests {
use moka::future::CacheBuilder;
use table::table_name::TableName;
use crate::cache::flow::table_flownode::{new_table_flownode_set_cache, FlowIdent};
use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
@@ -246,16 +214,12 @@ mod tests {
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
)
HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(1..=3).map(|i| { (FlowIdent::new(1024, (i - 1) as u32), Peer::empty(i),) })
)
HashMap::from_iter((1..=3).map(|i| { (i, Peer::empty(i),) }))
);
let result = cache.get(1026).await.unwrap().unwrap();
assert_eq!(result.len(), 0);
@@ -267,9 +231,8 @@ mod tests {
let cache = CacheBuilder::new(128).build();
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
flownodes: (1..=5).map(Peer::empty).collect(),
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
@@ -278,54 +241,6 @@ mod tests {
assert_eq!(set.len(), 5);
}
#[tokio::test]
async fn test_replace_flow() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let cache = CacheBuilder::new(128).build();
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5).map(|i| (i as u32, Peer::empty(i + 1))).collect(),
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(set.len(), 5);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(set.len(), 5);
let drop_then_create_flow = vec![
CacheIdent::DropFlow(DropFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
}),
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1026, 1027],
partition_to_peer_mapping: (11..=15)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
}),
CacheIdent::FlowId(2001),
];
cache.invalidate(&drop_then_create_flow).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert!(set.is_empty());
let expected = HashMap::from_iter(
(11..=15).map(|i| (FlowIdent::new(2001, i as u32), Peer::empty(i + 1))),
);
let set = cache.get(1026).await.unwrap().unwrap();
assert_eq!(set.as_ref().clone(), expected);
let set = cache.get(1027).await.unwrap().unwrap();
assert_eq!(set.as_ref().clone(), expected);
}
#[tokio::test]
async fn test_drop_flow() {
let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -333,57 +248,34 @@ mod tests {
let cache = new_table_flownode_set_cache("test".to_string(), cache, mem_kv);
let ident = vec![
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
flownodes: (1..=5).map(Peer::empty).collect(),
}),
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2002,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (11..=12)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
}),
// the same flownode holds multiple flows
CacheIdent::CreateFlow(CreateFlow {
flow_id: 2003,
source_table_ids: vec![1024, 1025],
partition_to_peer_mapping: (1..=5)
.map(|i| (i as u32, Peer::empty(i + 1)))
.collect(),
flownodes: (11..=12).map(Peer::empty).collect(),
}),
];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(set.len(), 12);
assert_eq!(set.len(), 7);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(set.len(), 12);
assert_eq!(set.len(), 7);
let ident = vec![CacheIdent::DropFlow(DropFlow {
flow_id: 2001,
source_table_ids: vec![1024, 1025],
flow_part2node_id: (1..=5).map(|i| (i as u32, i + 1)).collect(),
flownode_ids: vec![1, 2, 3, 4, 5],
})];
cache.invalidate(&ident).await.unwrap();
let set = cache.get(1024).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(11..=12)
.map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
.chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
)
HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
);
let set = cache.get(1025).await.unwrap().unwrap();
assert_eq!(
set.as_ref().clone(),
HashMap::from_iter(
(11..=12)
.map(|i| (FlowIdent::new(2002, i as u32), Peer::empty(i + 1)))
.chain((1..=5).map(|i| (FlowIdent::new(2003, i as u32), Peer::empty(i + 1))))
)
HashMap::from_iter((11..=12).map(|i| { (i, Peer::empty(i),) }))
);
}
}
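A dependency-free sketch of the cache bookkeeping this file maintains, using the simpler of the two shapes in the diff (a table id mapped to flownode-id → peer). The moka cache, Peer type, and CacheIdent plumbing are replaced with plain HashMaps and a string address purely for illustration:

use std::collections::HashMap;

type TableId = u32;
type FlownodeId = u64;
type PeerAddr = String;

// One entry per source table: which flownodes currently serve flows over it.
type FlownodeSet = HashMap<FlownodeId, PeerAddr>;

fn handle_create_flow(
    cache: &mut HashMap<TableId, FlownodeSet>,
    source_table_ids: &[TableId],
    flownodes: &[(FlownodeId, PeerAddr)],
) {
    for table_id in source_table_ids {
        // Extend (or create) the set for every source table of the new flow.
        let set = cache.entry(*table_id).or_default();
        set.extend(flownodes.iter().cloned());
    }
}

fn handle_drop_flow(
    cache: &mut HashMap<TableId, FlownodeSet>,
    source_table_ids: &[TableId],
    flownode_ids: &[FlownodeId],
) {
    for table_id in source_table_ids {
        if let Some(set) = cache.get_mut(table_id) {
            // Remove only the flownodes that belonged to the dropped flow.
            for id in flownode_ids {
                set.remove(id);
            }
        }
    }
}

fn main() {
    let mut cache = HashMap::new();
    let peers: Vec<_> = (1..=3u64).map(|i| (i, format!("peer-{i}"))).collect();
    handle_create_flow(&mut cache, &[1024, 1025], &peers);
    assert_eq!(cache[&1024].len(), 3);

    handle_drop_flow(&mut cache, &[1024], &[1, 2, 3]);
    assert!(cache[&1024].is_empty());
    assert_eq!(cache[&1025].len(), 3);
}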

View File

@@ -16,12 +16,9 @@ use std::sync::Arc;
use crate::error::Result;
use crate::flow_name::FlowName;
use crate::instruction::{CacheIdent, DropFlow};
use crate::instruction::CacheIdent;
use crate::key::flow::flow_info::FlowInfoKey;
use crate::key::flow::flow_name::FlowNameKey;
use crate::key::flow::flow_route::FlowRouteKey;
use crate::key::flow::flownode_flow::FlownodeFlowKey;
use crate::key::flow::table_flow::TableFlowKey;
use crate::key::schema_name::SchemaNameKey;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
@@ -92,40 +89,9 @@ where
let key: SchemaNameKey = schema_name.into();
self.invalidate_key(&key.to_bytes()).await;
}
CacheIdent::CreateFlow(_) => {
CacheIdent::CreateFlow(_) | CacheIdent::DropFlow(_) => {
// Do nothing
}
CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids,
flow_part2node_id,
}) => {
// invalidate flow route/flownode flow/table flow
let mut keys = Vec::with_capacity(
source_table_ids.len() * flow_part2node_id.len()
+ flow_part2node_id.len() * 2,
);
for table_id in source_table_ids {
for (partition_id, node_id) in flow_part2node_id {
let key =
TableFlowKey::new(*table_id, *node_id, *flow_id, *partition_id)
.to_bytes();
keys.push(key);
}
}
for (partition_id, node_id) in flow_part2node_id {
let key =
FlownodeFlowKey::new(*node_id, *flow_id, *partition_id).to_bytes();
keys.push(key);
let key = FlowRouteKey::new(*flow_id, *partition_id).to_bytes();
keys.push(key);
}
for key in keys {
self.invalidate_key(&key).await;
}
}
CacheIdent::FlowName(FlowName {
catalog_name,
flow_name,

View File

@@ -15,7 +15,7 @@
use std::hash::{DefaultHasher, Hash, Hasher};
use std::str::FromStr;
use api::v1::meta::{DatanodeWorkloads, HeartbeatRequest};
use api::v1::meta::HeartbeatRequest;
use common_error::ext::ErrorExt;
use lazy_static::lazy_static;
use regex::Regex;
@@ -161,8 +161,6 @@ pub struct DatanodeStatus {
pub leader_regions: usize,
/// How many follower regions on this node.
pub follower_regions: usize,
/// The workloads of the datanode.
pub workloads: DatanodeWorkloads,
}
/// The status of a frontend.
@@ -283,8 +281,6 @@ impl TryFrom<i32> for Role {
mod tests {
use std::assert_matches::assert_matches;
use common_workload::DatanodeWorkloadType;
use super::*;
use crate::cluster::Role::{Datanode, Frontend};
use crate::cluster::{DatanodeStatus, NodeInfo, NodeInfoKey, NodeStatus};
@@ -317,9 +313,6 @@ mod tests {
wcus: 2,
leader_regions: 3,
follower_regions: 4,
workloads: DatanodeWorkloads {
types: vec![DatanodeWorkloadType::Hybrid.to_i32()],
},
}),
version: "".to_string(),
git_commit: "".to_string(),
@@ -339,7 +332,6 @@ mod tests {
wcus: 2,
leader_regions: 3,
follower_regions: 4,
..
}),
start_time_ms: 1,
..

View File

@@ -15,7 +15,7 @@
use std::collections::HashSet;
use std::str::FromStr;
use api::v1::meta::{DatanodeWorkloads, HeartbeatRequest, RequestHeader};
use api::v1::meta::{HeartbeatRequest, RequestHeader};
use common_time::util as time_util;
use lazy_static::lazy_static;
use regex::Regex;
@@ -27,7 +27,6 @@ use table::metadata::TableId;
use crate::error;
use crate::error::Result;
use crate::heartbeat::utils::get_datanode_workloads;
pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";
const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region";
@@ -66,8 +65,6 @@ pub struct Stat {
pub region_stats: Vec<RegionStat>,
// The node epoch is used to check whether the node has restarted or redeployed.
pub node_epoch: u64,
/// The datanode workloads.
pub datanode_workloads: DatanodeWorkloads,
}
/// The statistics of a region.
@@ -200,7 +197,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {
peer,
region_stats,
node_epoch,
node_workloads,
..
} = value;
@@ -211,7 +207,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {
.map(RegionStat::from)
.collect::<Vec<_>>();
let datanode_workloads = get_datanode_workloads(node_workloads.as_ref());
Ok(Self {
timestamp_millis: time_util::current_time_millis(),
// datanode id
@@ -223,7 +218,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {
region_num: region_stats.len() as u64,
region_stats,
node_epoch: *node_epoch,
datanode_workloads,
})
}
(header, _) => Err(header.clone()),

View File

@@ -21,7 +21,7 @@ use snafu::{ensure, ResultExt};
use strum::AsRefStr;
use crate::cache_invalidator::Context;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::DdlContext;
use crate::error::{Result, SchemaNotFoundSnafu};
use crate::instruction::CacheIdent;
@@ -148,7 +148,7 @@ impl Procedure for AlterDatabaseProcedure {
AlterDatabaseState::UpdateMetadata => self.on_update_metadata().await,
AlterDatabaseState::InvalidateSchemaCache => self.on_invalidate_schema_cache().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {

View File

@@ -32,12 +32,9 @@ use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use strum::AsRefStr;
use table::metadata::TableId;
use crate::ddl::utils::{
add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::utils::{add_peer_context_if_needed, sync_follower_regions};
use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::instruction::CacheIdent;
use crate::error::{DecodeJsonSnafu, Error, MetadataCorruptionSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
@@ -69,7 +66,6 @@ impl AlterLogicalTablesProcedure {
physical_table_info: None,
physical_table_route: None,
physical_columns: vec![],
table_cache_keys_to_invalidate: vec![],
},
}
}
@@ -199,19 +195,16 @@ impl AlterLogicalTablesProcedure {
self.update_physical_table_metadata().await?;
self.update_logical_tables_metadata().await?;
self.data.build_cache_keys_to_invalidate();
self.data.clear_metadata_fields();
self.data.state = AlterTablesState::InvalidateTableCache;
Ok(Status::executing(true))
}
pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
let to_invalidate = &self.data.table_cache_keys_to_invalidate;
let to_invalidate = self.build_table_cache_keys_to_invalidate();
self.context
.cache_invalidator
.invalidate(&Default::default(), to_invalidate)
.invalidate(&Default::default(), &to_invalidate)
.await?;
Ok(Status::done())
}
@@ -224,6 +217,14 @@ impl Procedure for AlterLogicalTablesProcedure {
}
async fn execute(&mut self, _ctx: &Context) -> ProcedureResult<Status> {
let error_handler = |e: Error| {
if e.is_retry_later() {
common_procedure::Error::retry_later(e)
} else {
common_procedure::Error::external(e)
}
};
let state = &self.data.state;
let step = state.as_ref();
@@ -240,7 +241,7 @@ impl Procedure for AlterLogicalTablesProcedure {
AlterTablesState::UpdateMetadata => self.on_update_metadata().await,
AlterTablesState::InvalidateTableCache => self.on_invalidate_table_cache().await,
}
.map_err(map_to_procedure_error)
.map_err(error_handler)
}
fn dump(&self) -> ProcedureResult<String> {
@@ -279,20 +280,6 @@ pub struct AlterTablesData {
physical_table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
physical_table_route: Option<PhysicalTableRouteValue>,
physical_columns: Vec<ColumnMetadata>,
table_cache_keys_to_invalidate: Vec<CacheIdent>,
}
impl AlterTablesData {
/// Clears all data fields except `state` and `table_cache_keys_to_invalidate` after metadata update.
/// This is done to avoid persisting unnecessary data after the update metadata step.
fn clear_metadata_fields(&mut self) {
self.tasks.clear();
self.table_info_values.clear();
self.physical_table_id = 0;
self.physical_table_info = None;
self.physical_table_route = None;
self.physical_columns.clear();
}
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]


@@ -15,12 +15,13 @@
use table::metadata::RawTableInfo;
use table::table_name::TableName;
use crate::ddl::alter_logical_tables::AlterTablesData;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::instruction::CacheIdent;
impl AlterTablesData {
pub(crate) fn build_cache_keys_to_invalidate(&mut self) {
impl AlterLogicalTablesProcedure {
pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
let mut cache_keys = self
.data
.table_info_values
.iter()
.flat_map(|table| {
@@ -30,14 +31,14 @@ impl AlterTablesData {
]
})
.collect::<Vec<_>>();
cache_keys.push(CacheIdent::TableId(self.physical_table_id));
cache_keys.push(CacheIdent::TableId(self.data.physical_table_id));
// Safety: physical_table_info already filled in previous steps
let physical_table_info = &self.physical_table_info.as_ref().unwrap().table_info;
let physical_table_info = &self.data.physical_table_info.as_ref().unwrap().table_info;
cache_keys.push(CacheIdent::TableName(extract_table_name(
physical_table_info,
)));
self.table_cache_keys_to_invalidate = cache_keys;
cache_keys
}
}


@@ -40,11 +40,10 @@ use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
use crate::ddl::utils::{
add_peer_context_if_needed, handle_multiple_results, map_to_procedure_error,
sync_follower_regions, MultipleResults,
add_peer_context_if_needed, handle_multiple_results, sync_follower_regions, MultipleResults,
};
use crate::ddl::DdlContext;
use crate::error::{AbortProcedureSnafu, NoLeaderSnafu, PutPoisonSnafu, Result, RetryLaterSnafu};
use crate::error::{AbortProcedureSnafu, Error, NoLeaderSnafu, PutPoisonSnafu, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
@@ -196,10 +195,7 @@ impl AlterTableProcedure {
}
MultipleResults::AllRetryable(error) => {
// Just return the error and wait for the next retry.
let err = BoxedError::new(error);
Err(err).context(RetryLaterSnafu {
clean_poisons: true,
})
Err(error)
}
MultipleResults::Ok(results) => {
self.submit_sync_region_requests(results, &physical_table_route.region_routes)
@@ -327,6 +323,16 @@ impl Procedure for AlterTableProcedure {
}
async fn execute(&mut self, ctx: &ProcedureContext) -> ProcedureResult<Status> {
let error_handler = |e: Error| {
if e.is_retry_later() {
ProcedureError::retry_later(e)
} else if e.need_clean_poisons() {
ProcedureError::external_and_clean_poisons(e)
} else {
ProcedureError::external(e)
}
};
let state = &self.data.state;
let step = state.as_ref();
@@ -344,7 +350,7 @@ impl Procedure for AlterTableProcedure {
AlterTableState::UpdateMetadata => self.on_update_metadata().await,
AlterTableState::InvalidateTableCache => self.on_broadcast().await,
}
.map_err(map_to_procedure_error)
.map_err(error_handler)
}
fn dump(&self) -> ProcedureResult<String> {
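
Across these procedure files the two sides of the diff differ mainly in where the error translation lives: one side centralizes it in a `map_to_procedure_error` helper, the other inlines an `error_handler` closure (or `handle_retry_error`) in each `execute` implementation. A minimal sketch of that mapping, using only the methods, constructors, and imports that appear in the hunks above; the function name itself is a placeholder, not the crate's real helper:

use common_procedure::error::Error as ProcedureError;

use crate::error::Error;

fn map_error(e: Error) -> ProcedureError {
    if e.is_retry_later() {
        // Retryable errors ask the procedure framework to try again later.
        ProcedureError::retry_later(e)
    } else if e.need_clean_poisons() {
        // Errors that require poison cleanup are surfaced as external errors
        // that also clean the poisons written earlier in the procedure.
        ProcedureError::external_and_clean_poisons(e)
    } else {
        // Everything else aborts the procedure as a plain external error.
        ProcedureError::external(e)
    }
}

Each `execute` then ends with `.map_err(map_error)` (or the equivalent closure), so the individual state-machine steps only ever return the crate's own `Error` type.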


@@ -22,7 +22,7 @@ use serde_with::{serde_as, DefaultOnNull};
use snafu::{ensure, ResultExt};
use strum::AsRefStr;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::schema_name::{SchemaNameKey, SchemaNameValue};
@@ -115,7 +115,7 @@ impl Procedure for CreateDatabaseProcedure {
CreateDatabaseState::Prepare => self.on_prepare().await,
CreateDatabaseState::CreateMetadata => self.on_create_metadata().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {


@@ -36,10 +36,10 @@ use strum::AsRefStr;
use table::metadata::TableId;
use crate::cache_invalidator::Context;
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::DdlContext;
use crate::error::{self, Result, UnexpectedSnafu};
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::instruction::{CacheIdent, CreateFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
@@ -70,7 +70,6 @@ impl CreateFlowProcedure {
query_context,
state: CreateFlowState::Prepare,
prev_flow_info_value: None,
did_replace: false,
flow_type: None,
},
}
@@ -225,7 +224,6 @@ impl CreateFlowProcedure {
.update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
.await?;
info!("Replaced flow metadata for flow {flow_id}");
self.data.did_replace = true;
} else {
self.context
.flow_metadata_manager
@@ -242,43 +240,22 @@ impl CreateFlowProcedure {
debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
// Safety: The flow id must be allocated.
let flow_id = self.data.flow_id.unwrap();
let did_replace = self.data.did_replace;
let ctx = Context {
subject: Some("Invalidate flow cache by creating flow".to_string()),
};
let mut caches = vec![];
// If a replace actually happened, invalidate the flow cache by dropping the old flow.
if did_replace {
let old_flow_info = self.data.prev_flow_info_value.as_ref().unwrap();
// Only `DropFlow` is needed, since the flow name hasn't changed and the flow id is already invalidated below.
caches.extend([CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids: old_flow_info.source_table_ids.clone(),
flow_part2node_id: old_flow_info.flownode_ids().clone().into_iter().collect(),
})]);
}
let (_flow_info, flow_routes) = (&self.data).into();
let flow_part2peers = flow_routes
.into_iter()
.map(|(part_id, route)| (part_id, route.peer))
.collect();
caches.extend([
CacheIdent::CreateFlow(CreateFlow {
flow_id,
source_table_ids: self.data.source_table_ids.clone(),
partition_to_peer_mapping: flow_part2peers,
}),
CacheIdent::FlowId(flow_id),
]);
self.context
.cache_invalidator
.invalidate(&ctx, &caches)
.invalidate(
&ctx,
&[
CacheIdent::CreateFlow(CreateFlow {
source_table_ids: self.data.source_table_ids.clone(),
flownodes: self.data.peers.clone(),
}),
CacheIdent::FlowId(flow_id),
],
)
.await?;
Ok(Status::done_with_output(flow_id))
@@ -304,7 +281,7 @@ impl Procedure for CreateFlowProcedure {
CreateFlowState::CreateMetadata => self.on_create_metadata().await,
CreateFlowState::InvalidateFlowCache => self.on_broadcast().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {
@@ -400,10 +377,6 @@ pub struct CreateFlowData {
/// Used to verify that the previous value is consistent when the flow metadata needs to be updated.
/// Only set when `or_replace` is true.
pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
/// Only set to true when a replace actually happened.
/// This is used to determine whether to invalidate the cache.
#[serde(default)]
pub(crate) did_replace: bool,
pub(crate) flow_type: Option<FlowType>,
}
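
One detail worth noting in the `CreateFlowData` hunk: the `did_replace` field carries `#[serde(default)]`, presumably so that procedure state persisted before the field existed still deserializes, with the flag falling back to `false`. A small self-contained sketch of that serde behaviour, assuming the standard `serde`/`serde_json` crates and reusing the field name from the diff purely for illustration:

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct CreateFlowDataSketch {
    // Present in newer state dumps only; older JSON lacks this key.
    #[serde(default)]
    did_replace: bool,
}

fn main() {
    // JSON written by an older version of the procedure has no `did_replace` key.
    let old_state = r#"{}"#;
    let data: CreateFlowDataSketch = serde_json::from_str(old_state).unwrap();
    // Thanks to `#[serde(default)]`, deserialization succeeds with `did_replace == false`.
    assert!(!data.did_replace);
}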


@@ -33,9 +33,7 @@ use store_api::storage::{RegionId, RegionNumber};
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
use crate::ddl::utils::{
add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error, sync_follower_regions};
use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::key::table_route::TableRouteValue;
@@ -240,7 +238,7 @@ impl Procedure for CreateLogicalTablesProcedure {
CreateTablesState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
CreateTablesState::CreateMetadata => self.on_create_metadata().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {


@@ -34,7 +34,7 @@ use table::table_reference::TableReference;
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::{
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, map_to_procedure_error,
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
region_storage_path,
};
use crate::ddl::{DdlContext, TableMetadata};
@@ -319,7 +319,7 @@ impl Procedure for CreateTableProcedure {
CreateTableState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
CreateTableState::CreateMetadata => self.on_create_metadata().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {


@@ -23,7 +23,7 @@ use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
@@ -249,7 +249,7 @@ impl Procedure for CreateViewProcedure {
CreateViewState::Prepare => self.on_prepare().await,
CreateViewState::CreateMetadata => self.on_create_metadata(ctx).await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {


@@ -21,7 +21,7 @@ use std::any::Any;
use std::fmt::Debug;
use common_error::ext::BoxedError;
use common_procedure::error::{ExternalSnafu, FromJsonSnafu, ToJsonSnafu};
use common_procedure::error::{Error as ProcedureError, ExternalSnafu, FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
@@ -31,7 +31,6 @@ use snafu::ResultExt;
use tonic::async_trait;
use self::start::DropDatabaseStart;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_name::TableNameValue;
@@ -142,7 +141,13 @@ impl Procedure for DropDatabaseProcedure {
let (next, status) = state
.next(&self.runtime_context, &mut self.context)
.await
.map_err(map_to_procedure_error)?;
.map_err(|e| {
if e.is_retry_later() {
ProcedureError::retry_later(e)
} else {
ProcedureError::external(e)
}
})?;
*state = next;
Ok(status)


@@ -323,7 +323,6 @@ mod tests {
}
.build(),
),
clean_poisons: false,
})
}


@@ -13,7 +13,6 @@
// limitations under the License.
mod metadata;
use api::v1::flow::{flow_request, DropRequest, FlowRequest};
use async_trait::async_trait;
use common_catalog::format_full_flow_name;
@@ -30,7 +29,7 @@ use snafu::{ensure, ResultExt};
use strum::AsRefStr;
use crate::cache_invalidator::Context;
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error};
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::flow_name::FlowName;
@@ -154,12 +153,6 @@ impl DropFlowProcedure {
};
let flow_info_value = self.data.flow_info_value.as_ref().unwrap();
let flow_part2nodes = flow_info_value
.flownode_ids()
.clone()
.into_iter()
.collect::<Vec<_>>();
self.context
.cache_invalidator
.invalidate(
@@ -171,9 +164,8 @@ impl DropFlowProcedure {
flow_name: flow_info_value.flow_name.to_string(),
}),
CacheIdent::DropFlow(DropFlow {
flow_id,
source_table_ids: flow_info_value.source_table_ids.clone(),
flow_part2node_id: flow_part2nodes,
flownode_ids: flow_info_value.flownode_ids.values().cloned().collect(),
}),
],
)
@@ -201,7 +193,7 @@ impl Procedure for DropFlowProcedure {
DropFlowState::InvalidateFlowCache => self.on_broadcast().await,
DropFlowState::DropFlows => self.on_flownode_drop_flows().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {


@@ -35,7 +35,7 @@ use table::metadata::TableId;
use table::table_reference::TableReference;
use self::executor::DropTableExecutor;
use crate::ddl::utils::map_to_procedure_error;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
@@ -221,7 +221,7 @@ impl Procedure for DropTableProcedure {
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions().await,
DropTableState::DeleteTombstone => self.on_delete_metadata_tombstone().await,
}
.map_err(map_to_procedure_error)
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {

Some files were not shown because too many files have changed in this diff.