Compare commits


3 Commits

Author: Ruihang Xia · SHA1: 1bfba48755 · Date: 2024-06-03 20:28:59 +08:00
    Revert "build(deps): upgrade opendal to 0.46 (#4037)"
    This reverts commit f9db5ff0d6.

Author: Ruihang Xia · SHA1: 457998f0fe · Date: 2024-05-31 18:16:36 +08:00
    Merge branch 'main' into avoid-query-meta
    Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

Author: Ruihang Xia · SHA1: b02c256157 · Date: 2024-05-31 18:16:08 +08:00
    perf: use memory state to check if a logical region exists
    Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
1335 changed files with 33847 additions and 115631 deletions

View File

@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true

View File

@@ -18,7 +18,6 @@ GT_AZBLOB_ENDPOINT=AZBLOB endpoint
GT_GCS_BUCKET = GCS bucket GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test # Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092 GT_KAFKA_ENDPOINTS = localhost:9092
@@ -29,8 +28,3 @@ GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests # Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/ GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8

View File

@@ -50,7 +50,7 @@ runs:
BUILDX_MULTI_PLATFORM_BUILD=all \ BUILDX_MULTI_PLATFORM_BUILD=all \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image - name: Build and push dev-builder-centos image
shell: bash shell: bash
@@ -61,7 +61,7 @@ runs:
BUILDX_MULTI_PLATFORM_BUILD=amd64 \ BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-android image # Only build image for amd64 platform. - name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash shell: bash
@@ -71,6 +71,6 @@ runs:
BASE_IMAGE=android \ BASE_IMAGE=android \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \ IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }} docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
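
The hunks above rename DEV_BUILDER_IMAGE_TAG to IMAGE_TAG in the right-hand (new) column. Unflattened, one such step reads roughly as the sketch below; the make target name is an assumption, since each hunk starts mid-command:

  - name: Build and push dev-builder-centos image
    shell: bash
    run: |
      # "dev-builder" is an illustrative target name; only the trailing
      # variable assignments are visible in the hunk.
      make dev-builder \
        BUILDX_MULTI_PLATFORM_BUILD=amd64 \
        IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
        IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
        IMAGE_TAG=${{ inputs.version }}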

View File

@@ -24,14 +24,6 @@ inputs:
description: Build android artifacts description: Build android artifacts
required: false required: false
default: 'false' default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs: runs:
using: composite using: composite
steps: steps:
@@ -43,9 +35,7 @@ runs:
make build-by-dev-builder \ make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \ CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \ FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \ BASE_IMAGE=${{ inputs.base-image }}
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts - name: Upload artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
@@ -63,9 +53,7 @@ runs:
shell: bash shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }} if: ${{ inputs.build-android-artifacts == 'true' }}
run: | run: |
cd ${{ inputs.working-dir }} && make strip-android-bin \ cd ${{ inputs.working-dir }} && make strip-android-bin
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts - name: Upload android artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts

View File

@@ -17,12 +17,6 @@ inputs:
description: Enable dev mode, only build standard greptime description: Enable dev mode, only build standard greptime
required: false required: false
default: "false" default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir: working-dir:
description: Working directory to build the artifacts description: Working directory to build the artifacts
required: false required: false
@@ -36,9 +30,7 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance. # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: | run: |
cd ${{ inputs.working-dir }} && \ cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \ make run-it-in-container BUILD_JOBS=4
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed. if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -57,8 +49,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3 - name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }} if: ${{ inputs.dev-mode == 'false' }}
@@ -70,8 +60,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build. - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash shell: bash
@@ -88,8 +76,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime on android base image - name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
@@ -100,5 +86,3 @@ runs:
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

View File

@@ -4,6 +4,9 @@ inputs:
arch: arch:
description: Architecture to build description: Architecture to build
required: true required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile: cargo-profile:
description: Cargo profile to build description: Cargo profile to build
required: true required: true
@@ -40,9 +43,10 @@ runs:
brew install protobuf brew install protobuf
- name: Install rust toolchain - name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
target: ${{ inputs.arch }} toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
- name: Start etcd # For integration tests. - name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
@@ -55,16 +59,9 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests - name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: | run: |
make test sqlness-test make test sqlness-test
@@ -78,8 +75,6 @@ runs:
- name: Build greptime binary - name: Build greptime binary
shell: bash shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: | run: |
make build \ make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \ CARGO_PROFILE=${{ inputs.cargo-profile }} \
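
Since the side-by-side hunks above are flattened, here is a sketch of how the right-hand (new) column of the toolchain change reads in this composite action: a new rust-toolchain input is declared and passed to dtolnay/rust-toolchain@master in place of actions-rust-lang/setup-rust-toolchain@v1:

  inputs:
    rust-toolchain:
      description: Rust toolchain to use
      required: true
  # ...
      - name: Install rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ inputs.rust-toolchain }}
          targets: ${{ inputs.arch }}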

View File

@@ -4,6 +4,9 @@ inputs:
arch: arch:
description: Architecture to build description: Architecture to build
required: true required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile: cargo-profile:
description: Cargo profile to build description: Cargo profile to build
required: true required: true
@@ -25,9 +28,10 @@ runs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
- name: Install rust toolchain - name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
target: ${{ inputs.arch }} toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview components: llvm-tools-preview
- name: Rust Cache - name: Rust Cache
@@ -36,11 +40,11 @@ runs:
- name: Install Python - name: Install Python
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: "3.10" python-version: '3.10'
- name: Install PyArrow Package - name: Install PyArrow Package
shell: pwsh shell: pwsh
run: pip install pyarrow numpy run: pip install pyarrow
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
@@ -58,14 +62,13 @@ runs:
env: env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493 RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed. if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: sqlness-logs name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness* path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
- name: Build greptime binary - name: Build greptime binary

View File

@@ -123,10 +123,10 @@ runs:
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }} DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: | run: |
./.github/scripts/copy-image.sh \ ./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \ ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }} ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push latest greptimedb-centos image from DockerHub to ACR - name: Push greptimedb-centos image from DockerHub to ACR
shell: bash shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }} if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env: env:

View File

@@ -1,17 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh

View File

@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes description: Deploy Etcd cluster on Kubernetes
inputs: inputs:
etcd-replicas: etcd-replicas:
default: 1 default: 3
description: "Etcd replicas" description: "Etcd replicas"
namespace: namespace:
default: "etcd-cluster" default: "etcd-cluster"
@@ -18,8 +18,6 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \ --set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \ --set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \ --set resources.requests.memory=128Mi \
--set resources.limits.cpu=1500m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \ --set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \ --set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \ --set persistence.size=2Gi \
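
Unflattened, the right-hand (new) column keeps the resource requests but drops the CPU/memory limits and raises the default replica count to 3. A sketch of the resulting install step, assuming the Bitnami etcd chart (by analogy with the kafka and postgres setup actions elsewhere in this comparison):

  - name: Install Etcd cluster
    shell: bash
    run: |
      # Chart source, --create-namespace and -n are assumptions; the --set flags
      # are the ones visible in the hunk, minus the removed resource limits.
      helm upgrade --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
        --set replicaCount=${{ inputs.etcd-replicas }} \
        --set resources.requests.cpu=50m \
        --set resources.requests.memory=128Mi \
        --set auth.rbac.create=false \
        --set auth.rbac.token.enabled=false \
        --set persistence.size=2Gi \
        --create-namespace \
        -n ${{ inputs.namespace }}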

View File

@@ -22,21 +22,13 @@ inputs:
etcd-endpoints: etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379" default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints" description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false
runs: runs:
using: composite using: composite
steps: steps:
- name: Install GreptimeDB operator - name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash shell: bash
command: | run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/ helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update helm repo update
helm upgrade \ helm upgrade \
@@ -52,7 +44,6 @@ runs:
helm upgrade \ helm upgrade \
--install my-greptimedb \ --install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \ --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \ --set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \ --set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \ --set image.tag=${{ inputs.image-tag }} \
@@ -66,7 +57,6 @@ runs:
greptime/greptimedb-cluster \ greptime/greptimedb-cluster \
--create-namespace \ --create-namespace \
-n my-greptimedb \ -n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \ --wait \
--wait-for-jobs --wait-for-jobs
- name: Wait for GreptimeDB - name: Wait for GreptimeDB
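
The first hunk above is hard to read flattened: the left-hand (old) column wraps the operator install in nick-fields/retry@v3, while the right-hand (new) column is a plain run step. A sketch of the left-hand column on its own:

  - name: Install GreptimeDB operator
    uses: nick-fields/retry@v3
    with:
      timeout_minutes: 3
      max_attempts: 3
      shell: bash
      command: |
        helm repo add greptime https://greptimeteam.github.io/helm-charts/
        helm repo update
        # ...followed by the operator's `helm upgrade` invocation, which
        # continues past the visible part of the hunk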

View File

@@ -1,13 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4

View File

@@ -1,33 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

View File

@@ -1,29 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123

View File

@@ -1,45 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"

View File

@@ -1,24 +0,0 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}

View File

@@ -1,24 +0,0 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio

View File

@@ -1,30 +0,0 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}

View File

@@ -38,7 +38,7 @@ runs:
steps: steps:
- name: Configure AWS credentials - name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2') if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v4 uses: aws-actions/configure-aws-credentials@v2
with: with:
aws-access-key-id: ${{ inputs.aws-access-key-id }} aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }} aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -25,7 +25,7 @@ runs:
steps: steps:
- name: Configure AWS credentials - name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }} if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v4 uses: aws-actions/configure-aws-credentials@v2
with: with:
aws-access-key-id: ${{ inputs.aws-access-key-id }} aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }} aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

View File

@@ -12,6 +12,9 @@ on:
name: Build API docs name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs: jobs:
apidoc: apidoc:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
@@ -20,7 +23,9 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items - run: cargo doc --workspace --no-deps --document-private-items
- run: | - run: |
cat <<EOF > target/doc/index.html cat <<EOF > target/doc/index.html
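
The toolchain swap here follows a pattern repeated in the workflows below: the right-hand (new) column pins a nightly toolchain once at the workflow level and passes it to dtolnay/rust-toolchain@master. Unflattened, it reads roughly:

  env:
    RUST_TOOLCHAIN: nightly-2024-04-20

  jobs:
    apidoc:
      runs-on: ubuntu-20.04
      steps:
        - uses: dtolnay/rust-toolchain@master
          with:
            toolchain: ${{ env.RUST_TOOLCHAIN }}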

View File

@@ -177,8 +177,6 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary. dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts: build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts name: Build linux-arm64 artifacts
@@ -208,8 +206,6 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary. dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub

View File

@@ -29,6 +29,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
jobs: jobs:
check-typos-and-docs: check-typos-and-docs:
name: Check typos and docs name: Check typos and docs
@@ -61,7 +64,9 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
@@ -77,7 +82,9 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
@@ -100,7 +107,9 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: with:
# Shares across multiple jobs # Shares across multiple jobs
@@ -130,29 +139,17 @@ jobs:
name: Fuzz Test name: Fuzz Test
needs: build needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps: steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
@@ -163,16 +160,14 @@ jobs:
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin cargo +nightly install cargo-fuzz
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
name: bins name: bins
path: . path: .
- name: Unzip binaries - name: Unzip binaries
run: | run: tar -xvf ./bins.tar.gz
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB - name: Run GreptimeDB
run: | run: |
./bins/greptime standalone start& ./bins/greptime standalone start&
@@ -187,30 +182,19 @@ jobs:
unstable-fuzztest: unstable-fuzztest:
name: Unstable Fuzz Test name: Unstable Fuzz Test
needs: build-greptime-ci needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
strategy: strategy:
matrix: matrix:
target: [ "unstable_fuzz_create_table_standalone" ] target: [ "unstable_fuzz_create_table_standalone" ]
steps: steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
@@ -220,22 +204,27 @@ jobs:
shell: bash shell: bash
run: | run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin cargo install cargo-fuzz
- name: Download pre-built binariy - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
name: bin name: bins
path: . path: .
- name: Unzip bianry - name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Build Fuzz Test
shell: bash
run: | run: |
tar -xvf ./bin.tar.gz cd tests-fuzz &
rm ./bin.tar.gz cargo install cargo-gc-bin &
cargo gc &
cd ..
- name: Run Fuzz Test - name: Run Fuzz Test
uses: ./.github/actions/fuzz-test uses: ./.github/actions/fuzz-test
env: env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002 GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bin/greptime GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/ GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with: with:
target: ${{ matrix.target }} target: ${{ matrix.target }}
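
The flattened hunk above interleaves the old and new "Unzip" / "Build Fuzz Test" steps. Read on its own, the right-hand (new) column is roughly:

  - name: Unzip binaries
    run: tar -xvf ./bins.tar.gz
  - name: Build Fuzz Test
    shell: bash
    run: |
      # As written, the single "&" runs each command in the background;
      # "&&" is presumably the intent.
      cd tests-fuzz &
      cargo install cargo-gc-bin &
      cargo gc &
      cd ..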
@@ -261,7 +250,9 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: with:
# Shares across multiple jobs # Shares across multiple jobs
@@ -269,17 +260,10 @@ jobs:
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin run: cargo install cargo-gc-bin
- name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime bianry - name: Build greptime bianry
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime run: cargo build --bin greptime --profile ci
- name: Pack greptime binary - name: Pack greptime binary
shell: bash shell: bash
run: | run: |
@@ -294,49 +278,25 @@ jobs:
version: current version: current
distributed-fuzztest: distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) name: Fuzz Test (Distributed, Disk)
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build-greptime-ci needs: build-greptime-ci
timeout-minutes: 60
strategy: strategy:
matrix: matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps: steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser - name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests # Prepares for fuzz tests
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
@@ -347,7 +307,7 @@ jobs:
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin cargo +nightly install cargo-fuzz
# Downloads ci image # Downloads ci image
- name: Download pre-built binariy - name: Download pre-built binariy
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -355,9 +315,7 @@ jobs:
name: bin name: bin
path: . path: .
- name: Unzip binary - name: Unzip binary
run: | run: tar -xvf ./bin.tar.gz
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image - name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd - name: Wait for etcd
@@ -367,22 +325,6 @@ jobs:
pod -l app.kubernetes.io/instance=etcd \ pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \ --timeout=120s \
-n etcd-cluster -n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info - name: Print etcd info
shell: bash shell: bash
run: kubectl get all --show-labels -n etcd-cluster run: kubectl get all --show-labels -n etcd-cluster
@@ -391,7 +333,6 @@ jobs:
uses: ./.github/actions/setup-greptimedb-cluster uses: ./.github/actions/setup-greptimedb-cluster
with: with:
image-registry: localhost:5001 image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql) - name: Port forward (mysql)
run: | run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb& kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -417,191 +358,21 @@ jobs:
if: failure() if: failure()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }} name: fuzz-tests-kind-logs-${{ matrix.target }}
path: /tmp/kind path: /tmp/kind
retention-days: 3 retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
include:
- target: "fuzz_migrate_mito_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
- target: "fuzz_migrate_metric_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: ${{ matrix.mode.kafka }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test
needs: build needs: build
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-20.04 ] os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
@@ -610,12 +381,42 @@ jobs:
- name: Unzip binaries - name: Unzip binaries
run: tar -xvf ./bins.tar.gz run: tar -xvf ./bins.tar.gz
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs - name: Upload sqlness logs
if: failure() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: sqlness-logs-${{ matrix.mode.name }} name: sqlness-logs
path: /tmp/sqlness*
retention-days: 3
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/sqlness* path: /tmp/sqlness*
retention-days: 3 retention-days: 3
@@ -628,16 +429,17 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt components: rustfmt
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "check-rust-fmt" shared-key: "check-rust-fmt"
- name: Check format - name: Run cargo fmt
run: make fmt-check run: cargo fmt --all -- --check
clippy: clippy:
name: Clippy name: Clippy
@@ -648,8 +450,9 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy components: clippy
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
@@ -673,8 +476,9 @@ jobs:
with: with:
version: "14.0" version: "14.0"
- name: Install toolchain - name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview components: llvm-tools-preview
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
@@ -694,19 +498,13 @@ jobs:
with: with:
python-version: '3.10' python-version: '3.10'
- name: Install PyArrow Package - name: Install PyArrow Package
run: pip install pyarrow numpy run: pip install pyarrow
- name: Setup etcd server - name: Setup etcd server
working-directory: tests-integration/fixtures/etcd working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server - name: Setup kafka server
working-directory: tests-integration/fixtures/kafka working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env: env:
@@ -717,15 +515,8 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload - name: Codecov upload
uses: codecov/codecov-action@v4 uses: codecov/codecov-action@v4

View File

@@ -67,13 +67,19 @@ jobs:
- run: 'echo "No action required"' - run: 'echo "No action required"'
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-20.04 ] os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'

View File

@@ -154,8 +154,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts: build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts name: Build linux-arm64 artifacts
@@ -175,8 +173,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
@@ -203,7 +199,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: true push-latest-tag: false # Don't push the latest tag to registry.
- name: Set nightly build result - name: Set nightly build result
id: set-nightly-build-result id: set-nightly-build-result
@@ -244,7 +240,7 @@ jobs:
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false dev-mode: false
update-version-info: false # Don't update version info in S3. update-version-info: false # Don't update version info in S3.
push-latest-tag: true push-latest-tag: false # Don't push the latest tag to registry.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner

View File

@@ -9,6 +9,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2024-04-20
permissions: permissions:
issues: write issues: write
@@ -30,13 +33,6 @@ jobs:
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-kind
path: /tmp/kind/
retention-days: 3
sqlness-windows: sqlness-windows:
name: Sqlness tests on Windows name: Sqlness tests on Windows
@@ -49,19 +45,19 @@ jobs:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Run sqlness - name: Run sqlness
run: make sqlness-test run: cargo sqlness
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs - name: Upload sqlness logs
if: failure() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: sqlness-logs name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness* path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
test-on-windows: test-on-windows:
@@ -80,8 +76,9 @@ jobs:
with: with:
version: "14.0" version: "14.0"
- name: Install Rust toolchain - name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview components: llvm-tools-preview
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
@@ -92,7 +89,7 @@ jobs:
with: with:
python-version: "3.10" python-version: "3.10"
- name: Install PyArrow Package - name: Install PyArrow Package
run: pip install pyarrow numpy run: pip install pyarrow
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
@@ -112,7 +109,11 @@ jobs:
check-status: check-status:
name: Check status name: Check status
needs: [sqlness-test, sqlness-windows, test-on-windows] needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
outputs: outputs:
@@ -126,7 +127,9 @@ jobs:
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [check-status] needs: [
check-status
]
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}

View File

@@ -1,14 +1,12 @@
name: Release dev-builder images name: Release dev-builder images
on: on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually. workflow_dispatch: # Allows you to run this workflow manually.
inputs: inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image: release_dev_builder_ubuntu_image:
type: boolean type: boolean
description: Release dev-builder-ubuntu image description: Release dev-builder-ubuntu image
@@ -30,103 +28,22 @@ jobs:
name: Release dev builder images name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job. if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
- name: Build and push dev builder images - name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images uses: ./.github/actions/build-dev-builder-images
with: with:
version: ${{ env.VERSION }} version: ${{ inputs.version }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }} dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }} build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }} build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }} build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}
- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: because of https://github.com/containers/skopeo/issues/1874, we use the latest stable skopeo container.
  name: Release dev builder images to CN region
  runs-on: ubuntu-20.04
@@ -134,39 +51,35 @@ jobs:
    release-dev-builder-images
  ]
  steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Push dev-builder-ubuntu image
  shell: bash
  if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image
  shell: bash
  if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image
  shell: bash
  if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}


@@ -33,7 +33,6 @@ on:
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
@@ -83,6 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.10.0 NEXT_RELEASE_VERSION: v0.9.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -123,11 +123,6 @@ jobs:
with:
  fetch-depth: 0
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
# The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -188,8 +183,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
  name: Build linux-arm64 artifacts
@@ -209,8 +202,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-macos-artifacts:
  name: Build macOS artifacts
@@ -249,11 +240,11 @@ jobs:
- uses: ./.github/actions/build-macos-artifacts
  with:
    arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
    cargo-profile: ${{ env.CARGO_PROFILE }}
    features: ${{ matrix.features }}
    version: ${{ needs.allocate-runners.outputs.version }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming. disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
disable-run-tests: true
    artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result
@@ -292,6 +283,7 @@ jobs:
- uses: ./.github/actions/build-windows-artifacts
  with:
    arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
    cargo-profile: ${{ env.CARGO_PROFILE }}
    features: ${{ matrix.features }}
    version: ${{ needs.allocate-runners.outputs.version }}


@@ -16,7 +16,6 @@ repos:
hooks:
  - id: fmt
  - id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"] args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
    stages: [push]
  - id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]


@@ -1,43 +0,0 @@
# GreptimeDB Authors
## Individual Committers (in alphabetical order)
* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
## Team Members (in alphabetical order)
* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)
## All Contributors
[![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)


@@ -2,11 +2,7 @@
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer. Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md) - Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
@@ -30,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
## Code of Conduct
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member. Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
## License
@@ -55,7 +51,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`. - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
#### `pre-commit` Hooks

4967
Cargo.lock generated

File diff suppressed because it is too large


@@ -1,5 +1,6 @@
[workspace] [workspace]
members = [ members = [
"benchmarks",
"src/api", "src/api",
"src/auth", "src/auth",
"src/catalog", "src/catalog",
@@ -20,7 +21,6 @@ members = [
"src/common/mem-prof", "src/common/mem-prof",
"src/common/meta", "src/common/meta",
"src/common/plugins", "src/common/plugins",
"src/common/pprof",
"src/common/procedure", "src/common/procedure",
"src/common/procedure-test", "src/common/procedure-test",
"src/common/query", "src/common/query",
@@ -46,7 +46,6 @@ members = [
"src/object-store", "src/object-store",
"src/operator", "src/operator",
"src/partition", "src/partition",
"src/pipeline",
"src/plugins", "src/plugins",
"src/promql", "src/promql",
"src/puffin", "src/puffin",
@@ -65,7 +64,7 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.9.5" version = "0.8.1"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
@@ -78,7 +77,6 @@ clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny" rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094 # Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow" rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies] [workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can # We turn off default-features for some dependencies here so the workspaces which inherit them can
@@ -91,7 +89,7 @@ aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] } arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] } arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0" arrow-flight = "51.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] } arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
arrow-schema = { version = "51.0", features = ["serde"] } arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
@@ -100,34 +98,34 @@ base64 = "0.21"
bigdecimal = "0.4.2" bigdecimal = "0.4.2"
bitflags = "2.4.1" bitflags = "2.4.1"
bytemuck = "1.12" bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] } bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
config = "0.13.0" config = "0.13.0"
crossbeam-utils = "0.8" crossbeam-utils = "0.8"
dashmap = "5.4" dashmap = "5.4"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" dotenv = "0.15"
etcd-client = { version = "0.13" } # TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
itertools = "0.10" itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4" lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" } meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
mockall = "0.11.4" mockall = "0.11.4"
moka = "0.12" moka = "0.12"
notify = "6.1" notify = "6.1"
@@ -137,59 +135,44 @@ opentelemetry-proto = { version = "0.5", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
"with-serde",
"logs",
] } ] }
parking_lot = "0.12"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] } parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4.3", features = ["ser"] } promql-parser = { version = "0.4" }
prost = "0.12" prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8" rand = "0.8"
ratelimit = "0.9"
regex = "1.8" regex = "1.8"
regex-automata = { version = "0.4" } regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.11", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
"stream", "stream",
"multipart", "multipart",
] } ] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [ rskafka = "0.5"
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8" schemars = "0.8"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3" serde_with = "3"
shadow-rs = "0.35"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.8" snafu = "0.8"
sysinfo = "0.30" sysinfo = "0.30"
# on branch v0.44.x # on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
"visitor", "visitor",
] } ] }
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.40", features = ["full"] } tokio = { version = "1.36", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" } tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] } tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" } tower = { version = "0.4" }
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13" zstd = "0.13"
@@ -199,7 +182,7 @@ auth = { path = "src/auth" }
cache = { path = "src/cache" } cache = { path = "src/cache" }
catalog = { path = "src/catalog" } catalog = { path = "src/catalog" }
client = { path = "src/client" } client = { path = "src/client" }
cmd = { path = "src/cmd", default-features = false } cmd = { path = "src/cmd" }
common-base = { path = "src/common/base" } common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" } common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" } common-config = { path = "src/common/config" }
@@ -215,7 +198,6 @@ common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" } common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" } common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" } common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" } common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" } common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" } common-query = { path = "src/common/query" }
@@ -230,7 +212,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" } datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" } file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" } flow = { path = "src/flow" }
frontend = { path = "src/frontend", default-features = false } frontend = { path = "src/frontend" }
index = { path = "src/index" } index = { path = "src/index" }
log-store = { path = "src/log-store" } log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" } meta-client = { path = "src/meta-client" }
@@ -240,7 +222,6 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" } object-store = { path = "src/object-store" }
operator = { path = "src/operator" } operator = { path = "src/operator" }
partition = { path = "src/partition" } partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" } plugins = { path = "src/plugins" }
promql = { path = "src/promql" } promql = { path = "src/promql" }
puffin = { path = "src/puffin" } puffin = { path = "src/puffin" }
@@ -253,37 +234,25 @@ store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" } substrait = { path = "src/common/substrait" }
table = { path = "src/table" } table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "a10facb353b41460eeb98578868ebf19c2084fac" rev = "80b72716dcde47ec4161478416a5c6c21343364d"
[profile.release] [profile.release]
debug = 1 debug = 1
[profile.nightly] [profile.nightly]
inherits = "release" inherits = "release"
strip = "debuginfo" strip = true
lto = "thin" lto = "thin"
debug = false debug = false
incremental = false incremental = false
[profile.ci] [profile.ci]
inherits = "dev" inherits = "dev"
debug = false
strip = true strip = true
[profile.dev.package.sqlness-runner] [profile.dev.package.sqlness-runner]
debug = false debug = false
strip = true strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true


@@ -8,7 +8,6 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu BASE_IMAGE ?= ubuntu
@@ -16,7 +15,6 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi) OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
# The arguments for running integration tests. # The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9 ETCD_VERSION ?= v3.5.9
@@ -78,7 +76,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder. build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \ make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \ CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \ CARGO_PROFILE=${CARGO_PROFILE} \
@@ -92,7 +90,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android. build-android-bin: ## Build greptime binary for android.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \ make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \ CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \ CARGO_PROFILE=release \
@@ -106,8 +104,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android. strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb \ -v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime' bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean .PHONY: clean
clean: ## Clean the project. clean: ## Clean the project.
@@ -146,7 +144,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \ docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \ --build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \ -f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} . -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx .PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder. multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -163,18 +161,7 @@ nextest: ## Install nextest tools.
.PHONY: sqlness-test .PHONY: sqlness-test
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS} cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check .PHONY: check
check: ## Cargo check all the targets. check: ## Cargo check all the targets.
@@ -191,7 +178,6 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check .PHONY: fmt-check
fmt-check: ## Check code format. fmt-check: ## Check code format.
cargo fmt --all -- --check cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd .PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose. start-etcd: ## Start single node etcd for testing purpose.
@@ -205,23 +191,15 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder. run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS} make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Docs ##@ Docs
config-docs: ## Generate configuration documentation from toml files. config-docs: ## Generate configuration documentation from toml files.
docker run --rm \ docker run --rm \
-v ${PWD}:/greptimedb \ -v ${PWD}:/greptimedb \
-w /greptimedb/config \ -w /greptimedb/config \
toml2docs/toml2docs:v0.1.3 \ toml2docs/toml2docs:v0.1.1 \
-p '##' \ -p '##' \
-t ./config-docs-template.md \ -t ./config-docs-template.md \
-o ./config.md -o ./config.md


@@ -6,12 +6,12 @@
</picture>
</p>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2> <h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> | <a href="https://docs.greptime.com/">User guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
@@ -50,23 +50,24 @@
## Introduction
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale. **GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
## Why GreptimeDB
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
* **Unified all kinds of time series** * **Easy horizontal scaling**
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation. Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
* **Cloud-Edge collaboration** * **Analyzing time-series data**
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient. Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
* **Cloud-native distributed database**
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages. Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
* **Performance and Cost-effective**
@@ -74,7 +75,7 @@ Our core developers have been building time-series data platforms for years. Bas
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview). Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
## Try GreptimeDB
@@ -104,10 +105,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start) * [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [User Guide](https://docs.greptime.com/user-guide/overview) * [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene) * [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [FAQ](https://docs.greptime.com/faq-and-others/faq) * [Operations](https://docs.greptime.com/user-guide/operations/overview)
## Build
@@ -150,10 +151,9 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire
## Project Status
The current version has not yet reached the standards for General Availability. The current version has not yet reached General Availability version standards.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412) In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
## Community
@@ -172,13 +172,6 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise addons, installation service, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details of our commercial license.
## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
@@ -190,8 +183,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concept
## Acknowledgement
Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.

38
benchmarks/Cargo.toml Normal file

@@ -0,0 +1,38 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true

11
benchmarks/README.md Normal file

@@ -0,0 +1,11 @@
Benchmarkers for GreptimeDB
--------------------------------
## Wal Benchmarker
The WAL benchmarker evaluates the performance of GreptimeDB's Write-Ahead Log (WAL) component by measuring its read/write performance under diverse workloads generated by the benchmarker.
### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
Run `./wal_bench -h` to see the arguments the binary accepts. A notable one is the `cfg-file` argument: by supplying a configuration file in TOML format, you can avoid repeatedly specifying long command-line arguments.
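For example, a minimal run might look like the following sketch (it assumes you start from the repository root, use the bundled example config, and have a Kafka broker reachable at the `bootstrap_brokers` that config lists):

```bash
# Build the benchmarker in release mode.
cargo build --release -p benchmarks

# Run the WAL benchmark with the example TOML configuration.
./target/release/wal_bench -c benchmarks/config/wal_bench.example.toml
```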


@@ -0,0 +1,21 @@
# Refers to the documents of `Args` in benchmarks/src/wal.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false


@@ -0,0 +1,326 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(int_roundings)]
use std::fs;
use std::sync::Arc;
use std::time::Instant;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use benchmarks::metrics;
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
use clap::Parser;
use common_telemetry::info;
use common_wal::config::kafka::common::BackoffConfig;
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::options::{KafkaWalOptions, WalOptions};
use itertools::Itertools;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use mito2::wal::Wal;
use prometheus::{Encoder, TextEncoder};
use rand::distributions::{Alphanumeric, DistString};
use rand::rngs::SmallRng;
use rand::SeedableRng;
use rskafka::client::partition::Compression;
use rskafka::client::ClientBuilder;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
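    // Partition the regions into one chunk per worker; each spawned worker task
    // only writes to and replays the regions in its own chunk.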
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
let region_chunks = (0..cfg.num_regions)
.map(|id| {
build_region(
id as u64,
topics,
&mut SmallRng::seed_from_u64(cfg.rng_seed),
cfg,
)
})
.chunks(chunk_size as usize)
.into_iter()
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
.collect::<Vec<_>>();
let mut write_elapsed = 0;
let mut read_elapsed = 0;
if !cfg.skip_write {
info!("Benchmarking write ...");
let num_scrapes = cfg.num_scrapes;
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for _ in 0..num_scrapes {
let mut wal_writer = wal.writer();
regions
.iter()
.for_each(|region| region.add_wal_entry(&mut wal_writer));
wal_writer.write_to_wal().await.unwrap();
}
})
}))
.await;
write_elapsed += timer.elapsed().as_millis();
}
if !cfg.skip_read {
info!("Benchmarking read ...");
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for region in regions.iter() {
region.replay(&wal).await;
}
})
}))
.await;
read_elapsed = timer.elapsed().as_millis();
}
dump_report(cfg, write_elapsed, read_elapsed);
}
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
let wal_options = match cfg.wal_provider {
WalProvider::Kafka => {
assert!(!topics.is_empty());
WalOptions::Kafka(KafkaWalOptions {
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
})
}
WalProvider::RaftEngine => WalOptions::RaftEngine,
};
Region::new(
RegionId::from_u64(id),
build_schema(&parse_col_types(&cfg.col_types), rng),
wal_options,
cfg.num_rows,
cfg.rng_seed,
)
}
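/// Builds a schema with one randomly named field column per requested column type,
/// plus a trailing `ts` timestamp column.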
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
col_types
.iter()
.map(|col_type| ColumnSchema {
column_name: Alphanumeric.sample_string(&mut rng, 5),
datatype: *col_type as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: None,
})
.chain(vec![ColumnSchema {
column_name: "ts".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Tag as i32,
datatype_extension: None,
}])
.collect()
}
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
let cost_report = format!(
"write costs: {} ms, read costs: {} ms",
write_elapsed, read_elapsed,
);
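    // Throughput is reported in bytes per second: elapsed times are in milliseconds,
    // so the byte totals are scaled by 1000 before dividing.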
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
let write_throughput = if write_elapsed > 0 {
(total_written_bytes * 1000).div_floor(write_elapsed)
} else {
0
};
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
let read_throughput = if read_elapsed > 0 {
(total_read_bytes * 1000).div_floor(read_elapsed)
} else {
0
};
let throughput_report = format!(
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
total_written_bytes,
total_read_bytes,
write_throughput,
write_throughput.div_floor(1 << 20),
read_throughput,
read_throughput.div_floor(1 << 20),
);
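    // When `report_metrics` is enabled, render the full Prometheus registry in text
    // exposition format and append it to the report.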
let metrics_report = if cfg.report_metrics {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
let metrics = prometheus::gather();
encoder.encode(&metrics, &mut buffer).unwrap();
String::from_utf8(buffer).unwrap()
} else {
String::new()
};
info!(
r#"
Benchmark config:
{cfg:?}
Benchmark report:
{cost_report}
{throughput_report}
{metrics_report}"#
);
}
async fn create_topics(cfg: &Config) -> Vec<String> {
// Creates topics.
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
.build()
.await
.unwrap();
let ctrl_client = client.controller_client().unwrap();
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
.map(|i| {
let topic = if cfg.random_topics {
format!(
"greptime_wal_bench_topic_{}_{}",
uuid::Uuid::new_v4().as_u128(),
i
)
} else {
format!("greptime_wal_bench_topic_{}", i)
};
let task = ctrl_client.create_topic(
topic.clone(),
1,
cfg.bootstrap_brokers.len() as i16,
2000,
);
(topic, task)
})
.unzip();
// Must ignore errors since we allow topics being created more than once.
let _ = futures::future::try_join_all(tasks).await;
topics
}
fn parse_compression(comp: &str) -> Compression {
match comp {
"no" => Compression::NoCompression,
"gzip" => Compression::Gzip,
"lz4" => Compression::Lz4,
"snappy" => Compression::Snappy,
"zstd" => Compression::Zstd,
other => unreachable!("Unrecognized compression {other}"),
}
}
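/// Parses a column-type pattern such as "ifs" or "ifsx2": each character maps to a data
/// type (`i` = Int64, `f` = Float64, `s` = String), and an optional `xN` suffix repeats
/// the pattern N times.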
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
let parts = col_types.split('x').collect::<Vec<_>>();
assert!(parts.len() <= 2);
let pattern = parts[0];
let repeat = parts
.get(1)
.map(|r| r.parse::<usize>().unwrap())
.unwrap_or(1);
pattern
.chars()
.map(|c| match c {
'i' | 'I' => ColumnDataType::Int64,
'f' | 'F' => ColumnDataType::Float64,
's' | 'S' => ColumnDataType::String,
other => unreachable!("Cannot parse {other} as a column data type"),
})
.cycle()
.take(pattern.len() * repeat)
.collect()
}
fn main() {
    // Sets the global log level to INFO and suppresses rskafka logs below ERROR.
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
common_telemetry::init_default_ut_logging();
let args = Args::parse();
let cfg = if !args.cfg_file.is_empty() {
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
} else {
Config::from(args)
};
// Validates arguments.
if cfg.num_regions < cfg.num_workers {
panic!("num_regions must be greater than or equal to num_workers");
}
if cfg
.num_workers
.min(cfg.num_topics)
.min(cfg.num_regions)
.min(cfg.num_scrapes)
.min(cfg.max_batch_size.as_bytes() as u32)
.min(cfg.bootstrap_brokers.len() as u32)
== 0
{
panic!("Invalid arguments");
}
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
match cfg.wal_provider {
WalProvider::Kafka => {
let topics = create_topics(&cfg).await;
let kafka_cfg = KafkaConfig {
broker_endpoints: cfg.bootstrap_brokers.clone(),
max_batch_size: cfg.max_batch_size,
linger: cfg.linger,
backoff: BackoffConfig {
init: cfg.backoff_init,
max: cfg.backoff_max,
base: cfg.backoff_base,
deadline: Some(cfg.backoff_deadline),
},
compression: parse_compression(&cfg.compression),
..Default::default()
};
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &topics, wal).await;
}
WalProvider::RaftEngine => {
// The benchmarker assumes the raft engine directory exists.
let store = RaftEngineLogStore::try_new(
"/tmp/greptimedb/raft-engine-wal".to_string(),
RaftEngineConfig::default(),
)
.await
.map(Arc::new)
.unwrap();
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &[], wal).await;
}
}
});
}
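// Hypothetical invocation sketch (the binary/target name is an assumption; check
// benchmarks/Cargo.toml for the actual one). Arguments may come from the CLI flags above
// or from a TOML file passed via `-c`:
//
//   cargo run --release --bin wal_bench -- -c benchmarks/config/wal_bench.example.toml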

View File

@@ -12,6 +12,5 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
mod abort; pub mod metrics;
mod finish; pub mod wal_bench;
mod update;

39
benchmarks/src/metrics.rs Normal file
View File

@@ -0,0 +1,39 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
use prometheus::*;
/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";
lazy_static! {
/// Counters of bytes of each operation on a logstore.
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
"greptime_bench_wal_op_bytes_total",
"wal operation bytes total",
&[OPTYPE_LABEL],
)
.unwrap();
/// Counter of bytes of the append_batch operation.
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["write"],
);
/// Counter of bytes of the read operation.
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["read"],
);
}
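// A hedged usage sketch (hypothetical helper, not in the original file): the benchmark's
// write and replay paths bump the counters above with `inc_by`, and the final report can
// read the accumulated totals back with `get()` or via `prometheus::gather()`.
#[allow(dead_code)]
pub fn wal_bytes_snapshot() -> (u64, u64) {
    (
        METRIC_WAL_WRITE_BYTES_TOTAL.get(),
        METRIC_WAL_READ_BYTES_TOTAL.get(),
    )
}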

366
benchmarks/src/wal_bench.rs Normal file
View File

@@ -0,0 +1,366 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::mem::size_of;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
use clap::{Parser, ValueEnum};
use common_base::readable_size::ReadableSize;
use common_wal::options::WalOptions;
use futures::StreamExt;
use mito2::wal::{Wal, WalWriter};
use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use crate::metrics;
/// The wal provider.
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum WalProvider {
#[default]
RaftEngine,
Kafka,
}
#[derive(Parser)]
pub struct Args {
/// The provided configuration file.
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
#[clap(long, short = 'c')]
pub cfg_file: String,
/// The wal provider.
#[clap(long, value_enum, default_value_t = WalProvider::default())]
pub wal_provider: WalProvider,
/// The advertised addresses of the kafka brokers.
/// If there are multiple bootstrap brokers, separate their addresses with commas, e.g. "localhost:9092,localhost:9093".
#[clap(long, short = 'b', default_value = "localhost:9092")]
pub bootstrap_brokers: String,
/// The number of workers each running in a dedicated thread.
#[clap(long, default_value_t = num_cpus::get() as u32)]
pub num_workers: u32,
/// The number of kafka topics to be created.
#[clap(long, default_value_t = 32)]
pub num_topics: u32,
/// The number of regions.
#[clap(long, default_value_t = 1000)]
pub num_regions: u32,
/// The number of times each region is scraped.
#[clap(long, default_value_t = 1000)]
pub num_scrapes: u32,
/// The number of rows in each wal entry.
/// Each time a region is scraped, a wal entry containing `num_rows` rows will be produced.
#[clap(long, default_value_t = 5)]
pub num_rows: u32,
/// The column types of the schema for each region.
/// Currently, three column types are supported:
/// - i = ColumnDataType::Int64
/// - f = ColumnDataType::Float64
/// - s = ColumnDataType::String
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
///
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
/// This feature is useful if you want to specify many columns.
#[clap(long, default_value = "ifs")]
pub col_types: String,
/// The maximum size of a batch of kafka records.
/// The default value is 512KB.
#[clap(long, default_value = "512KB")]
pub max_batch_size: ReadableSize,
/// The minimum amount of time the kafka client waits before issuing a batch of kafka records.
/// However, a batch of kafka records is issued immediately if an incoming record cannot fit into the current batch.
#[clap(long, default_value = "1ms")]
pub linger: String,
/// The initial backoff delay of the kafka consumer.
#[clap(long, default_value = "10ms")]
pub backoff_init: String,
/// The maximum backoff delay of the kafka consumer.
#[clap(long, default_value = "1s")]
pub backoff_max: String,
/// The exponential backoff rate of the kafka consumer, i.e. the next backoff = base * the current backoff
/// (a hedged schedule sketch follows this struct).
#[clap(long, default_value_t = 2)]
pub backoff_base: u32,
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
#[clap(long, default_value = "3s")]
pub backoff_deadline: String,
/// The client-side compression algorithm for kafka records.
#[clap(long, default_value = "zstd")]
pub compression: String,
/// The seed of random number generators.
#[clap(long, default_value_t = 42)]
pub rng_seed: u64,
/// Skips the read phase, i.e. region replay, if set to true.
#[clap(long, default_value_t = false)]
pub skip_read: bool,
/// Skips the write phase if set to true.
#[clap(long, default_value_t = false)]
pub skip_write: bool,
/// Randomly generates topic names if set to true.
/// Useful when you want to run the benchmarker without worrying about topics created by previous runs.
#[clap(long, default_value_t = false)]
pub random_topics: bool,
/// Logs out the gathered prometheus metrics when the benchmarker ends.
#[clap(long, default_value_t = false)]
pub report_metrics: bool,
}
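// A hedged illustration (hypothetical helper, not part of the original file) of how the
// backoff options above are assumed to interact. With the defaults `backoff_init = 10ms`,
// `backoff_max = 1s`, `backoff_base = 2` and `backoff_deadline = 3s`, delays grow
// 10ms, 20ms, 40ms, ... capped at 1s, and retrying stops once the accumulated delay
// reaches the deadline.
#[allow(dead_code)]
fn backoff_schedule(init: Duration, max: Duration, base: u32, deadline: Duration) -> Vec<Duration> {
    let mut delays = Vec::new();
    let mut current = init;
    let mut total = Duration::ZERO;
    while total + current <= deadline {
        delays.push(current);
        total += current;
        current = (current * base).min(max);
    }
    delays
}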
/// Benchmarker config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub wal_provider: WalProvider,
pub bootstrap_brokers: Vec<String>,
pub num_workers: u32,
pub num_topics: u32,
pub num_regions: u32,
pub num_scrapes: u32,
pub num_rows: u32,
pub col_types: String,
pub max_batch_size: ReadableSize,
#[serde(with = "humantime_serde")]
pub linger: Duration,
#[serde(with = "humantime_serde")]
pub backoff_init: Duration,
#[serde(with = "humantime_serde")]
pub backoff_max: Duration,
pub backoff_base: u32,
#[serde(with = "humantime_serde")]
pub backoff_deadline: Duration,
pub compression: String,
pub rng_seed: u64,
pub skip_read: bool,
pub skip_write: bool,
pub random_topics: bool,
pub report_metrics: bool,
}
impl From<Args> for Config {
fn from(args: Args) -> Self {
Self {
wal_provider: args.wal_provider,
bootstrap_brokers: args
.bootstrap_brokers
.split(',')
.map(ToString::to_string)
.collect::<Vec<_>>(),
num_workers: args.num_workers.min(num_cpus::get() as u32),
num_topics: args.num_topics,
num_regions: args.num_regions,
num_scrapes: args.num_scrapes,
num_rows: args.num_rows,
col_types: args.col_types,
max_batch_size: args.max_batch_size,
linger: humantime::parse_duration(&args.linger).unwrap(),
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
backoff_base: args.backoff_base,
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
compression: args.compression,
rng_seed: args.rng_seed,
skip_read: args.skip_read,
skip_write: args.skip_write,
random_topics: args.random_topics,
report_metrics: args.report_metrics,
}
}
}
/// The region used by the wal benchmarker.
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
provider: Provider,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
rng: Mutex<Option<SmallRng>>,
num_rows: u32,
}
impl Region {
/// Creates a new region.
pub fn new(
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
num_rows: u32,
rng_seed: u64,
) -> Self {
let provider = match wal_options {
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
};
Self {
id,
schema,
provider,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
num_rows,
}
}
/// Scrapes the region and adds the generated entry to wal.
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
let mutation = Mutation {
op_type: OpType::Put as i32,
sequence: self
.next_sequence
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
rows: Some(self.build_rows()),
};
let entry = WalEntry {
mutations: vec![mutation],
};
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
wal_writer
.add_entry(
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.provider,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
}
}
/// Computes the estimated size in bytes of the entry.
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
let wrapper_size = size_of::<WalEntry>()
+ entry.mutations.capacity() * size_of::<Mutation>()
+ size_of::<Rows>();
let rows = entry.mutations[0].rows.as_ref().unwrap();
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
+ rows
.schema
.iter()
.map(|s| s.column_name.capacity())
.sum::<usize>();
let values_size = (rows.rows.capacity() * size_of::<Row>())
+ rows
.rows
.iter()
.map(|r| r.values.capacity() * size_of::<Value>())
.sum::<usize>();
wrapper_size + schema_size + values_size
}
fn build_rows(&self) -> Rows {
let cols = self
.schema
.iter()
.map(|col_schema| {
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
self.build_col(&col_data_type, self.num_rows)
})
.collect::<Vec<_>>();
let rows = (0..self.num_rows)
.map(|i| {
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
Row { values }
})
.collect();
Rows {
schema: self.schema.clone(),
rows,
}
}
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
let mut rng_guard = self.rng.lock().unwrap();
let rng = rng_guard.as_mut().unwrap();
match col_data_type {
ColumnDataType::TimestampMillisecond => (0..num_rows)
.map(|_| {
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
Value {
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
}
})
.collect(),
ColumnDataType::Int64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0, 10_000));
Value {
value_data: Some(ValueData::I64Value(v)),
}
})
.collect(),
ColumnDataType::Float64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0.0, 5000.0));
Value {
value_data: Some(ValueData::F64Value(v)),
}
})
.collect(),
ColumnDataType::String => (0..num_rows)
.map(|_| {
let v = Alphanumeric.sample_string(rng, 10);
Value {
value_data: Some(ValueData::StringValue(v)),
}
})
.collect(),
_ => unreachable!(),
}
}
}
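// Hypothetical driver helpers (not part of the original file) sketching how `Region` is
// assumed to be exercised by the benchmark: the write phase funnels every region through a
// `WalWriter` obtained from the `Wal`, and the read phase replays each region back.
#[allow(dead_code)]
pub fn scrape_regions<S: LogStore>(regions: &[Region], wal_writer: &mut WalWriter<S>) {
    for region in regions {
        region.add_wal_entry(wal_writer);
    }
}

#[allow(dead_code)]
pub async fn replay_regions<S: LogStore>(regions: &[Region], wal: &Arc<Wal<S>>) {
    for region in regions {
        region.replay(wal).await;
    }
}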

View File

@@ -1,12 +1,10 @@
# Configurations # Configurations
- [Configurations](#configurations) - [Standalone Mode](#standalone-mode)
- [Standalone Mode](#standalone-mode) - [Distributed Mode](#distributed-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend) - [Frontend](#frontend)
- [Metasrv](#metasrv) - [Metasrv](#metasrv)
- [Datanode](#datanode) - [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode ## Standalone Mode
@@ -25,7 +23,3 @@
### Datanode ### Datanode
{{ toml2docs "./datanode.example.toml" }} {{ toml2docs "./datanode.example.toml" }}
### Flownode
{{ toml2docs "./flownode.example.toml"}}

View File

@@ -1,12 +1,10 @@
# Configurations # Configurations
- [Configurations](#configurations) - [Standalone Mode](#standalone-mode)
- [Standalone Mode](#standalone-mode) - [Distributed Mode](#distributed-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend) - [Frontend](#frontend)
- [Metasrv](#metasrv) - [Metasrv](#metasrv)
- [Datanode](#datanode) - [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode ## Standalone Mode
@@ -14,24 +12,18 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. | | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. | | `default_timezone` | String | `None` | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. | | `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. | | `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. | | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. | | `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
@@ -39,8 +31,8 @@
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. | | `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgresSQL server options. | | `postgres` | -- | -- | PostgresSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
@@ -48,8 +40,8 @@
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. | | `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. | | `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
@@ -60,7 +52,7 @@
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. | | `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -69,21 +61,14 @@
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. | | `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` | | `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. | | `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. | | `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. | | `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. | | `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. | | `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
@@ -93,22 +78,21 @@
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. | | `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. | | `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. | | `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. | | `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. | | `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. | | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. | | `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. | | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -116,71 +100,50 @@
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. | | `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. | | `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). | | `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). | | `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. | | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. | | `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. | | `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. | | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. | | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. | | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. | | `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. | | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. | | `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
| `export_metrics.self_import.db` | String | Unset | -- | | `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. | | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
## Distributed Mode ## Distributed Mode
@@ -189,25 +152,22 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `runtime` | -- | -- | The runtime options. | | `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. | | `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. | | `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. | | `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. | | `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. | | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. | | `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
@@ -215,8 +175,8 @@
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. | | `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgresSQL server options. | | `postgres` | -- | -- | PostgresSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
@@ -224,8 +184,8 @@
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. | | `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. | | `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
@@ -249,29 +209,23 @@
| `datanode.client.connect_timeout` | String | `10s` | -- | | `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- | | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. | | `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. | | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself.<br/>You must create the database before enabling it. | | `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself. |
| `export_metrics.self_import.db` | String | Unset | -- | | `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. | | `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
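For illustration only, a minimal TOML sketch of the MySQL/PostgreSQL TLS options described above; the certificate paths are placeholders rather than defaults, and `verify-ca`/`require` are simply two of the modes listed for `mysql.tls.mode`:

```toml
# Placeholder paths; adjust to your deployment.
[mysql.tls]
mode = "verify-ca"
cert_path = "/etc/greptimedb/tls/server.crt"
key_path = "/etc/greptimedb/tls/server.key"
# Reload automatically when the certificate or key file changes.
watch = true

[postgres.tls]
mode = "require"
cert_path = "/etc/greptimedb/tls/server.crt"
key_path = "/etc/greptimedb/tls/server.key"
```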
### Metasrv ### Metasrv
@@ -281,37 +235,31 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. | | `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. | | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. | | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Store server address, defaults to the etcd store. | | `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". | | `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. | | `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. | | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. | | `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. | | `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. | | `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially | | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to avoid splitting large values (no limit). | | `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to avoid splitting large values (no limit). |
| `failure_detector` | -- | -- | -- | | `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. | | `failure_detector.threshold` | Float | `8.0` | -- |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. | | `failure_detector.min_std_deviation` | String | `100ms` | -- |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. | | `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. | | `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
| `datanode` | -- | -- | Datanode options. | | `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. | | `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. | | `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. | | `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. | | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `wal` | -- | -- | -- | | `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- | | `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. | | `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.auto_create_topics` | Bool | `true` | Whether to automatically create topics for WAL.<br/>If set to `false`, use pre-created topics named `topic_name_prefix_[0..num_topics)`. | | `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) | | `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. | | `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. | | `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. | | `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. | | `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -319,29 +267,23 @@
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. | | `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. | | `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. | | `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. | | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself.<br/>You must create the database before enabling it. | | `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself. |
| `export_metrics.self_import.db` | String | Unset | -- | | `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. | | `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
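As a sketch only, the metasrv remote WAL (Kafka) options above could be combined like this; the broker endpoint is a placeholder and every value shown is the documented default:

```toml
# Example values; the broker endpoint is a placeholder.
[wal]
provider = "raft_engine"          # switch to "kafka" for remote WAL
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
```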
### Datanode ### Datanode
@@ -349,35 +291,15 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. | | `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. | | `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. | | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. | | `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. | | `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -393,7 +315,7 @@
| `meta_client.metadata_cache_tti` | String | `5m` | -- | | `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `wal` | -- | -- | The WAL options. | | `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -402,36 +324,32 @@
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. | | `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. | | `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. | | `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. | | `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. | | `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. | | `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. | | `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. | | `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.account_name` | String | `None` | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. | | `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. | | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. | | `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. | | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -439,109 +357,47 @@
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. | | `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. | | `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). | | `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). | | `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. | | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. | | `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. | | `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. | | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. | | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. | | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. | | `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. | | `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself.<br/>You must create the database before enabling it. | | `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting metrics generated by itself. |
| `export_metrics.self_import.db` | String | Unset | -- | | `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. | | `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
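A hypothetical datanode storage snippet assembled from the `storage` options above; the bucket, endpoint, and region are placeholders, the cache values mirror the examples used later in this file, and, as the table notes, IAM roles are recommended over hardcoded credentials:

```toml
# Placeholder bucket/endpoint/region; prefer IAM roles over hardcoded keys.
[storage]
data_home = "/tmp/greptimedb/"
type = "S3"
bucket = "my-greptimedb-bucket"
root = "data"
endpoint = "https://s3.us-west-2.amazonaws.com"
region = "us-west-2"
# Optional local file cache for object storage.
cache_path = "/path/local_cache"
cache_capacity = "256MB"
```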
### Flownode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
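A minimal flownode sketch built from the options above; the node id and addresses are illustrative placeholders, and the remaining values are the documented defaults:

```toml
# Illustrative values; node_id and addresses are placeholders.
node_id = 14

[grpc]
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"

[heartbeat]
interval = "3s"
```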
View File
@@ -2,7 +2,7 @@
mode = "standalone" mode = "standalone"
## The datanode identifier and should be unique in the cluster. ## The datanode identifier and should be unique in the cluster.
## @toml2docs:none-default ## +toml2docs:none-default
node_id = 42 node_id = 42
## Start services after regions have obtained leases. ## Start services after regions have obtained leases.
@@ -13,84 +13,24 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized. ## By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
## Enable telemetry to collect anonymous usage data. ## The gRPC address of the datanode.
enable_telemetry = true
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## Deprecated, use `grpc.addr` instead.
## @toml2docs:none-default
rpc_addr = "127.0.0.1:3001" rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead. ## The hostname of the datanode.
## @toml2docs:none-default ## +toml2docs:none-default
rpc_hostname = "127.0.0.1" rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead. ## The number of gRPC server worker threads.
## @toml2docs:none-default
rpc_runtime_size = 8 rpc_runtime_size = 8
## Deprecated, use `grpc.rpc_max_recv_message_size` instead. ## The maximum receive message size for gRPC server.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB" rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.rpc_max_send_message_size` instead. ## The maximum send message size for gRPC server.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB" rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
## The HTTP server options. enable_telemetry = true
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -138,7 +78,7 @@ provider = "raft_engine"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
@@ -173,9 +113,6 @@ prefill_log_files = false
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
@@ -183,7 +120,11 @@ broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
@@ -205,43 +146,6 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Whether to enable WAL index creation.
## **It's only used when the provider is `kafka`**.
create_index = true
## The interval for dumping WAL indexes.
## **It's only used when the provider is `kafka`**.
dump_index_interval = "60s"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
@@ -278,7 +182,6 @@ overwrite_entry_start_id = false
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
@@ -296,104 +199,85 @@ type = "File"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
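# The same provider pattern extends to the other supported backends. As an
# illustration only (bucket, keys and endpoint below are placeholders), an
# Aliyun OSS provider would combine the `Oss`-specific keys documented above:
# [[storage.providers]]
# name = "Oss"
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss.example.com"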
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -402,7 +286,7 @@ region = "us-west-2"
[region_engine.mito]
## Number of region workers.
#+ num_workers = 8
## Request channel size of each worker.
worker_channel_size = 128
@@ -416,48 +300,29 @@ manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
## Max number of running background flush jobs (default: 1/2 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_size = "1GB"
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
## @toml2docs:none-default="Auto"
#+ global_write_buffer_reject_size = "2GB"
## Cache size for SST metadata. Setting it to 0 to disable the cache.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -469,8 +334,7 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -487,76 +351,31 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
## The default name for this directory is `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
@@ -575,53 +394,31 @@ data_freeze_threshold = 32768
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
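# For reference, a sketch of pinning the memtable settings shown above (values
# are illustrative; per the note above, `fork_dictionary_bytes` only applies to
# the `partition_tree` memtable):
# [region_engine.mito.memtable]
# type = "partition_tree"
# data_freeze_threshold = 32768
# fork_dictionary_bytes = "1GiB"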
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
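# For example, a sketch that turns the slow query log on and samples a tenth of
# the matching statements (the threshold is workload-dependent, not a
# recommendation):
# [logging.slow_query]
# enable = true
# threshold = "5s"
# sample_ratio = 0.1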
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -633,20 +430,19 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
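# As an illustration only (URL and header value are placeholders), pointing the
# exporter at another Prometheus-compatible endpoint with an auth header could
# look like:
# [export_metrics]
# enable = true
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
# headers = { Authorization = "Basic <base64-credentials>" }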
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
@@ -1,108 +0,0 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"
## The flownode identifier, which should be unique in the cluster.
## @toml2docs:none-default
node_id = 14
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:6800"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
## TTI of the metadata cache.
metadata_cache_tti = "5m"
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
@@ -1,13 +1,9 @@
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat]
@@ -21,20 +17,16 @@ retry_interval = "3s"
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
@@ -44,11 +36,11 @@ runtime_size = 8
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
@@ -76,11 +68,11 @@ runtime_size = 2
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
@@ -101,11 +93,11 @@ runtime_size = 2
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
@@ -166,47 +158,29 @@ tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -218,20 +192,19 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
@@ -7,15 +7,14 @@ bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
## Store server address default to etcd store.
store_addr = "127.0.0.1:2379"
## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "round_robin"
## Store data in memory.
use_memory_store = false
@@ -26,22 +25,6 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover.
## This feature is only available on GreptimeDB running on cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., s3).
enable_region_failover = false
## The datastore for meta server.
backend = "EtcdStore"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## Procedure storage options.
[procedure]
@@ -60,32 +43,17 @@ max_metadata_value_size = "1500KiB"
# Failure detectors options.
[failure_detector]
## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"
## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
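# A hypothetical loosening of the failure detector for high-latency networks
# (numbers are illustrative, not recommendations):
# [failure_detector]
# threshold = 10.0
# min_std_deviation = "200ms"
# acceptable_heartbeat_pause = "20000ms"
# first_heartbeat_estimate = "2000ms"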
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
## Operation timeout.
timeout = "10s"
## Connect server timeout.
connect_timeout = "10s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
[wal]
@@ -99,12 +67,7 @@ provider = "raft_engine"
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type.
@@ -113,7 +76,6 @@ num_topics = 64
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
@@ -133,67 +95,31 @@ backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -205,20 +131,19 @@ enable = false
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
@@ -5,35 +5,17 @@ mode = "standalone"
enable_telemetry = true
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
@@ -49,11 +31,11 @@ runtime_size = 8
mode = "disable" mode = "disable"
## Certificate file path. ## Certificate file path.
## @toml2docs:none-default ## +toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path. ## Private key file path.
## @toml2docs:none-default ## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for Certificate and key file change and auto reload. ## Watch for Certificate and key file change and auto reload.
@@ -81,11 +63,11 @@ runtime_size = 2
mode = "disable" mode = "disable"
## Certificate file path. ## Certificate file path.
## @toml2docs:none-default ## +toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path. ## Private key file path.
## @toml2docs:none-default ## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for Certificate and key file change and auto reload ## Watch for Certificate and key file change and auto reload
@@ -106,11 +88,11 @@ runtime_size = 2
mode = "disable" mode = "disable"
## Certificate file path. ## Certificate file path.
## @toml2docs:none-default ## +toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path. ## Private key file path.
## @toml2docs:none-default ## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for Certificate and key file change and auto reload ## Watch for Certificate and key file change and auto reload
@@ -142,7 +124,7 @@ provider = "raft_engine"
## The directory to store the WAL files. ## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default ## +toml2docs:none-default
dir = "/tmp/greptimedb/wal" dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file. ## The size of the WAL segment file.
@@ -177,45 +159,18 @@ prefill_log_files = false
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
sync_period = "10s" sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints. ## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**. ## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"] broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
## **It's only used when the provider is `kafka`**.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1
## Above which a topic creation operation will be cancelled.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
@@ -237,35 +192,6 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## Metadata storage options.
[metadata_store]
## Kv file size in bytes.
@@ -316,7 +242,6 @@ retry_delay = "500ms"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
@@ -334,104 +259,85 @@ type = "File"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -440,7 +346,7 @@ region = "us-west-2"
[region_engine.mito]
## Number of region workers.
#+ num_workers = 8
## Request channel size of each worker.
worker_channel_size = 128
@@ -454,48 +360,29 @@ manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
## Max number of running background flush jobs (default: 1/2 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_size = "1GB"
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_reject_size = "2GB"
## Cache size for SST metadata. Setting it to 0 to disable the cache.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
@@ -507,8 +394,7 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -525,82 +411,31 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
## The default name for this directory is `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
@@ -619,53 +454,31 @@ data_freeze_threshold = 32768
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317" ## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -676,21 +489,20 @@ enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself ## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default ## +toml2docs:none-default
db = "greptime_metrics" db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. ## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] [tracing]
## The tokio console address.
## @toml2docs:none-default ## +toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" tokio_console_addr = "127.0.0.1"

View File

@@ -1,9 +1,5 @@
FROM centos:7
# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \
openssl \
openssl-devel \

View File

@@ -1,50 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd "$(mktemp -d)"
# Fix version to v1.6.6, this is different than the latest version in original install script in
# https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh
base_url="https://github.com/cargo-bins/cargo-binstall/releases/download/v1.6.6/cargo-binstall-"
os="$(uname -s)"
if [ "$os" == "Darwin" ]; then
url="${base_url}universal-apple-darwin.zip"
curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
unzip cargo-binstall-universal-apple-darwin.zip
elif [ "$os" == "Linux" ]; then
machine="$(uname -m)"
if [ "$machine" == "armv7l" ]; then
machine="armv7"
fi
target="${machine}-unknown-linux-musl"
if [ "$machine" == "armv7" ]; then
target="${target}eabihf"
fi
url="${base_url}${target}.tgz"
curl -L --proto '=https' --tlsv1.2 -sSf "$url" | tar -xvzf -
elif [ "${OS-}" = "Windows_NT" ]; then
machine="$(uname -m)"
target="${machine}-pc-windows-msvc"
url="${base_url}${target}.zip"
curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
unzip "cargo-binstall-${target}.zip"
else
echo "Unsupported OS ${os}"
exit 1
fi
./cargo-binstall -y --force cargo-binstall
CARGO_HOME="${CARGO_HOME:-$HOME/.cargo}"
if ! [[ ":$PATH:" == *":$CARGO_HOME/bin:"* ]]; then
if [ -n "${CI:-}" ] && [ -n "${GITHUB_PATH:-}" ]; then
echo "$CARGO_HOME/bin" >> "$GITHUB_PATH"
else
echo
printf "\033[0;31mYour path is missing %s, you might want to add it.\033[0m\n" "$CARGO_HOME/bin"
echo
fi
fi

View File

@@ -2,10 +2,6 @@ FROM centos:7 as builder
ENV LANG en_US.utf8
# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
@@ -29,12 +25,6 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -24,15 +24,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python3.10 \
python3.10-dev
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
# Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \
@@ -64,11 +55,6 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -43,9 +43,6 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
RUN cargo install cargo-binstall --version 1.6.6 --locked
# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -1,133 +0,0 @@
x-custom:
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
etcd_common_settings: &etcd_common_settings
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
entrypoint: /usr/local/bin/etcd
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"
services:
etcd0:
<<: *etcd_common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *etcd_initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: *greptimedb_image
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: *greptimedb_image
container_name: datanode0
ports:
- 3001:3001
- 5000:5000
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck:
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: *greptimedb_image
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
healthcheck:
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
datanode0:
condition: service_healthy
networks:
- greptimedb
flownode0:
image: *greptimedb_image
container_name: flownode0
ports:
- 4004:4004
command:
- flownode
- start
- --node-id=0
- --metasrv-addrs=metasrv:3002
- --rpc-addr=0.0.0.0:4004
- --rpc-hostname=flownode0:4004
depends_on:
frontend0:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb

View File

@@ -1,51 +0,0 @@
# Log benchmark configuration
This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.
Here are the versions of the databases we used in the benchmark:
| name | version |
| :------------ | :--------- |
| GreptimeDB | v0.9.2 |
| Clickhouse | 24.9.1.219 |
| Elasticsearch | 8.15.0 |
## Structured model vs Unstructured model
We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.
__Structured model__
The log data is pre-processed into columns by Vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support for each database. In real scenarios it means the data source (or the log data producers) has separate fields defined, or has already processed the raw input.
__Unstructured model__
The log data is inserted as a long string, and then we build a full-text index on these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios it means the log is produced by some kind of middleware and inserted directly into the database.
## Creating tables
See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clauses.
The mapping of Elasticsearch is created automatically.
## Vector Configuration
We use Vector to generate random log data and send inserts to the databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.
## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.
## Steps to reproduce
0. Decide whether to run the structured model test or the unstructured model test.
1. Build the Vector binary (see Vector's config file for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run Vector to insert data.
4. When data insertion is finished, run queries against each database. Note: you'll need to update the time range values after data insertion.
## Addition
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage; see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).

View File

@@ -1,56 +0,0 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
`bytes` Int64 NULL,
`http_version` STRING NULL,
`ip` STRING NULL,
`method` STRING NULL,
`path` STRING NULL,
`status` SMALLINT UNSIGNED NULL,
`user` STRING NULL,
`timestamp` TIMESTAMP(3) NOT NULL,
PRIMARY KEY (`user`, `path`, `status`),
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
`timestamp` TIMESTAMP(3) NOT NULL,
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
bytes UInt64 NOT NULL,
http_version String NOT NULL,
ip String NOT NULL,
method String NOT NULL,
path String NOT NULL,
status UInt8 NOT NULL,
user String NOT NULL,
timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);
-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
message String,
timestamp String,
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();

View File

@@ -1,199 +0,0 @@
# Query URL and payload for Elastic Search
## Count
URL: `http://127.0.0.1:9200/_count`
## Query by timerange
URL: `http://127.0.0.1:9200/_search`
You can use the following payload to get the full timerange first.
```JSON
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
```
And then use this payload to query by timerange.
```JSON
{
"from": 0,
"size": 1000,
"query": {
"range": {
"timestamp": {
"gte": "2024-08-16T04:30:44.000Z",
"lte": "2024-08-16T04:51:52.000Z"
}
}
}
}
```
## Query by condition
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
}
]
}
}
}
```
## Query by condition and timerange
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T07:03:37.383Z",
"lte": "2024-08-19T07:24:58.883Z"
}
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T05:16:17.099Z",
"lte": "2024-08-19T05:46:02.722Z"
}
}
}
]
}
}
}
```

View File

@@ -1,50 +0,0 @@
-- Structured query for GreptimeDB and Clickhouse
-- query count
select count(*) from test_table;
-- query by timerange. Note: place the timestamp range in the where clause
-- GreptimeDB
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- query by condition
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
and timestamp between 1723774396760 and 1723774788760;
-- Clickhouse
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- Unstructured query for GreptimeDB and Clickhouse
-- query by condition
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%');
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
and timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%')
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';

View File

@@ -1,57 +0,0 @@
# Please note we use patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[transforms.parse_logs]
type = "remap"
inputs = ["demo_logs"]
source = '''
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
# Convert timestamp to a standard format
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
# Convert status and bytes to integers
.status = to_int!(.status)
.bytes = to_int!(.bytes)
'''
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "parse_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 4000
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "parse_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "parse_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -1,43 +0,0 @@
# Please note we use patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "demo_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 500
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "demo_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "demo_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -1,253 +0,0 @@
# How to run TSBS Benchmark
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
## Prerequisites
You need the following tools to run TSBS Benchmark:
- Go
- git
- make
- rust (optional, if you want to build the DB from source)
## Build TSBS suite
Clone our fork of TSBS:
```shell
git clone https://github.com/GreptimeTeam/tsbs.git
```
Then build it:
```shell
cd tsbs
make
```
You can check the `bin/` directory for compiled binaries. We will only use some of them.
```shell
ls ./bin/
```
Binaries we will use later:
- `tsbs_generate_data`
- `tsbs_generate_queries`
- `tsbs_load_greptime`
- `tsbs_run_queries_influx`
## Generate test data and queries
The data is generated by `tsbs_generate_data`
```shell
mkdir bench-data
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:00Z" \
--log-interval="10s" --format="influx" \
> ./bench-data/influx-data.lp
```
Here we generate 4000 time series over 3 days with a 10s interval. We'll use the InfluxDB line protocol to write, so the target format is `influx`.
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match those used for `tsbs_generate_data`.
```shell
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-1 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type cpu-max-all-8 \
--format="greptime" \
> ./bench-data/greptime-queries-cpu-max-all-8.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-1 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-5 \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-5.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type double-groupby-all \
--format="greptime" \
> ./bench-data/greptime-queries-double-groupby-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type groupby-orderby-limit \
--format="greptime" \
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type high-cpu-1 \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=50 \
--query-type high-cpu-all \
--format="greptime" \
> ./bench-data/greptime-queries-high-cpu-all.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=10 \
--query-type lastpoint \
--format="greptime" \
> ./bench-data/greptime-queries-lastpoint.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-1-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-1-12 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
./bin/tsbs_generate_queries \
--use-case="devops" --seed=123 --scale=4000 \
--timestamp-start="2023-06-11T00:00:00Z" \
--timestamp-end="2023-06-14T00:00:01Z" \
--queries=100 \
--query-type single-groupby-5-8-1 \
--format="greptime" \
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
```
## Start GreptimeDB
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
## Write Data
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
```shell
./bin/tsbs_load_greptime \
--urls=http://localhost:4000 \
--file=./bench-data/influx-data.lp \
--batch-size=3000 \
--gzip=false \
--workers=6
```
Parameters here are only provided as an example. You can choose whatever you like or adjust them to match your target scenario.
Note that if you want to rerun `tsbs_load_greptime`, destroy and restart the DB and clear its previous data first. Existing duplicated data will impact the write and query performance.
## Query Data
After the data is imported, you can then run queries. The following script runs all queries. You can also choose a subset of queries to run.
```shell
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
```
Rerunning queries does not require re-importing the data; just execute the corresponding command again.

View File

@@ -1,58 +0,0 @@
# TSBS benchmark - v0.9.1
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | ----------------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 100GB (GP3) |
| OS | Ubuntu Server 24.04 LTS |
## Write performance
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 387697.68 |
| EC2 c5d.2xlarge | 234620.19 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 21.14 | 14.75 |
| cpu-max-all-8 | 36.79 | 30.69 |
| double-groupby-1 | 529.02 | 987.85 |
| double-groupby-5 | 1064.53 | 1455.95 |
| double-groupby-all | 1625.33 | 2143.96 |
| groupby-orderby-limit | 529.19 | 1353.49 |
| high-cpu-1 | 12.09 | 8.24 |
| high-cpu-all | 3619.47 | 5312.82 |
| lastpoint | 224.91 | 576.06 |
| single-groupby-1-1-1 | 10.82 | 6.01 |
| single-groupby-1-1-12 | 11.16 | 7.42 |
| single-groupby-1-8-1 | 13.50 | 10.20 |
| single-groupby-5-1-1 | 11.99 | 6.70 |
| single-groupby-5-1-12 | 13.17 | 8.72 |
| single-groupby-5-8-1 | 16.01 | 12.07 |
`single-groupby-1-1-1` query throughput
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local | 50 | 33.04 | 1511.74 |
| Local | 100 | 67.70 | 1476.14 |
| EC2 c5d.2xlarge | 50 | 61.93 | 806.97 |
| EC2 c5d.2xlarge | 100 | 126.31 | 791.40 |

View File

@@ -1,16 +0,0 @@
# Change Log Level on the Fly
## HTTP API
example:
```bash
curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
```
And the database will reply with something like:
```bash
Log Level changed from Some("info") to "trace;flow=debug"%
```
The data is a string in the format of `global_level;module1=level1;module2=level2;...`, which follows the same rules as `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
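As a rough illustration of this filter-string format (not the actual server implementation, and the function name here is made up), a parser could look like this:

```rust
/// Minimal sketch: split a filter string such as "trace;flow=debug" into a
/// global level and per-module directives. Illustrative only.
fn parse_log_filter(input: &str) -> (Option<String>, Vec<(String, String)>) {
    let mut global = None;
    let mut directives = Vec::new();
    for part in input.split(';').filter(|p| !p.is_empty()) {
        match part.split_once('=') {
            // "module=level" sets the level for one module.
            Some((module, level)) => directives.push((module.to_string(), level.to_lowercase())),
            // A bare token like "trace" sets the global level.
            None => global = Some(part.to_lowercase()),
        }
    }
    (global, directives)
}

fn main() {
    let (global, modules) = parse_log_filter("trace;flow=debug");
    assert_eq!(global.as_deref(), Some("trace"));
    assert_eq!(modules, vec![("flow".to_string(), "debug".to_string())]);
}
```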

View File

@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_global(async { common_runtime::block_on_write(async {
let Connections { mysql } = init_greptime_connections().await;
let mut rng = ChaChaRng::seed_from_u64(input.seed);
let columns = rng.gen_range(2..30);

Binary file not shown. (image: 36 KiB before, 25 KiB after)

Binary file not shown. (image: 25 KiB before, 18 KiB after)

View File

@@ -1,197 +0,0 @@
---
Feature Name: Json Datatype
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/4230
Date: 2024-8-6
Author: "Yuhan Wang <profsyb@gmail.com>"
---
# Summary
This RFC proposes a method for storing and querying JSON data in the database.
# Motivation
JSON is widely used across various scenarios. Direct support for writing and querying JSON can significantly enhance the database's flexibility.
# Details
## Storage and Query
GreptimeDB's type system is built on Arrow/DataFusion, where each data type in GreptimeDB corresponds to a data type in Arrow/DataFusion. The proposed JSON type will be implemented on top of the existing `Binary` type, leveraging the current `datatype::value::Value` and `datatype::vectors::BinaryVector` implementations, utilizing the JSONB format as the encoding of JSON data. JSON data is stored and processed similarly to binary data within the storage layer and query engine.
This approach brings problems when dealing with insertions and queries of JSON columns.
## Insertion
Users commonly write JSON data as strings. Thus we need to make conversions between string and JSONB. There are 2 ways to do this:
1. MySQL and PostgreSQL servers provide auto-conversions between strings and JSONB. When a string is inserted into a JSON column, the server will try to parse the string as JSON and convert it to JSONB. The non-JSON strings will be rejected.
2. A function `parse_json` is provided to convert string to JSONB. If the string is not a valid JSON string, the function will return an error.
For example, in MySQL client:
```SQL
CREATE TABLE IF NOT EXISTS test (
ts TIMESTAMP TIME INDEX,
a INT,
b JSON
);
INSERT INTO test VALUES(
0,
0,
'{
"name": "jHl2oDDnPc1i2OzlP5Y",
"timestamp": "2024-07-25T04:33:11.369386Z",
"attributes": { "event_attributes": 48.28667 }
}'
);
INSERT INTO test VALUES(
0,
0,
parse_json('{
"name": "jHl2oDDnPc1i2OzlP5Y",
"timestamp": "2024-07-25T04:33:11.369386Z",
"attributes": { "event_attributes": 48.28667 }
}')
);
```
Both INSERT statements are valid.
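The auto-conversion described above boils down to validating the string before encoding it as JSONB. Here is a minimal sketch of that check (not the actual server code; `serde_json` merely stands in for the real parse-and-encode path):

```rust
use serde_json::Value;

/// Sketch only: accept a string bound to a JSON column if it parses as JSON,
/// reject it otherwise. The real server encodes the parsed value as JSONB.
fn validate_json_insert(input: &str) -> Result<Value, String> {
    serde_json::from_str::<Value>(input).map_err(|e| format!("invalid JSON value rejected: {e}"))
}

fn main() {
    assert!(validate_json_insert(r#"{"name": "jHl2oDDnPc1i2OzlP5Y"}"#).is_ok());
    assert!(validate_json_insert("not a json string").is_err());
}
```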
The dataflow of the insertion process is as follows:
```
Insert JSON strings directly through client:
Parse Insert
String(Serialized JSON)┌──────────┐Arrow Binary(JSONB)┌──────┐Arrow Binary(JSONB)
Client ---------------------->│ Server │------------------>│ Mito │------------------> Storage
└──────────┘ └──────┘
(Server identifies JSON type and performs auto-conversion)
Insert JSON strings through parse_json function:
Parse Insert
String(Serialized JSON)┌──────────┐String(Serialized JSON)┌─────┐Arrow Binary(JSONB)┌──────┐Arrow Binary(JSONB)
Client ---------------------->│ Server │---------------------->│ UDF │------------------>│ Mito │------------------> Storage
└──────────┘ └─────┘ └──────┘
(Conversion is performed by UDF inside Query Engine)
```
Servers identify JSON columns through the column schema and perform auto-conversions. But when using prepared statements with bound parameters, the cached plans generated in DataFusion for the prepared statements cannot identify JSON columns. In this case, the servers identify JSON columns through the given parameters and perform auto-conversions.
The following is an example of inserting JSON data through prepared statements:
```Rust
sqlx::query(
"create table test(ts timestamp time index, j json)",
)
.execute(&pool)
.await
.unwrap();
let json = serde_json::json!({
"code": 200,
"success": true,
"payload": {
"features": [
"serde",
"json"
],
"homepage": null
}
});
// Valid, can identify serde_json::Value as JSON type
sqlx::query("insert into test values($1, $2)")
.bind(i)
.bind(json)
.execute(&pool)
.await
.unwrap();
// Invalid, cannot identify String as JSON type
sqlx::query("insert into test values($1, $2)")
.bind(i)
.bind(json.to_string())
.execute(&pool)
.await
.unwrap();
```
## Query
Correspondingly, users prefer to display JSON data as strings. Thus we need to make conversions between JSON data and strings before presenting JSON data. There are also 2 ways to do this: auto-conversions on MySQL and PostgreSQL servers, and function `json_to_string`.
For example, in MySQL client:
```SQL
SELECT b FROM test;
SELECT json_to_string(b) FROM test;
```
Both will return the JSON as human-readable strings.
Specifically, to perform auto-conversions, we attach a message to JSON data in the `metadata` of `Field` in Arrow/Datafusion schema when scanning a JSON column. Frontend servers could identify JSON data and convert it to strings.
The dataflow of the query process is as follows:
```
Query directly through client:
Decode Scan
String(Serialized JSON)┌──────────┐Arrow Binary(JSONB)┌──────────────┐Arrow Binary(JSONB)
Client <----------------------│ Server │<------------------│ Query Engine │<----------------- Storage
└──────────┘ └──────────────┘
(Server identifies JSON type and performs auto-conversion based on column metadata)
Query through json_to_string function:
Scan & Decode
String(Serialized JSON)┌──────────┐String(Serialized JSON)┌──────────────┐Arrow Binary(JSONB)
Client <----------------------│ Server │<----------------------│ Query Engine │<----------------- Storage
└──────────┘ └──────────────┘
(Conversion is performed by UDF inside Query Engine)
```
However, if a function uses JSON type as its return type, the metadata method mentioned above is not applicable. Thus the functions of JSON type should specify the return type explicitly instead of returning a JSON type, such as `json_get_int` and `json_get_float` which return corresponding data of `INT` and `FLOAT` type respectively.
## Functions
Similar to the common JSON type, JSON data can be queried with functions.
For example:
```SQL
CREATE TABLE IF NOT EXISTS test (
ts TIMESTAMP TIME INDEX,
a INT,
b JSON
);
INSERT INTO test VALUES(
0,
0,
'{
"name": "jHl2oDDnPc1i2OzlP5Y",
"timestamp": "2024-07-25T04:33:11.369386Z",
"attributes": { "event_attributes": 48.28667 }
}'
);
SELECT json_get_string(b, 'name') FROM test;
+---------------------+
| b.name |
+---------------------+
| jHl2oDDnPc1i2OzlP5Y |
+---------------------+
SELECT json_get_float(b, 'attributes.event_attributes') FROM test;
+--------------------------------+
| b.attributes.event_attributes |
+--------------------------------+
| 48.28667 |
+--------------------------------+
```
And more functions can be added in the future.
# Drawbacks
As a general purpose JSON data type, JSONB may not be as efficient as specialized data types for specific scenarios.
The auto-conversion mechanism is not supported in all scenarios. We need to find workarounds for these scenarios.
# Alternatives
Extract and flatten JSON schema to store in a structured format through pipeline. For nested data, we can provide nested types like `STRUCT` or `ARRAY`.

527
docs/schema-structs.md Normal file
View File

@@ -0,0 +1,527 @@
# Schema Structs
# Common Schemas
The `datatypes` crate defines the elementary schema struct to describe the metadata.
## ColumnSchema
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
```rust
pub struct ColumnSchema {
pub name: String,
pub data_type: ConcreteDataType,
is_nullable: bool,
is_time_index: bool,
default_constraint: Option<ColumnDefaultConstraint>,
metadata: Metadata,
}
```
## Schema
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
```rust
use arrow::datatypes::Schema as ArrowSchema;
pub struct Schema {
column_schemas: Vec<ColumnSchema>,
name_to_index: HashMap<String, usize>,
arrow_schema: Arc<ArrowSchema>,
timestamp_index: Option<usize>,
version: u32,
}
pub type SchemaRef = Arc<Schema>;
```
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
## RawSchema
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequences and a cached arrow `Schema`. We can construct these fields from the `ColumnSchema` sequences thus we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. We introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we need to convert it into a `RawSchema` first and serialize the `RawSchema`.
```rust
pub struct RawSchema {
pub column_schemas: Vec<ColumnSchema>,
pub timestamp_index: Option<usize>,
pub version: u32,
}
```
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
# Schema of the Table
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
```rust
pub struct TableMeta {
pub schema: SchemaRef,
pub primary_key_indices: Vec<usize>,
pub value_indices: Vec<usize>,
// ...
}
```
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
The field `primary_key_indices` stores the indices of the primary key columns. The field `value_indices` records the indices of the value columns (columns that are neither part of the primary key nor the time index; we sometimes call them field columns).
Suppose we create a table with the following SQL
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
```
Then the table's `TableMeta` may look like this:
```json
{
"schema":{
"column_schemas":[
"ts",
"host",
"usage_user",
"usage_system",
"datacenter"
],
"time_index":0,
"version":0
},
"primary_key_indices":[
4,
1
],
"value_indices":[
2,
3
]
}
```
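The `value_indices` above can be derived from the column order, the primary key indices, and the time index. A minimal sketch (a hypothetical helper, not the actual `TableMeta` code):

```rust
/// Sketch: value columns are all columns that are neither part of the primary
/// key nor the time index. Not the actual TableMeta implementation.
fn value_indices(num_columns: usize, primary_key: &[usize], time_index: usize) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key.contains(i))
        .collect()
}

fn main() {
    // Columns of the `cpu` table: ts(0), host(1), usage_user(2), usage_system(3), datacenter(4).
    // The primary key is (datacenter, host) => [4, 1], and the time index is ts => 0.
    assert_eq!(value_indices(5, &[4, 1], 0), vec![2, 3]);
}
```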
# Schemas of the storage engine
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
The storage engine maintains schemas of regions in more complicated ways because it
- adds internal columns that are invisible to users to store additional metadata for each row
- provides a data model similar to the key-value model so it organizes columns in a different order
- maintains additional metadata like column id or column family
So the storage engine defines several schema structs:
- RegionSchema
- StoreSchema
- ProjectedSchema
## RegionSchema
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
```rust
pub struct RegionSchema {
user_schema: SchemaRef,
store_schema: StoreSchemaRef,
columns: ColumnsMetadataRef,
}
```
Each region reserves some columns called `internal columns` for internal usage:
- `__sequence`, sequence number of a row
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
- User schema, a `Schema` struct that doesn't have internal columns
- Store schema, a `StoreSchema` struct that has internal columns
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we can ignore it here. We may remove this struct in the future.
`RegionSchema` organizes columns in the following order:
```
key columns, timestamp, [__version,] value columns, __sequence, __op_type
```
We can ignore the `__version` column because it is disabled now:
```
key columns, timestamp, value columns, __sequence, __op_type
```
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
"store_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
]
}
```
## StoreSchema
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
```rust
struct StoreSchema {
columns: Vec<ColumnMetadata>,
schema: SchemaRef,
row_key_end: usize,
user_column_end: usize,
}
```
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` has metadata like column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert the `StoreSchema` to and from arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
The `StoreSchema` of the region above is similar to this:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
}
```
The key and timestamp columns form row keys of rows. We put them together so we can use `row_key_end` to get indices of all row key columns. Similarly, we can use the `user_column_end` to get indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
#[inline]
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
0..self.row_key_end
}
#[inline]
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
self.row_key_end..self.user_column_end
}
}
```
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns because we need them to perform merge, deduplication, and delete. Projection on `StoreSchema` only projects value columns.
## ProjectedSchema
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
projection: Option<Projection>,
schema_to_read: StoreSchemaRef,
projected_user_schema: SchemaRef,
}
```
We need to handle many cases while doing projection:
- The columns' order of table and region is different
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
- We support `ALTER TABLE` so data files may have different schemas.
### Projection
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
```sql
CREATE TABLE cpu (
ts TIMESTAMP,
host STRING,
usage_user DOUBLE,
usage_system DOUBLE,
datacenter STRING,
TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito;
select ts, usage_system from cpu;
```
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
"user_schema":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system"
],
}
```
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
The `Projection` struct is a helper struct to help compute the projected user schema and store schema.
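The index remapping itself is straightforward; here is a small sketch of how a table-level projection could be translated into region column order by matching column names (illustrative only, not the actual `Projection` code):

```rust
/// Sketch: map projection indices given in table column order to indices in
/// the region's (user schema) column order by looking up column names.
fn adjust_projection(table_columns: &[&str], region_columns: &[&str], projection: &[usize]) -> Vec<usize> {
    projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("column must exist in the region")
        })
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system from cpu` => projection [0, 3] on the table,
    // which becomes [2, 4] in the region's column order.
    assert_eq!(adjust_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```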
So we can construct the following `ProjectedSchema`:
```json
{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":4
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system"
],
"time_index":0
}
}
```
As you can see, `schema_to_read` doesn't contain the column `usage_user`, which is not intended to be read (it is not in the projection).
### ReadAdapter
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
source_schema: StoreSchemaRef,
dest_schema: ProjectedSchemaRef,
indices_in_result: Vec<Option<usize>>,
is_source_needed: Vec<bool>,
}
```
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```
The new `StoreSchema` becomes:
```json
{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":6
}
```
Note that we bump the version of the schema to 1.
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
"source_schema":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_user",
"usage_system",
"__sequence",
"__op_type"
],
"time_index":2,
"version":0
},
"row_key_end":3,
"user_column_end":5
},
"dest_schema":{
"schema_to_read":{
"schema":{
"column_schemas":[
"datacenter",
"host",
"ts",
"usage_system",
"usage_idle",
"__sequence",
"__op_type"
],
"time_index":2,
"version":1
},
"row_key_end":3,
"user_column_end":5
},
"projected_user_schema":{
"column_schemas":[
"ts",
"usage_system",
"usage_idle"
],
"time_index":0
}
},
"indices_in_result":[
0,
1,
2,
3,
null,
4,
5
],
"is_source_needed":[
true,
true,
true,
false,
true,
true,
true
]
}
```
We don't need to read `usage_user`, so `is_source_needed[3]` is `false`. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null` and the `ReadAdapter` has to insert a null column into the output row so that the output schema still contains `usage_idle`.
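The adaptation step itself can be pictured as a simple index mapping. The sketch below is a hypothetical illustration of that mapping (the enum, function, and values are assumptions made for this example, not the actual `ReadAdapter` implementation):
```rust
// Hypothetical sketch of the adaptation step. `source_row` holds only the source
// columns marked `true` in `is_source_needed`, in source order; `indices_in_result`
// maps every destination column to its position in that filtered row, or `None`
// when the source lacks the column.
#[derive(Clone, Debug, PartialEq)]
enum CellValue {
    Null,
    Str(&'static str),
    F64(f64),
    I64(i64),
}

fn adapt_row(source_row: &[CellValue], indices_in_result: &[Option<usize>]) -> Vec<CellValue> {
    indices_in_result
        .iter()
        .map(|idx| match idx {
            Some(i) => source_row[*i].clone(),
            // The source schema has no such column, so fill it with null.
            None => CellValue::Null,
        })
        .collect()
}

fn main() {
    // Row read from an SST with schema version 0, with `usage_user` already skipped.
    let source_row = [
        CellValue::Str("dc1"),   // datacenter
        CellValue::Str("host1"), // host
        CellValue::I64(1000),    // ts
        CellValue::F64(0.3),     // usage_system
        CellValue::I64(1),       // __sequence
        CellValue::I64(0),       // __op_type
    ];
    let indices_in_result = [Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let out = adapt_row(&source_row, &indices_in_result);
    // `usage_idle` (index 4 in the destination schema) is filled with null.
    assert_eq!(out[4], CellValue::Null);
    assert_eq!(out.len(), 7);
}
```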
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
```text
┌──────────────────────────────┐
│ │
│ ┌────────────────────┐ │
│ │ store_schema │ │
│ │ │ │
│ │ StoreSchema │ │
│ │ version 1 │ │
│ └────────────────────┘ │
│ │
│ ┌────────────────────┐ │
│ │ user_schema │ │
│ └────────────────────┘ │
│ │
│ RegionSchema │
│ │
└──────────────┬───────────────┘
┌──────────────▼───────────────┐
│ │
│ ┌──────────────────────────┐ │
│ │ schema_to_read │ │
│ │ │ │
│ │ StoreSchema (projected) │ │
│ │ version 1 │ │
│ └──────────────────────────┘ │
┌───┤ ├───┐
│ │ ┌──────────────────────────┐ │ │
│ │ │ projected_user_schema │ │ │
│ │ └──────────────────────────┘ │ │
│ │ │ │
│ │ ProjectedSchema │ │
dest schema │ └──────────────────────────────┘ │ dest schema
│ │
│ │
┌──────▼───────┐ ┌───────▼──────┐
│ │ │ │
│ ReadAdapter │ │ ReadAdapter │
│ │ │ │
└──────▲───────┘ └───────▲──────┘
│ │
│ │
source schema │ │ source schema
│ │
┌───────┴─────────┐ ┌────────┴────────┐
│ │ │ │
│ ┌─────────────┐ │ │ ┌─────────────┐ │
│ │ │ │ │ │ │ │
│ │ StoreSchema │ │ │ │ StoreSchema │ │
│ │ │ │ │ │ │ │
│ │ version 0 │ │ │ │ version 1 │ │
│ │ │ │ │ │ │ │
│ └─────────────┘ │ │ └─────────────┘ │
│ │ │ │
│ SST 0 │ │ SST 1 │
│ │ │ │
└─────────────────┘ └─────────────────┘
```
# Conversion
This figure shows the conversion between schemas:
```text
┌─────────────┐ schema From ┌─────────────┐
│ ├──────────────────┐ ┌────────────────────────────► │
│ TableMeta │ │ │ │ RawSchema │
│ │ │ │ ┌─────────────────────────┤ │
└─────────────┘ │ │ │ TryFrom └─────────────┘
│ │ │
│ │ │
│ │ │
│ │ │
│ │ │
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
│ │ │ ├─────────────────────► │
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
│ │ │ │ ◄─────────────────────┤ │ │
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ └────────────────────────────────────────┐ │
│ │ │ │ │ │
│ columns │ user_schema() │ │ │
│ │ │ │ projected_user_schema() schema() │
│ │ │ │ │ │
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
│ │ RegionSchema │ │ ProjectedSchema │ │ │
│ │ ├─────────────────────────► │ │ │
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
│ │ └─────────────────────────────────────────► │ │
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
│ ◄──────────────────────────────────────────────┤ │
└─────────────────────────┘ columns └───────────────┘
```

View File

@@ -25,7 +25,7 @@ Please ensure the following configuration before importing the dashboard into Gr
__1. Prometheus scrape config__ __1. Prometheus scrape config__
Configure Prometheus to scrape the cluster. Assign `greptime_pod` label to each host target. We use this label to identify each node instance.
```yml ```yml
# example config # example config
@@ -34,15 +34,27 @@ Configure Prometheus to scrape the cluster.
scrape_configs: scrape_configs:
- job_name: metasrv - job_name: metasrv
static_configs: static_configs:
- targets: ['<metasrv-ip>:<port>'] - targets: ['<ip>:<port>']
labels:
greptime_pod: metasrv
- job_name: datanode - job_name: datanode
static_configs: static_configs:
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>'] - targets: ['<ip>:<port>']
labels:
greptime_pod: datanode1
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode2
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode3
- job_name: frontend - job_name: frontend
static_configs: static_configs:
- targets: ['<frontend-ip>:<port>'] - targets: ['<ip>:<port>']
labels:
greptime_pod: frontend
``` ```
__2. Grafana config__ __2. Grafana config__
@@ -51,4 +63,4 @@ Create a Prometheus data source in Grafana before using this dashboard. We use `
### Usage ### Usage
Use `datasource` or `instance` on the upper-left corner to filter data from certain node. Use `datasource` or `greptime_pod` on the upper-left corner to filter data from certain node.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -1,2 +1,2 @@
[toolchain] [toolchain]
channel = "nightly-2024-10-19" channel = "nightly-2024-04-20"

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
set -e
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
function check_rust_toolchain_version() {
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
exit 1
fi
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
exit 1
fi
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
exit 1
fi
# Compare the version and the difference should be less than 1 day.
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
if [ $date_diff -gt 1 ]; then
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
exit 1
fi
}
check_rust_toolchain_version

View File

@@ -1,69 +0,0 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
def find_rust_files(directory):
error_files = []
other_rust_files = []
for root, _, files in os.walk(directory):
for file in files:
if file == "error.rs":
error_files.append(os.path.join(root, file))
elif file.endswith(".rs"):
other_rust_files.append(os.path.join(root, file))
return error_files, other_rust_files
def extract_branch_names(file_content):
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu"
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
return False
def main():
error_files, other_rust_files = find_rust_files(".")
branch_names = []
for error_file in error_files:
with open(error_file, "r") as file:
content = file.read()
branch_names.extend(extract_branch_names(content))
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)
if __name__ == "__main__":
main()

View File

@@ -1,21 +1,15 @@
#!/usr/bin/env bash #!/bin/sh
set -ue set -ue
OS_TYPE= OS_TYPE=
ARCH_TYPE= ARCH_TYPE=
# Set the GitHub token to avoid GitHub API rate limit.
# You can run with `GITHUB_TOKEN`:
# GITHUB_TOKEN=<your_token> ./scripts/install.sh
GITHUB_TOKEN=${GITHUB_TOKEN:-}
VERSION=${1:-latest} VERSION=${1:-latest}
GITHUB_ORG=GreptimeTeam GITHUB_ORG=GreptimeTeam
GITHUB_REPO=greptimedb GITHUB_REPO=greptimedb
BIN=greptime BIN=greptime
function get_os_type() { get_os_type() {
os_type="$(uname -s)" os_type="$(uname -s)"
case "$os_type" in case "$os_type" in
@@ -31,7 +25,7 @@ function get_os_type() {
esac esac
} }
function get_arch_type() { get_arch_type() {
arch_type="$(uname -m)" arch_type="$(uname -m)"
case "$arch_type" in case "$arch_type" in
@@ -53,19 +47,15 @@ function get_arch_type() {
esac esac
} }
function download_artifact() { get_os_type
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then get_arch_type
# Use the latest stable released version.
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release. if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
# Use the latest nightly version.
if [ "${VERSION}" = "latest" ]; then if [ "${VERSION}" = "latest" ]; then
# To avoid other tools dependency, we choose to use `curl` to get the version metadata and parsed by `sed`. VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
VERSION=$(curl -sL \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${GITHUB_TOKEN:+-H "Authorization: Bearer $GITHUB_TOKEN"} \
"https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest" | sed -n 's/.*"tag_name": "\([^"]*\)".*/\1/p')
if [ -z "${VERSION}" ]; then if [ -z "${VERSION}" ]; then
echo "Failed to get the latest stable released version." echo "Failed to get the latest version."
exit 1 exit 1
fi fi
fi fi
@@ -83,9 +73,4 @@ function download_artifact() {
rm -r "${PACKAGE_NAME%.tar.gz}" && \ rm -r "${PACKAGE_NAME%.tar.gz}" && \
echo "Run './${BIN} --help' to get started" echo "Run './${BIN} --help' to get started"
fi fi
fi fi
}
get_os_type
get_arch_type
download_artifact

View File

@@ -17,11 +17,10 @@ datatypes.workspace = true
greptime-proto.workspace = true greptime-proto.workspace = true
paste = "1.0" paste = "1.0"
prost.workspace = true prost.workspace = true
serde_json.workspace = true
snafu.workspace = true snafu.workspace = true
[build-dependencies] [build-dependencies]
tonic-build = "0.11" tonic-build = "0.9"
[dev-dependencies] [dev-dependencies]
paste = "1.0" paste = "1.0"

View File

@@ -58,23 +58,13 @@ pub enum Error {
location: Location, location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
#[snafu(display("Failed to serialize JSON"))]
SerializeJson {
#[snafu(source)]
error: serde_json::Error,
#[snafu(implicit)]
location: Location,
},
} }
impl ErrorExt for Error { impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments, Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => { Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
StatusCode::Unexpected
}
Error::ConvertColumnDefaultConstraint { source, .. } Error::ConvertColumnDefaultConstraint { source, .. }
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(), | Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
} }

View File

@@ -17,11 +17,10 @@ use std::sync::Arc;
use common_base::BitVec; use common_base::BitVec;
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION}; use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
use common_decimal::Decimal128; use common_decimal::Decimal128;
use common_time::interval::IntervalUnit;
use common_time::time::Time; use common_time::time::Time;
use common_time::timestamp::TimeUnit; use common_time::timestamp::TimeUnit;
use common_time::{ use common_time::{Date, DateTime, Interval, Timestamp};
Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
};
use datatypes::prelude::{ConcreteDataType, ValueRef}; use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector; use datatypes::scalars::ScalarVector;
use datatypes::types::{ use datatypes::types::{
@@ -43,8 +42,7 @@ use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query; use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData; use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{ use greptime_proto::v1::{
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, QueryRequest, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
Row, SemanticType,
}; };
use paste::paste; use paste::paste;
use snafu::prelude::*; use snafu::prelude::*;
@@ -105,18 +103,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(), ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(),
ColumnDataType::Float32 => ConcreteDataType::float32_datatype(), ColumnDataType::Float32 => ConcreteDataType::float32_datatype(),
ColumnDataType::Float64 => ConcreteDataType::float64_datatype(), ColumnDataType::Float64 => ConcreteDataType::float64_datatype(),
ColumnDataType::Binary => { ColumnDataType::Binary => ConcreteDataType::binary_datatype(),
if let Some(TypeExt::JsonType(_)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
ConcreteDataType::json_datatype()
} else {
ConcreteDataType::binary_datatype()
}
}
ColumnDataType::Json => ConcreteDataType::json_datatype(),
ColumnDataType::String => ConcreteDataType::string_datatype(), ColumnDataType::String => ConcreteDataType::string_datatype(),
ColumnDataType::Date => ConcreteDataType::date_datatype(), ColumnDataType::Date => ConcreteDataType::date_datatype(),
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(), ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
@@ -249,7 +236,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64, ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32, ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64, ConcreteDataType::Float64(_) => ColumnDataType::Float64,
ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary, ConcreteDataType::Binary(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String, ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date, ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime, ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -289,16 +276,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
})), })),
}) })
} }
ColumnDataType::Binary => {
if datatype == ConcreteDataType::json_datatype() {
// Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
})
} else {
None
}
}
_ => None, _ => None,
}; };
Ok(Self { Ok(Self {
@@ -418,10 +395,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
decimal128_values: Vec::with_capacity(capacity), decimal128_values: Vec::with_capacity(capacity),
..Default::default() ..Default::default()
}, },
ColumnDataType::Json => Values {
string_values: Vec::with_capacity(capacity),
..Default::default()
},
} }
} }
@@ -462,11 +435,13 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()), TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()), TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
}, },
Value::IntervalYearMonth(val) => values.interval_year_month_values.push(val.to_i32()), Value::Interval(val) => match val.unit() {
Value::IntervalDayTime(val) => values.interval_day_time_values.push(val.to_i64()), IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
Value::IntervalMonthDayNano(val) => values IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
IntervalUnit::MonthDayNano => values
.interval_month_day_nano_values .interval_month_day_nano_values
.push(convert_month_day_nano_to_pb(val)), .push(convert_i128_to_interval(val.to_i128())),
},
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)), Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
Value::List(_) | Value::Duration(_) => unreachable!(), Value::List(_) | Value::Duration(_) => unreachable!(),
}); });
@@ -511,12 +486,14 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
} }
} }
/// Converts an interval to google protobuf type [IntervalMonthDayNano]. /// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
pub fn convert_month_day_nano_to_pb(v: IntervalMonthDayNano) -> v1::IntervalMonthDayNano { pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
let interval = Interval::from_i128(v);
let (months, days, nanoseconds) = interval.to_month_day_nano();
v1::IntervalMonthDayNano { v1::IntervalMonthDayNano {
months: v.months, months,
days: v.days, days,
nanoseconds: v.nanoseconds, nanoseconds,
} }
} }
@@ -564,15 +541,11 @@ pub fn pb_value_to_value_ref<'a>(
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)), ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)), ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)), ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
ValueData::IntervalYearMonthValue(v) => { ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
ValueRef::IntervalYearMonth(IntervalYearMonth::from_i32(*v)) ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
}
ValueData::IntervalDayTimeValue(v) => {
ValueRef::IntervalDayTime(IntervalDayTime::from_i64(*v))
}
ValueData::IntervalMonthDayNanoValue(v) => { ValueData::IntervalMonthDayNanoValue(v) => {
let interval = IntervalMonthDayNano::new(v.months, v.days, v.nanoseconds); let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::IntervalMonthDayNano(interval) ValueRef::Interval(interval)
} }
ValueData::Decimal128Value(v) => { ValueData::Decimal128Value(v) => {
// get precision and scale from datatype_extension // get precision and scale from datatype_extension
@@ -663,7 +636,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
IntervalType::MonthDayNano(_) => { IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values( Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values.interval_month_day_nano_values.iter().map(|x| { values.interval_month_day_nano_values.iter().map(|x| {
IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128() Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
}), }),
)) ))
} }
@@ -676,8 +649,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) | ConcreteDataType::Duration(_) => {
| ConcreteDataType::Json(_) => {
unreachable!() unreachable!()
} }
} }
@@ -808,18 +780,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
.interval_year_month_values .interval_year_month_values
.into_iter() .into_iter()
.map(|v| Value::IntervalYearMonth(IntervalYearMonth::from_i32(v))) .map(|v| Value::Interval(Interval::from_i32(v)))
.collect(), .collect(),
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
.interval_day_time_values .interval_day_time_values
.into_iter() .into_iter()
.map(|v| Value::IntervalDayTime(IntervalDayTime::from_i64(v))) .map(|v| Value::Interval(Interval::from_i64(v)))
.collect(), .collect(),
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
.interval_month_day_nano_values .interval_month_day_nano_values
.into_iter() .into_iter()
.map(|v| { .map(|v| {
Value::IntervalMonthDayNano(IntervalMonthDayNano::new( Value::Interval(Interval::from_month_day_nano(
v.months, v.months,
v.days, v.days,
v.nanoseconds, v.nanoseconds,
@@ -841,8 +813,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) | ConcreteDataType::Duration(_) => {
| ConcreteDataType::Json(_) => {
unreachable!() unreachable!()
} }
} }
@@ -860,13 +831,7 @@ pub fn is_column_type_value_eq(
expect_type: &ConcreteDataType, expect_type: &ConcreteDataType,
) -> bool { ) -> bool {
ColumnDataTypeWrapper::try_new(type_value, type_extension) ColumnDataTypeWrapper::try_new(type_value, type_extension)
.map(|wrapper| { .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
let datatype = ConcreteDataType::from(wrapper);
(datatype == *expect_type)
// Json type leverage binary type in pb, so this is valid.
|| (datatype == ConcreteDataType::binary_datatype()
&& *expect_type == ConcreteDataType::json_datatype())
})
.unwrap_or(false) .unwrap_or(false)
} }
@@ -947,17 +912,19 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
value_data: Some(ValueData::TimeNanosecondValue(v.value())), value_data: Some(ValueData::TimeNanosecondValue(v.value())),
}, },
}, },
Value::IntervalYearMonth(v) => v1::Value { Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => v1::Value {
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())), value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
}, },
Value::IntervalDayTime(v) => v1::Value { IntervalUnit::DayTime => v1::Value {
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())), value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
}, },
Value::IntervalMonthDayNano(v) => v1::Value { IntervalUnit::MonthDayNano => v1::Value {
value_data: Some(ValueData::IntervalMonthDayNanoValue( value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_month_day_nano_to_pb(v), convert_i128_to_interval(v.to_i128()),
)), )),
}, },
},
Value::Decimal128(v) => v1::Value { Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))), value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
}, },
@@ -1048,11 +1015,13 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()), TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()), TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
}), }),
Value::IntervalYearMonth(v) => Some(ValueData::IntervalYearMonthValue(v.to_i32())), Value::Interval(v) => Some(match v.unit() {
Value::IntervalDayTime(v) => Some(ValueData::IntervalDayTimeValue(v.to_i64())), IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
Value::IntervalMonthDayNano(v) => Some(ValueData::IntervalMonthDayNanoValue( IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
convert_month_day_nano_to_pb(v), IntervalUnit::MonthDayNano => {
)), ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
}
}),
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))), Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
Value::List(_) | Value::Duration(_) => unreachable!(), Value::List(_) | Value::Duration(_) => unreachable!(),
}, },
@@ -1063,7 +1032,6 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
mod tests { mod tests {
use std::sync::Arc; use std::sync::Arc;
use common_time::interval::IntervalUnit;
use datatypes::types::{ use datatypes::types::{
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType, TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
@@ -1509,11 +1477,11 @@ mod tests {
#[test] #[test]
fn test_convert_i128_to_interval() { fn test_convert_i128_to_interval() {
let i128_val = 3; let i128_val = 3000;
let interval = convert_month_day_nano_to_pb(IntervalMonthDayNano::from_i128(i128_val)); let interval = convert_i128_to_interval(i128_val);
assert_eq!(interval.months, 0); assert_eq!(interval.months, 0);
assert_eq!(interval.days, 0); assert_eq!(interval.days, 0);
assert_eq!(interval.nanoseconds, 3); assert_eq!(interval.nanoseconds, 3000);
} }
#[test] #[test]
@@ -1593,9 +1561,9 @@ mod tests {
}, },
); );
let expect = vec![ let expect = vec![
Value::IntervalYearMonth(IntervalYearMonth::new(1_i32)), Value::Interval(Interval::from_year_month(1_i32)),
Value::IntervalYearMonth(IntervalYearMonth::new(2_i32)), Value::Interval(Interval::from_year_month(2_i32)),
Value::IntervalYearMonth(IntervalYearMonth::new(3_i32)), Value::Interval(Interval::from_year_month(3_i32)),
]; ];
assert_eq!(expect, actual); assert_eq!(expect, actual);
@@ -1608,9 +1576,9 @@ mod tests {
}, },
); );
let expect = vec![ let expect = vec![
Value::IntervalDayTime(IntervalDayTime::from_i64(1_i64)), Value::Interval(Interval::from_i64(1_i64)),
Value::IntervalDayTime(IntervalDayTime::from_i64(2_i64)), Value::Interval(Interval::from_i64(2_i64)),
Value::IntervalDayTime(IntervalDayTime::from_i64(3_i64)), Value::Interval(Interval::from_i64(3_i64)),
]; ];
assert_eq!(expect, actual); assert_eq!(expect, actual);
@@ -1639,9 +1607,9 @@ mod tests {
}, },
); );
let expect = vec![ let expect = vec![
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)), Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(5, 6, 7)), Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(9, 10, 11)), Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
]; ];
assert_eq!(expect, actual); assert_eq!(expect, actual);
} }
@@ -1875,7 +1843,6 @@ mod tests {
null_mask: vec![2], null_mask: vec![2],
datatype: ColumnDataType::Boolean as i32, datatype: ColumnDataType::Boolean as i32,
datatype_extension: None, datatype_extension: None,
options: None,
}; };
assert!(is_column_type_value_eq( assert!(is_column_type_value_eq(
column1.datatype, column1.datatype,

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#![feature(let_chains)]
pub mod error; pub mod error;
pub mod helper; pub mod helper;

View File

@@ -21,14 +21,14 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
#[derive(Debug)] #[derive(Debug)]
pub struct RegionResponse { pub struct RegionResponse {
pub affected_rows: AffectedRows, pub affected_rows: AffectedRows,
pub extensions: HashMap<String, Vec<u8>>, pub extension: HashMap<String, Vec<u8>>,
} }
impl RegionResponse { impl RegionResponse {
pub fn from_region_response(region_response: RegionResponseV1) -> Self { pub fn from_region_response(region_response: RegionResponseV1) -> Self {
Self { Self {
affected_rows: region_response.affected_rows as _, affected_rows: region_response.affected_rows as _,
extensions: region_response.extensions, extension: region_response.extension,
} }
} }
@@ -36,7 +36,7 @@ impl RegionResponse {
pub fn new(affected_rows: AffectedRows) -> Self { pub fn new(affected_rows: AffectedRows) -> Self {
Self { Self {
affected_rows, affected_rows,
extensions: Default::default(), extension: Default::default(),
} }
} }
} }

View File

@@ -14,19 +14,13 @@
use std::collections::HashMap; use std::collections::HashMap;
use datatypes::schema::{ use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
};
use snafu::ResultExt; use snafu::ResultExt;
use crate::error::{self, Result}; use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper; use crate::helper::ColumnDataTypeWrapper;
use crate::v1::{ColumnDef, ColumnOptions, SemanticType}; use crate::v1::ColumnDef;
/// Key used to store fulltext options in gRPC column options.
const FULLTEXT_GRPC_KEY: &str = "fulltext";
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> { pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = ColumnDataTypeWrapper::try_new( let data_type = ColumnDataTypeWrapper::try_new(
column_def.data_type, column_def.data_type,
@@ -49,147 +43,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if !column_def.comment.is_empty() { if !column_def.comment.is_empty() {
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone()); metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
} }
if let Some(options) = column_def.options.as_ref()
&& let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
{
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
}
Ok(
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable) ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
.with_metadata(metadata)
.with_time_index(column_def.semantic_type() == SemanticType::Timestamp)
.with_default_constraint(constraint) .with_default_constraint(constraint)
.context(error::InvalidColumnDefaultConstraintSnafu { .context(error::InvalidColumnDefaultConstraintSnafu {
column: &column_def.name, column: &column_def.name,
}) })?
} .with_metadata(metadata),
)
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
let mut options = ColumnOptions::default();
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options
.options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
}
(!options.options.is_empty()).then_some(options)
}
/// Checks if the `ColumnOptions` contains fulltext options.
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
options
.as_ref()
.map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
}
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
let mut options = ColumnOptions::default();
let v = serde_json::to_string(fulltext).context(error::SerializeJsonSnafu)?;
options.options.insert(FULLTEXT_GRPC_KEY.to_string(), v);
Ok((!options.options.is_empty()).then_some(options))
}
#[cfg(test)]
mod tests {
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::FulltextAnalyzer;
use super::*;
use crate::v1::ColumnDataType;
#[test]
fn test_try_as_column_schema() {
let column_def = ColumnDef {
name: "test".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: ColumnDefaultConstraint::Value("test_default".into())
.try_into()
.unwrap(),
semantic_type: SemanticType::Field as i32,
comment: "test_comment".to_string(),
datatype_extension: None,
options: Some(ColumnOptions {
options: HashMap::from([(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
)]),
}),
};
let schema = try_as_column_schema(&column_def).unwrap();
assert_eq!(schema.name, "test");
assert_eq!(schema.data_type, ConcreteDataType::string_datatype());
assert!(!schema.is_time_index());
assert!(schema.is_nullable());
assert_eq!(
schema.default_constraint().unwrap(),
&ColumnDefaultConstraint::Value("test_default".into())
);
assert_eq!(schema.metadata().get(COMMENT_KEY).unwrap(), "test_comment");
assert_eq!(
schema.fulltext_options().unwrap().unwrap(),
FulltextOptions {
enable: true,
..Default::default()
}
);
}
#[test]
fn test_options_from_column_schema() {
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true);
let options = options_from_column_schema(&schema);
assert!(options.is_none());
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
})
.unwrap();
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
);
}
#[test]
fn test_options_with_fulltext() {
let fulltext = FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
};
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
);
}
#[test]
fn test_contains_fulltext() {
let options = ColumnOptions {
options: HashMap::from([(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
)]),
};
assert!(contains_fulltext(&Some(options)));
let options = ColumnOptions {
options: HashMap::new(),
};
assert!(!contains_fulltext(&Some(options)));
assert!(!contains_fulltext(&None));
}
} }

View File

@@ -75,16 +75,6 @@ pub enum Password<'a> {
PgMD5(HashedPassword<'a>, Salt<'a>), PgMD5(HashedPassword<'a>, Salt<'a>),
} }
impl Password<'_> {
pub fn r#type(&self) -> &str {
match self {
Password::PlainText(_) => "plain_text",
Password::MysqlNativePassword(_, _) => "mysql_native_password",
Password::PgMD5(_, _) => "pg_md5",
}
}
}
pub fn auth_mysql( pub fn auth_mysql(
auth_data: HashedPassword, auth_data: HashedPassword,
salt: Salt, salt: Salt,

View File

@@ -38,11 +38,10 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display("Authentication source failure"))] #[snafu(display("Auth failed"))]
AuthBackend { AuthBackend {
#[snafu(implicit)] #[snafu(implicit)]
location: Location, location: Location,
#[snafu(source)]
source: BoxedError, source: BoxedError,
}, },
@@ -88,8 +87,8 @@ impl ErrorExt for Error {
Error::IllegalParam { .. } => StatusCode::InvalidArguments, Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments, Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected, Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::StorageUnavailable, Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { source, .. } => source.status_code(), Error::AuthBackend { .. } => StatusCode::Internal,
Error::UserNotFound { .. } => StatusCode::UserNotFound, Error::UserNotFound { .. } => StatusCode::UserNotFound,
Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType, Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType,

View File

@@ -30,7 +30,6 @@ pub enum PermissionReq<'a> {
PromStoreWrite, PromStoreWrite,
PromStoreRead, PromStoreRead,
Otlp, Otlp,
LogWrite,
} }
#[derive(Debug)] #[derive(Debug)]
@@ -42,7 +41,7 @@ pub enum PermissionResp {
pub trait PermissionChecker: Send + Sync { pub trait PermissionChecker: Send + Sync {
fn check_permission( fn check_permission(
&self, &self,
user_info: UserInfoRef, user_info: Option<UserInfoRef>,
req: PermissionReq, req: PermissionReq,
) -> Result<PermissionResp>; ) -> Result<PermissionResp>;
} }
@@ -50,7 +49,7 @@ pub trait PermissionChecker: Send + Sync {
impl PermissionChecker for Option<&PermissionCheckerRef> { impl PermissionChecker for Option<&PermissionCheckerRef> {
fn check_permission( fn check_permission(
&self, &self,
user_info: UserInfoRef, user_info: Option<UserInfoRef>,
req: PermissionReq, req: PermissionReq,
) -> Result<PermissionResp> { ) -> Result<PermissionResp> {
match self { match self {

View File

@@ -13,11 +13,9 @@
// limitations under the License. // limitations under the License.
use common_base::secrets::ExposeSecret; use common_base::secrets::ExposeSecret;
use common_error::ext::BoxedError;
use snafu::{OptionExt, ResultExt};
use crate::error::{ use crate::error::{
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu, AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
UserPasswordMismatchSnafu, UserPasswordMismatchSnafu,
}; };
use crate::user_info::DefaultUserInfo; use crate::user_info::DefaultUserInfo;
@@ -51,19 +49,6 @@ impl MockUserProvider {
info.schema.clone_into(&mut self.schema); info.schema.clone_into(&mut self.schema);
info.username.clone_into(&mut self.username); info.username.clone_into(&mut self.username);
} }
// this is a deliberate function to ref AuthBackendSnafu
// so that it won't get deleted in the future
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
let none_option = None;
none_option
.context(UserNotFoundSnafu {
username: "no_user".to_string(),
})
.map_err(BoxedError::new)
.context(AuthBackendSnafu)
}
} }
#[async_trait::async_trait] #[async_trait::async_trait]

View File

@@ -57,11 +57,6 @@ pub trait UserProvider: Send + Sync {
self.authorize(catalog, schema, &user_info).await?; self.authorize(catalog, schema, &user_info).await?;
Ok(user_info) Ok(user_info)
} }
/// Returns whether this user provider implementation is backed by an external system.
fn external(&self) -> bool {
false
}
} }
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> { fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {

View File

@@ -33,7 +33,7 @@ impl StaticUserProvider {
value: value.to_string(), value: value.to_string(),
msg: "StaticUserProviderOption must be in format `<option>:<value>`", msg: "StaticUserProviderOption must be in format `<option>:<value>`",
})?; })?;
match mode { return match mode {
"file" => { "file" => {
let users = load_credential_from_file(content)? let users = load_credential_from_file(content)?
.context(InvalidConfigSnafu { .context(InvalidConfigSnafu {
@@ -58,7 +58,7 @@ impl StaticUserProvider {
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`", msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
} }
.fail(), .fail(),
} };
} }
} }

View File

@@ -18,7 +18,6 @@ use std::sync::Arc;
use api::v1::greptime_request::Request; use api::v1::greptime_request::Request;
use auth::error::Error::InternalState; use auth::error::Error::InternalState;
use auth::error::InternalStateSnafu;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef}; use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind}; use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement; use sql::statements::statement::Statement;
@@ -28,16 +27,15 @@ struct DummyPermissionChecker;
impl PermissionChecker for DummyPermissionChecker { impl PermissionChecker for DummyPermissionChecker {
fn check_permission( fn check_permission(
&self, &self,
_user_info: UserInfoRef, _user_info: Option<UserInfoRef>,
req: PermissionReq, req: PermissionReq,
) -> auth::error::Result<PermissionResp> { ) -> auth::error::Result<PermissionResp> {
match req { match req {
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow), PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject), PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
_ => InternalStateSnafu { _ => Err(InternalState {
msg: "testing".to_string(), msg: "testing".to_string(),
} }),
.fail(),
} }
} }
} }
@@ -47,21 +45,17 @@ fn test_permission_checker() {
let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker); let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker);
let grpc_result = checker.check_permission( let grpc_result = checker.check_permission(
auth::userinfo_by_name(None), None,
PermissionReq::GrpcRequest(&Request::Query(Default::default())), PermissionReq::GrpcRequest(&Request::Query(Default::default())),
); );
assert_matches!(grpc_result, Ok(PermissionResp::Allow)); assert_matches!(grpc_result, Ok(PermissionResp::Allow));
let sql_result = checker.check_permission( let sql_result = checker.check_permission(
auth::userinfo_by_name(None), None,
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new( PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
ShowKind::All,
false,
))),
); );
assert_matches!(sql_result, Ok(PermissionResp::Reject)); assert_matches!(sql_result, Ok(PermissionResp::Reject));
let err_result = let err_result = checker.check_permission(None, PermissionReq::Opentsdb);
checker.check_permission(auth::userinfo_by_name(None), PermissionReq::Opentsdb);
assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing"); assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing");
} }

View File

@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error { impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
Error::CacheRequired { .. } => StatusCode::Unexpected, Error::CacheRequired { .. } => StatusCode::Internal,
} }
} }

View File

@@ -22,10 +22,8 @@ common-config.workspace = true
common-error.workspace = true common-error.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-meta.workspace = true common-meta.workspace = true
common-procedure.workspace = true
common-query.workspace = true common-query.workspace = true
common-recordbatch.workspace = true common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true common-telemetry.workspace = true
common-time.workspace = true common-time.workspace = true
common-version.workspace = true common-version.workspace = true
@@ -42,7 +40,6 @@ moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true partition.workspace = true
paste = "1.0" paste = "1.0"
prometheus.workspace = true prometheus.workspace = true
rustc-hash.workspace = true
serde_json.workspace = true serde_json.workspace = true
session.workspace = true session.workspace = true
snafu.workspace = true snafu.workspace = true
@@ -50,7 +47,6 @@ sql.workspace = true
store-api.workspace = true store-api.workspace = true
table.workspace = true table.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-stream = "0.1"
[dev-dependencies] [dev-dependencies]
cache.workspace = true cache.workspace = true

View File

@@ -18,7 +18,6 @@ use std::fmt::Debug;
use common_error::ext::{BoxedError, ErrorExt}; use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode; use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug; use common_macro::stack_trace_debug;
use common_query::error::datafusion_status_code;
use datafusion::error::DataFusionError; use datafusion::error::DataFusionError;
use snafu::{Location, Snafu}; use snafu::{Location, Snafu};
@@ -50,71 +49,13 @@ pub enum Error {
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to list nodes in cluster"))] #[snafu(display("Failed to list nodes in cluster: {source}"))]
ListNodes { ListNodes {
#[snafu(implicit)] #[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to region stats in cluster"))]
ListRegionStats {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list flows in catalog {catalog}"))]
ListFlows {
#[snafu(implicit)]
location: Location,
catalog: String,
source: BoxedError,
},
#[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
FlowInfoNotFound {
flow_name: String,
catalog_name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Can't convert value to json, input={input}"))]
Json {
input: String,
#[snafu(source)]
error: serde_json::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to get information extension client"))]
GetInformationExtension {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to list procedures"))]
ListProcedures {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Procedure id not found"))]
ProcedureIdNotFound {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("convert proto data error"))]
ConvertProtoData {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to re-compile script due to internal error"))] #[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal { CompileScriptInternal {
#[snafu(implicit)] #[snafu(implicit)]
@@ -130,6 +71,13 @@ pub enum Error {
source: table::error::Error, source: table::error::Error,
}, },
#[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))] #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound { CatalogNotFound {
catalog_name: String, catalog_name: String,
@@ -166,18 +114,6 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display(
"View plan columns changed from: {} to: {}",
origin_names,
actual_names
))]
ViewPlanColumnsChanged {
origin_names: String,
actual_names: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to find table partitions"))] #[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error }, FindPartitions { source: partition::error::Error },
@@ -212,6 +148,13 @@ pub enum Error {
source: common_query::error::Error, source: common_query::error::Error,
}, },
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog"))] #[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog { InvalidTableInfoInCatalog {
#[snafu(implicit)] #[snafu(implicit)]
@@ -230,14 +173,6 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display("Failed to project view columns"))]
ProjectViewColumns {
#[snafu(source)]
error: DataFusionError,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table metadata manager error"))] #[snafu(display("Table metadata manager error"))]
TableMetadataManager { TableMetadataManager {
source: common_meta::error::Error, source: common_meta::error::Error,
@@ -273,21 +208,6 @@ pub enum Error {
}, },
} }
impl Error {
pub fn should_fail(&self) -> bool {
use Error::*;
matches!(
self,
GetViewCache { .. }
| ViewInfoNotFound { .. }
| DecodePlan { .. }
| ViewPlanColumnsChanged { .. }
| ProjectViewColumns { .. }
)
}
}
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error { impl ErrorExt for Error {
@@ -298,16 +218,11 @@ impl ErrorExt for Error {
| Error::FindPartitions { .. } | Error::FindPartitions { .. }
| Error::FindRegionRoutes { .. } | Error::FindRegionRoutes { .. }
| Error::CacheNotFound { .. } | Error::CacheNotFound { .. }
| Error::CastManager { .. } | Error::CastManager { .. } => StatusCode::Unexpected,
| Error::Json { .. }
| Error::GetInformationExtension { .. }
| Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound, Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound, Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal, Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
@@ -317,14 +232,11 @@ impl ErrorExt for Error {
Error::ListCatalogs { source, .. } Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. } | Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. } | Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } | Error::ListTables { source, .. } => source.status_code(),
| Error::ListFlows { source, .. }
| Error::ListProcedures { source, .. }
| Error::ListRegionStats { source, .. }
| Error::ConvertProtoData { source, .. } => source.status_code(),
Error::CreateTable { source, .. } => source.status_code(), Error::CreateTable { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(), Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(), Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -333,8 +245,7 @@ impl ErrorExt for Error {
} }
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied, Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None), Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(), Error::TableMetadataManager { source, .. } => source.status_code(),
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => { Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code() source.status_code()
@@ -349,7 +260,7 @@ impl ErrorExt for Error {
impl From<Error> for DataFusionError { impl From<Error> for DataFusionError {
fn from(e: Error) -> Self { fn from(e: Error) -> Self {
DataFusionError::External(Box::new(e)) DataFusionError::Internal(e.to_string())
} }
} }
@@ -359,6 +270,27 @@ mod tests {
use super::*; use super::*;
#[test]
pub fn test_error_status_code() {
assert_eq!(
StatusCode::TableAlreadyExists,
Error::TableExists {
table: "some_table".to_string(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
msg: String::default(),
location: Location::generate(),
}
.status_code()
);
}
#[test] #[test]
pub fn test_errors_to_datafusion_error() { pub fn test_errors_to_datafusion_error() {
let e: DataFusionError = Error::TableExists { let e: DataFusionError = Error::TableExists {
@@ -367,7 +299,7 @@ mod tests {
} }
.into(); .into();
match e { match e {
DataFusionError::External(_) => {} DataFusionError::Internal(_) => {}
_ => { _ => {
panic!("catalog error should be converted to DataFusionError::Internal") panic!("catalog error should be converted to DataFusionError::Internal")
} }

View File

@@ -14,55 +14,50 @@
mod cluster_info; mod cluster_info;
pub mod columns; pub mod columns;
pub mod flows;
mod information_memory_table;
pub mod key_column_usage; pub mod key_column_usage;
mod memory_table;
mod partitions; mod partitions;
mod procedure_info; mod predicate;
mod region_peers; mod region_peers;
mod region_statistics;
mod runtime_metrics; mod runtime_metrics;
pub mod schemata; pub mod schemata;
mod table_constraints; mod table_constraints;
mod table_names; mod table_names;
pub mod tables; pub mod tables;
mod views; pub(crate) mod utils;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME}; use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_error::ext::ErrorExt; use common_error::ext::BoxedError;
use common_meta::cluster::NodeInfo; use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_meta::datanode::RegionStat;
use common_meta::key::flow::FlowMetadataManager;
use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef; use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use paste::paste; use paste::paste;
pub(crate) use predicate::Predicates;
use snafu::ResultExt;
use store_api::data_source::DataSource;
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType; use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::TableRef; use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};
pub use table_names::*; pub use table_names::*;
use views::InformationSchemaViews;
use self::columns::InformationSchemaColumns; use self::columns::InformationSchemaColumns;
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef}; use crate::error::Result;
use crate::error::{Error, Result}; use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo; use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::flows::InformationSchemaFlows; use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::system_schema::information_schema::information_memory_table::get_schema_columns; use crate::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage; use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions; use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers; use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics; use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata; use crate::information_schema::tables::InformationSchemaTables;
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager; use crate::CatalogManager;
lazy_static! { lazy_static! {
@@ -111,26 +106,107 @@ macro_rules! setup_memory_table {
pub struct InformationSchemaProvider { pub struct InformationSchemaProvider {
catalog_name: String, catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>, tables: HashMap<String, TableRef>,
} }
impl SystemSchemaProvider for InformationSchemaProvider { impl InformationSchemaProvider {
fn tables(&self) -> &HashMap<String, TableRef> { pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
};
provider.build_tables();
provider
}
/// Returns table names in the order of table id.
pub fn table_names(&self) -> Vec<String> {
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
tables.sort_by(|t1, t2| {
t1.table_info()
.table_id()
.partial_cmp(&t2.table_info().table_id())
.unwrap()
});
tables
.into_iter()
.map(|t| t.table_info().name.clone())
.collect()
}
/// Returns a map of [TableRef] in information schema.
pub fn tables(&self) -> &HashMap<String, TableRef> {
assert!(!self.tables.is_empty()); assert!(!self.tables.is_empty());
&self.tables &self.tables
} }
}
impl SystemSchemaProviderInner for InformationSchemaProvider { /// Returns the [TableRef] by table name.
fn catalog_name(&self) -> &str { pub fn table(&self, name: &str) -> Option<TableRef> {
&self.catalog_name self.tables.get(name).cloned()
}
fn schema_name() -> &'static str {
INFORMATION_SCHEMA_NAME
} }
fn system_table(&self, name: &str) -> Option<SystemTableRef> { fn build_tables(&mut self) {
let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}
self.tables = tables;
}
fn build_table(&self, name: &str) -> Option<TableRef> {
self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let data_source = Arc::new(InformationTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
})
}
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
match name.to_ascii_lowercase().as_str() { match name.to_ascii_lowercase().as_str() {
TABLES => Some(Arc::new(InformationSchemaTables::new( TABLES => Some(Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(), self.catalog_name.clone(),
@@ -186,100 +262,27 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new( CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
self.catalog_manager.clone(), self.catalog_manager.clone(),
)) as _), )) as _),
VIEWS => Some(Arc::new(InformationSchemaViews::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
self.catalog_name.clone(),
self.flow_metadata_manager.clone(),
)) as _),
PROCEDURE_INFO => Some(
Arc::new(procedure_info::InformationSchemaProcedureInfo::new(
self.catalog_manager.clone(),
)) as _,
),
REGION_STATISTICS => Some(Arc::new(
region_statistics::InformationSchemaRegionStatistics::new(
self.catalog_manager.clone(),
),
) as _),
_ => None, _ => None,
} }
} }
}
impl InformationSchemaProvider { fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
pub fn new( let table_meta = TableMetaBuilder::default()
catalog_name: String, .schema(table.schema())
catalog_manager: Weak<dyn CatalogManager>, .primary_key_indices(vec![])
flow_metadata_manager: Arc<FlowMetadataManager>, .next_column_id(0)
) -> Self { .build()
let mut provider = Self { .unwrap();
catalog_name, let table_info = TableInfoBuilder::default()
catalog_manager, .table_id(table.table_id())
flow_metadata_manager, .name(table.table_name().to_string())
tables: HashMap::new(), .catalog_name(catalog_name)
}; .schema_name(INFORMATION_SCHEMA_NAME.to_string())
.meta(table_meta)
provider.build_tables(); .table_type(table.table_type())
.build()
provider .unwrap();
} Arc::new(table_info)
fn build_tables(&mut self) {
let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
tables.insert(
RUNTIME_METRICS.to_string(),
self.build_table(RUNTIME_METRICS).unwrap(),
);
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
tables.insert(
REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(),
);
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
tables.insert(
PROCEDURE_INFO.to_string(),
self.build_table(PROCEDURE_INFO).unwrap(),
);
tables.insert(
REGION_STATISTICS.to_string(),
self.build_table(REGION_STATISTICS).unwrap(),
);
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
tables.insert(
KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(),
);
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
// Add memory tables
for name in MEMORY_TABLES.iter() {
tables.insert((*name).to_string(), self.build_table(name).expect(name));
}
self.tables = tables;
} }
} }
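On both sides of this hunk, `build_tables` registers the schema-wide tables unconditionally but only exposes the cluster-level ones (`RUNTIME_METRICS`, `BUILD_INFO`, `REGION_PEERS`, `CLUSTER_INFO`, and friends) when the provider belongs to the default catalog, per the security note in the code. A minimal standalone sketch of that gating pattern follows; the constant value and table names are simplified stand-ins, not the actual `common_catalog` items:

```rust
use std::collections::HashMap;

const DEFAULT_CATALOG_NAME: &str = "greptime";

// Table names used as map keys; stand-ins for the `table_names` constants.
const TABLES: &str = "tables";
const BUILD_INFO: &str = "build_info";
const CLUSTER_INFO: &str = "cluster_info";

/// Builds the name -> table map, exposing cluster-level tables only in the
/// default catalog so other catalogs cannot read node addresses or versions.
fn build_tables(catalog_name: &str) -> HashMap<String, &'static str> {
    let mut tables = HashMap::new();
    if catalog_name == DEFAULT_CATALOG_NAME {
        tables.insert(BUILD_INFO.to_string(), BUILD_INFO);
        tables.insert(CLUSTER_INFO.to_string(), CLUSTER_INFO);
    }
    tables.insert(TABLES.to_string(), TABLES);
    tables
}

fn main() {
    assert!(build_tables("greptime").contains_key(CLUSTER_INFO));
    assert!(!build_tables("other_tenant").contains_key(CLUSTER_INFO));
}
```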
@@ -297,64 +300,57 @@ trait InformationTable {
} }
} }
// Provide compatibility for legacy `information_schema` code. type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
impl<T> SystemTable for T
where struct InformationTableDataSource {
T: InformationTable, table: InformationTableRef,
{ }
fn table_id(&self) -> TableId {
InformationTable::table_id(self) impl InformationTableDataSource {
fn new(table: InformationTableRef) -> Self {
Self { table }
} }
fn table_name(&self) -> &'static str { fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
InformationTable::table_name(self) let schema = self
} .table
.schema()
fn schema(&self) -> SchemaRef { .try_project(projection)
InformationTable::schema(self) .context(SchemaConversionSnafu)
} .map_err(BoxedError::new)?;
Ok(Arc::new(schema))
fn table_type(&self) -> TableType {
InformationTable::table_type(self)
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
InformationTable::to_stream(self, request)
} }
} }
pub type InformationExtensionRef = Arc<dyn InformationExtension<Error = Error> + Send + Sync>; impl DataSource for InformationTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection.clone();
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};
/// The `InformationExtension` trait provides the extension methods for the `information_schema` tables. let stream = self
#[async_trait::async_trait] .table
pub trait InformationExtension { .to_stream(request)
type Error: ErrorExt; .map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});
/// Gets the nodes information. let stream = RecordBatchStreamWrapper {
async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error>; schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
metrics: Default::default(),
};
/// Gets the procedures information. Ok(Box::pin(stream))
async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error>;
/// Gets the region statistics.
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
}
pub struct NoopInformationExtension;
#[async_trait::async_trait]
impl InformationExtension for NoopInformationExtension {
type Error = Error;
async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
Ok(vec![])
}
async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
Ok(vec![])
}
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
Ok(vec![])
} }
} }
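One column of this hunk keeps a blanket `impl<T> SystemTable for T where T: InformationTable` as a compatibility shim for legacy `information_schema` code, while the other routes each table through an `InformationTableDataSource` implementing `DataSource`. A compile-checkable sketch of the blanket-impl bridge, with both traits reduced to two methods (simplified stand-ins, not the real crate traits):

```rust
/// Legacy trait implemented by each information_schema table.
trait InformationTable {
    fn table_id(&self) -> u32;
    fn table_name(&self) -> &'static str;
}

/// Newer trait the rest of the catalog code consumes.
trait SystemTable {
    fn table_id(&self) -> u32;
    fn table_name(&self) -> &'static str;
}

// Blanket impl: anything that only implements `InformationTable` is usable
// wherever a `SystemTable` is expected, so callers migrate without touching
// every table definition.
impl<T: InformationTable> SystemTable for T {
    fn table_id(&self) -> u32 {
        InformationTable::table_id(self)
    }
    fn table_name(&self) -> &'static str {
        InformationTable::table_name(self)
    }
}

struct BuildInfoTable;

impl InformationTable for BuildInfoTable {
    fn table_id(&self) -> u32 {
        42
    }
    fn table_name(&self) -> &'static str {
        "build_info"
    }
}

fn main() {
    // Call through the `SystemTable` view of the legacy table.
    let t = BuildInfoTable;
    assert_eq!(42, SystemTable::table_id(&t));
    assert_eq!("build_info", SystemTable::table_name(&t));
}
```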


@@ -17,10 +17,13 @@ use std::time::Duration;
use arrow_schema::SchemaRef as ArrowSchemaRef; use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID; use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_config::Mode;
use common_error::ext::BoxedError; use common_error::ext::BoxedError;
use common_meta::cluster::NodeInfo; use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp; use common_time::timestamp::Timestamp;
use datafusion::execution::TaskContext; use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
@@ -37,9 +40,8 @@ use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
use super::CLUSTER_INFO; use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, Predicates}; use crate::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager; use crate::CatalogManager;
const PEER_ID: &str = "peer_id"; const PEER_ID: &str = "peer_id";
@@ -67,6 +69,7 @@ const INIT_CAPACITY: usize = 42;
pub(super) struct InformationSchemaClusterInfo { pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef, schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
} }
impl InformationSchemaClusterInfo { impl InformationSchemaClusterInfo {
@@ -74,6 +77,7 @@ impl InformationSchemaClusterInfo {
Self { Self {
schema: Self::schema(), schema: Self::schema(),
catalog_manager, catalog_manager,
start_time_ms: common_time::util::current_time_millis() as u64,
} }
} }
@@ -95,7 +99,11 @@ impl InformationSchemaClusterInfo {
} }
fn builder(&self) -> InformationSchemaClusterInfoBuilder { fn builder(&self) -> InformationSchemaClusterInfoBuilder {
InformationSchemaClusterInfoBuilder::new(self.schema.clone(), self.catalog_manager.clone()) InformationSchemaClusterInfoBuilder::new(
self.schema.clone(),
self.catalog_manager.clone(),
self.start_time_ms,
)
} }
} }
@@ -135,6 +143,7 @@ impl InformationTable for InformationSchemaClusterInfo {
struct InformationSchemaClusterInfoBuilder { struct InformationSchemaClusterInfoBuilder {
schema: SchemaRef, schema: SchemaRef,
start_time_ms: u64,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,
peer_ids: Int64VectorBuilder, peer_ids: Int64VectorBuilder,
@@ -148,7 +157,11 @@ struct InformationSchemaClusterInfoBuilder {
} }
impl InformationSchemaClusterInfoBuilder { impl InformationSchemaClusterInfoBuilder {
fn new(schema: SchemaRef, catalog_manager: Weak<dyn CatalogManager>) -> Self { fn new(
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
) -> Self {
Self { Self {
schema, schema,
catalog_manager, catalog_manager,
@@ -160,17 +173,56 @@ impl InformationSchemaClusterInfoBuilder {
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY), start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY), uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY), active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_time_ms,
} }
} }
/// Construct the `information_schema.cluster_info` virtual table /// Construct the `information_schema.cluster_info` virtual table
async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> { async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let predicates = Predicates::from_scan_request(&request); let predicates = Predicates::from_scan_request(&request);
let information_extension = utils::information_extension(&self.catalog_manager)?; let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
let node_infos = information_extension.nodes().await?;
match mode {
Mode::Standalone => {
let build_info = common_version::build_info();
self.add_node_info(
&predicates,
NodeInfo {
// For the standalone:
// - id always 0
// - empty string for peer_addr
peer: Peer {
id: 0,
addr: "".to_string(),
},
last_activity_ts: -1,
status: NodeStatus::Standalone,
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
// Use `self.start_time_ms` instead.
// It's not precise but enough.
start_time_ms: self.start_time_ms,
},
);
}
Mode::Distributed => {
if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
let node_infos = meta_client
.list_nodes(None)
.await
.map_err(BoxedError::new)
.context(ListNodesSnafu)?;
for node_info in node_infos { for node_info in node_infos {
self.add_node_info(&predicates, node_info); self.add_node_info(&predicates, node_info);
} }
} else {
warn!("Could not find meta client in distributed mode.");
}
}
}
self.finish() self.finish()
} }
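On the side that adds `start_time_ms`, the builder snapshots `common_time::util::current_time_millis()` once in the constructor and reuses it as the standalone node's start time ("not precise but enough", per the comment). A small self-contained sketch of deriving an uptime string from such a snapshot; the formatting helper here is illustrative only, not the helper the crate actually uses:

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Captured once when the virtual table is created, like `start_time_ms`
/// in `InformationSchemaClusterInfo::new`.
fn now_millis() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the UNIX epoch")
        .as_millis() as u64
}

/// Renders an uptime column value relative to the recorded start time.
fn uptime_string(start_time_ms: u64) -> String {
    let elapsed = Duration::from_millis(now_millis().saturating_sub(start_time_ms));
    let secs = elapsed.as_secs();
    format!("{}h {}m {}s", secs / 3600, (secs % 3600) / 60, secs % 60)
}

fn main() {
    let start_time_ms = now_millis();
    // ... serve queries; each scan reports uptime relative to the snapshot.
    println!("uptime: {}", uptime_string(start_time_ms));
}
```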


@@ -257,8 +257,8 @@ impl InformationSchemaColumnsBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?; .context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request); let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? { for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None); let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
while let Some(table) = stream.try_next().await? { while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices; let keys = &table.table_info().meta.primary_key_indices;
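Both columns of this hunk drain the per-schema table stream with `try_next`, so an error from the catalog manager propagates through `?` instead of being silently skipped. A reduced sketch of that consumption pattern, assuming only the `futures` crate; the item and error types are placeholders for the catalog's own:

```rust
use futures::executor::block_on;
use futures::stream::{self, TryStreamExt};

struct TableInfo {
    name: String,
    primary_key_indices: Vec<usize>,
}

fn main() -> Result<(), String> {
    // Stand-in for `catalog_manager.tables(&catalog_name, &schema_name)`,
    // which yields `Result<TableRef, Error>` items lazily.
    let mut tables = stream::iter(vec![
        Ok::<_, String>(TableInfo { name: "metrics".into(), primary_key_indices: vec![0, 1] }),
        Ok(TableInfo { name: "logs".into(), primary_key_indices: vec![] }),
    ]);

    block_on(async {
        // `try_next` surfaces the first `Err`, mirroring the `?` in the builder loop.
        while let Some(table) = tables.try_next().await? {
            println!("{}: primary key indices {:?}", table.name, table.primary_key_indices);
        }
        Ok::<(), String>(())
    })
}
```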


@@ -27,7 +27,6 @@ use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, V
use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value; use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder}; use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt}; use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
@@ -35,7 +34,7 @@ use super::KEY_COLUMN_USAGE;
use crate::error::{ use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
}; };
use crate::system_schema::information_schema::{InformationTable, Predicates}; use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager; use crate::CatalogManager;
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema"; pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
@@ -212,15 +211,25 @@ impl InformationSchemaKeyColumnUsageBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?; .context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request); let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let mut primary_constraints = vec![]; let mut primary_constraints = vec![];
let table_info = table.table_info(); for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_name = &table_info.name; if !catalog_manager
let keys = &table_info.meta.primary_key_indices; .schema_exists(&catalog_name, &schema_name)
.await?
{
continue;
}
for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema(); let schema = table.schema();
for (idx, column) in schema.column_schemas().iter().enumerate() { for (idx, column) in schema.column_schemas().iter().enumerate() {
@@ -231,7 +240,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
TIME_INDEX_CONSTRAINT_NAME, TIME_INDEX_CONSTRAINT_NAME,
&catalog_name, &catalog_name,
&schema_name, &schema_name,
table_name, &table_name,
&column.name, &column.name,
1, //always 1 for time index 1, //always 1 for time index
); );
@@ -240,12 +249,17 @@ impl InformationSchemaKeyColumnUsageBuilder {
primary_constraints.push(( primary_constraints.push((
catalog_name.clone(), catalog_name.clone(),
schema_name.clone(), schema_name.clone(),
table_name.to_string(), table_name.clone(),
column.name.clone(), column.name.clone(),
)); ));
} }
// TODO(dimbtp): foreign key constraint not supported yet // TODO(dimbtp): foreign key constraint not supported yet
} }
} else {
unreachable!();
}
}
}
for (i, (catalog_name, schema_name, table_name, column_name)) in for (i, (catalog_name, schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate() primary_constraints.into_iter().enumerate()
@@ -261,8 +275,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
i as u32 + 1, i as u32 + 1,
); );
} }
}
}
self.finish() self.finish()
} }
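Whichever enumeration strategy is used, the builder first gathers `(catalog, schema, table, column)` tuples for primary-key columns into `primary_constraints` and only afterwards emits them with a 1-based ordinal position (`i as u32 + 1`). A standalone sketch of that two-phase shape, with row emission reduced to a print and illustrative names:

```rust
/// One PRIMARY KEY column occurrence, as gathered in the first pass over
/// all tables of a schema: (catalog, schema, table, column).
type Constraint = (String, String, String, String);

fn emit_primary_keys(constraints: Vec<Constraint>) {
    for (i, (catalog, schema, table, column)) in constraints.into_iter().enumerate() {
        // `ordinal_position` is 1-based, hence `i as u32 + 1` in the diff.
        let ordinal_position = i as u32 + 1;
        println!("PRIMARY {catalog}.{schema}.{table}.{column} ordinal={ordinal_position}");
    }
}

fn main() {
    emit_primary_keys(vec![
        ("greptime".into(), "public".into(), "metrics".into(), "host".into()),
        ("greptime".into(), "public".into(), "metrics".into(), "idc".into()),
    ]);
}
```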


@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
mod table_columns; mod tables;
use std::sync::Arc; use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef; use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -28,21 +27,22 @@ use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef; use datatypes::vectors::VectorRef;
use snafu::ResultExt; use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;
use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;
/// A memory table with specified schema and columns. /// A memory table with specified schema and columns.
pub(crate) struct MemoryTable { pub(super) struct MemoryTable {
pub(crate) table_id: TableId, table_id: TableId,
pub(crate) table_name: &'static str, table_name: &'static str,
pub(crate) schema: SchemaRef, schema: SchemaRef,
pub(crate) columns: Vec<VectorRef>, columns: Vec<VectorRef>,
} }
impl MemoryTable { impl MemoryTable {
/// Creates a memory table with table id, name, schema and columns. /// Creates a memory table with table id, name, schema and columns.
pub fn new( pub(super) fn new(
table_id: TableId, table_id: TableId,
table_name: &'static str, table_name: &'static str,
schema: SchemaRef, schema: SchemaRef,
@@ -56,54 +56,12 @@ impl MemoryTable {
} }
} }
pub fn builder(&self) -> MemoryTableBuilder { fn builder(&self) -> MemoryTableBuilder {
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone()) MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
} }
} }
pub(crate) struct MemoryTableBuilder { impl InformationTable for MemoryTable {
schema: SchemaRef,
columns: Vec<VectorRef>,
}
impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}
/// Construct the `information_schema.{table_name}` virtual table
pub async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
Ok(RecordBatch::new_empty(self.schema.clone()))
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}
impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
impl SystemTable for MemoryTable {
fn table_id(&self) -> TableId { fn table_id(&self) -> TableId {
self.table_id self.table_id
} }
@@ -137,6 +95,48 @@ impl SystemTable for MemoryTable {
} }
} }
struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}
impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}
/// Construct the `information_schema.{table_name}` virtual table
async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}
impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::sync::Arc; use std::sync::Arc;
@@ -147,7 +147,6 @@ mod tests {
use datatypes::vectors::StringVector; use datatypes::vectors::StringVector;
use super::*; use super::*;
use crate::system_schema::SystemTable;
#[tokio::test] #[tokio::test]
async fn test_memory_table() { async fn test_memory_table() {
@@ -167,8 +166,8 @@ mod tests {
); );
assert_eq!(42, table.table_id()); assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name); assert_eq!("test", table.table_name());
assert_eq!(schema, SystemTable::schema(&table)); assert_eq!(schema, InformationTable::schema(&table));
let stream = table.to_stream(ScanRequest::default()).unwrap(); let stream = table.to_stream(ScanRequest::default()).unwrap();
@@ -197,7 +196,7 @@ mod tests {
assert_eq!(42, table.table_id()); assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name()); assert_eq!("test", table.table_name());
assert_eq!(schema, SystemTable::schema(&table)); assert_eq!(schema, InformationTable::schema(&table));
let stream = table.to_stream(ScanRequest::default()).unwrap(); let stream = table.to_stream(ScanRequest::default()).unwrap();
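`MemoryTable` owns its columns up front, so `memory_records` moves them out with `std::mem::take` to produce a single batch (or an empty one), and the partition stream wraps that in `futures::stream::once`. A simplified model of the one-shot stream, with a `Vec<String>`-per-column batch standing in for `RecordBatch`/`VectorRef` and only the `futures` crate assumed:

```rust
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

/// Stand-in for a `RecordBatch`: one `Vec<String>` per column.
type Batch = Vec<Vec<String>>;

struct MemoryTable {
    columns: Batch,
}

impl MemoryTable {
    /// Mirrors `MemoryTableBuilder::memory_records`: the pre-built columns,
    /// moved out exactly once (an empty batch on later calls).
    fn memory_records(&mut self) -> Batch {
        std::mem::take(&mut self.columns)
    }

    /// Mirrors `DfPartitionStream::execute`: a stream that yields the single
    /// batch and then ends.
    fn execute(mut self) -> impl futures::Stream<Item = Batch> {
        stream::once(async move { self.memory_records() })
    }
}

fn main() {
    let table = MemoryTable {
        columns: vec![vec!["8.4.0".into()], vec!["unknown".into()]],
    };
    let batches: Vec<Batch> = block_on(table.execute().collect());
    assert_eq!(1, batches.len());
}
```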


@@ -15,19 +15,17 @@
use std::sync::Arc; use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE}; use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::schema::{Schema, SchemaRef}; use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use super::table_names::*; use crate::information_schema::table_names::*;
use crate::system_schema::utils::tables::{
bigint_column, datetime_column, string_column, string_columns,
};
const NO_VALUE: &str = "NO"; const NO_VALUE: &str = "NO";
/// Find the schema and columns by the table_name, only valid for memory tables. /// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise. /// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) { pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name { let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
COLUMN_PRIVILEGES => ( COLUMN_PRIVILEGES => (
string_columns(&[ string_columns(&[
@@ -82,7 +80,7 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
"GIT_BRANCH", "GIT_BRANCH",
"GIT_COMMIT", "GIT_COMMIT",
"GIT_COMMIT_SHORT", "GIT_COMMIT_SHORT",
"GIT_CLEAN", "GIT_DIRTY",
"PKG_VERSION", "PKG_VERSION",
]), ]),
vec![ vec![
@@ -91,7 +89,7 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
Arc::new(StringVector::from(vec![build_info Arc::new(StringVector::from(vec![build_info
.commit_short .commit_short
.to_string()])), .to_string()])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])), Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])), Arc::new(StringVector::from(vec![build_info.version.to_string()])),
], ],
) )
@@ -416,3 +414,50 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
(Arc::new(Schema::new(column_schemas)), columns) (Arc::new(Schema::new(column_schemas)), columns)
} }
fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}
fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}
fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}
fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);
assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();
assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}
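The helpers in this hunk normalize every memory-table column the same way — lowercase the display name and mark it non-nullable (the `false` flag) — differing only in the data type. A reduced sketch of those helpers over a plain struct standing in for `ColumnSchema`/`ConcreteDataType`:

```rust
#[derive(Debug, PartialEq)]
enum DataType {
    String,
    Int64,
}

#[derive(Debug)]
struct Column {
    name: String,
    data_type: DataType,
    nullable: bool,
}

/// Like `string_column`: lowercased name, string type, NOT NULL.
fn string_column(name: &str) -> Column {
    Column { name: name.to_lowercase(), data_type: DataType::String, nullable: false }
}

/// Like `bigint_column`, differing only in the data type.
fn bigint_column(name: &str) -> Column {
    Column { name: name.to_lowercase(), data_type: DataType::Int64, nullable: false }
}

/// Like `string_columns`: one helper mapped over a name list.
fn string_columns(names: &[&str]) -> Vec<Column> {
    names.iter().map(|n| string_column(n)).collect()
}

fn main() {
    let cols = string_columns(&["GIT_BRANCH", "GIT_COMMIT"]);
    assert_eq!("git_branch", cols[0].name);
    assert!(!cols[1].nullable);
    assert_eq!(DataType::Int64, bigint_column("UPTIME").data_type);
}
```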


@@ -44,8 +44,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result, CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu, UpgradeWeakCatalogManagerRefSnafu,
}; };
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager; use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager; use crate::CatalogManager;
const TABLE_CATALOG: &str = "table_catalog"; const TABLE_CATALOG: &str = "table_catalog";
@@ -240,9 +240,9 @@ impl InformationSchemaPartitionsBuilder {
let predicates = Predicates::from_scan_request(&request); let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? { for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_info_stream = catalog_manager let table_info_stream = catalog_manager
.tables(&catalog_name, &schema_name, None) .tables(&catalog_name, &schema_name)
.try_filter_map(|t| async move { .try_filter_map(|t| async move {
let table_info = t.table_info(); let table_info = t.table_info();
if table_info.table_type == TableType::Temporary { if table_info.table_type == TableType::Temporary {
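The partitions builder filters the table-info stream with `try_filter_map`, which keeps `Ok(Some(_))` items, drops `Ok(None)` (here, temporary tables), and forwards any `Err` unchanged. The hunk is cut off at this point, but a minimal sketch of that combinator contract, with placeholder item and error types and only the `futures` crate assumed, looks like this:

```rust
use futures::executor::block_on;
use futures::stream::{self, TryStreamExt};

#[derive(Clone, Debug, PartialEq)]
enum TableType {
    Base,
    Temporary,
}

#[derive(Clone, Debug)]
struct TableInfo {
    name: String,
    table_type: TableType,
}

fn main() -> Result<(), String> {
    let tables = stream::iter(vec![
        Ok::<_, String>(TableInfo { name: "metrics".into(), table_type: TableType::Base }),
        Ok(TableInfo { name: "numbers".into(), table_type: TableType::Temporary }),
    ]);

    // `try_filter_map` keeps `Ok(Some(_))`, drops `Ok(None)`, and forwards `Err`.
    let kept: Vec<TableInfo> = block_on(
        tables
            .try_filter_map(|t| async move {
                Ok::<_, String>((t.table_type != TableType::Temporary).then_some(t))
            })
            .try_collect(),
    )?;

    assert_eq!(1, kept.len());
    assert_eq!("metrics", kept[0].name);
    Ok(())
}
```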


@@ -25,7 +25,7 @@ type ColumnName = String;
/// we only support these simple predicates currently. /// we only support these simple predicates currently.
/// TODO(dennis): supports more predicate types. /// TODO(dennis): supports more predicate types.
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum Predicate { enum Predicate {
Eq(ColumnName, Value), Eq(ColumnName, Value),
Like(ColumnName, String, bool), Like(ColumnName, String, bool),
NotEq(ColumnName, Value), NotEq(ColumnName, Value),
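`Predicate` models only the simple filters these virtual tables can evaluate — equality, inequality, and `LIKE` — keyed by column name, and is built from `ScanRequest` filters to prune rows early. A tiny standalone sketch of how such an enum prunes values; the `Value` type and `LIKE` matching are simplified here (trailing `%` wildcard only), unlike the real implementation:

```rust
type ColumnName = String;

#[derive(Clone, PartialEq, Eq, Debug)]
enum Predicate {
    Eq(ColumnName, String),
    NotEq(ColumnName, String),
    /// Column, pattern, case-insensitive flag.
    Like(ColumnName, String, bool),
}

impl Predicate {
    /// Returns whether the (column, value) pair satisfies this predicate;
    /// predicates on other columns do not constrain the row.
    fn eval(&self, column: &str, value: &str) -> bool {
        match self {
            Predicate::Eq(c, v) => c != column || v == value,
            Predicate::NotEq(c, v) => c != column || v != value,
            Predicate::Like(c, pattern, insensitive) => {
                if c != column {
                    return true;
                }
                let (p, v) = if *insensitive {
                    (pattern.to_lowercase(), value.to_lowercase())
                } else {
                    (pattern.clone(), value.to_string())
                };
                match p.strip_suffix('%') {
                    Some(prefix) => v.starts_with(prefix),
                    None => v == p,
                }
            }
        }
    }
}

fn main() {
    let p = Predicate::Like("table_name".into(), "information%".into(), true);
    assert!(p.eval("table_name", "INFORMATION_SCHEMA"));
    assert!(!p.eval("table_name", "numbers"));
    assert!(p.eval("table_schema", "anything")); // different column: no constraint
}
```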

Some files were not shown because too many files have changed in this diff.