Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: v0.10.0-ni ... feat/geo-f

101 Commits
| SHA1 |
|---|
| 0bff41e038 |
| e56709b545 |
| a64eb0a5bc |
| 2e4ab6dd91 |
| f761798f93 |
| 420446f19f |
| 841e66c810 |
| d1c635085c |
| 47657ebbc8 |
| 64ae32def0 |
| 744946957e |
| d5455db2d5 |
| 28bf549907 |
| 4ea412249a |
| eacc7bc471 |
| b72d3bc71d |
| 0b102ef846 |
| e404e9dafc |
| 63a442632e |
| d39bafcfbd |
| 1717445ebe |
| 55d65da24d |
| 3297d5f657 |
| d6865911ee |
| 63f2463273 |
| da337a9635 |
| 3973d6b01f |
| 2c731c76ad |
| 40e7b58c80 |
| 5177717f71 |
| 8d61e6fe49 |
| a3b8d2fe8f |
| 863ee073a9 |
| 25cd61b310 |
| 3517c13192 |
| b9cedf2c1a |
| 883c5bc5b0 |
| d628079f4c |
| 0025fa6ec7 |
| ff04109ee6 |
| 9c1704d4cb |
| a12a905578 |
| 449236360d |
| bf16422cee |
| 9db08dbbe0 |
| 9d885fa0c2 |
| b25a2b117e |
| 6fccff4810 |
| 30af78700f |
| 8de11a0e34 |
| 975b8c69e5 |
| 8036b44347 |
| 4c72b3f3fe |
| 76dc906574 |
| 2a73e0937f |
| c8de8b80f4 |
| ec59ce5c9a |
| f578155602 |
| d1472782d0 |
| 93be81c041 |
| 2c3fccb516 |
| c1b1be47ba |
| 0f85037024 |
| f88705080b |
| cbb06cd0c6 |
| b59a93dfbc |
| 202c730363 |
| 63e1892dc1 |
| 216bce6973 |
| 4466fee580 |
| 5aa4c70057 |
| 72a1732fb4 |
| c821d21111 |
| 2e2eacf3b2 |
| 9bcaeaaa0e |
| 90cfe276b4 |
| 6694d2a930 |
| 9532ffb954 |
| 665b7e5c6e |
| 27d9aa0f3b |
| 8f3293d4fb |
| 7dd20b0348 |
| 4c1a3f29c0 |
| 0d70961448 |
| a75cfaa516 |
| aa3f53f08a |
| 8f0959fa9f |
| 4a3982ca60 |
| 559219496d |
| 685aa7dd8f |
| be5364a056 |
| a25d9f736f |
| 2cd4a78f17 |
| 188e182d75 |
| d64cc79ab4 |
| e6cc4df8c8 |
| 803780030d |
| 79f10d0415 |
| 3937e67694 |
| 4c93fe6c2d |
| c4717abb68 |
@@ -14,10 +14,11 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092
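For illustration, a filled-in version of the GCS and Kafka test settings above might look like the sketch below; every value is a placeholder for your own bucket, credentials, and endpoints, not something taken from the repository.

```bash
# Illustrative only: append filled-in GCS / Kafka WAL test settings to .env.
# All values are placeholders; replace them with your own bucket, credential, and endpoints.
cat >> .env <<'EOF'
GT_GCS_BUCKET = my-test-bucket
GT_GCS_SCOPE = https://www.googleapis.com/auth/devstorage.read_write
GT_GCS_CREDENTIAL_PATH = /path/to/gcs-credential.json
GT_GCS_CREDENTIAL = base64-encoded-service-account-json
GT_GCS_ENDPOINT = https://storage.googleapis.com
GT_KAFKA_ENDPOINTS = localhost:9092
EOF
```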
.github/actions/build-linux-artifacts/action.yml (vendored, 26 changed lines)
@@ -17,6 +17,12 @@ inputs:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false

@@ -31,8 +37,8 @@ runs:
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}

- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.

@@ -51,8 +57,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}

@@ -64,8 +70,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash

@@ -82,8 +88,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary

@@ -94,5 +100,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
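The change above stops hard-coding `public.ecr.aws/i8k6a5e1/greptime` and instead passes the registry and namespace in as action inputs. Outside of CI, the same integration-test target can be invoked directly; the sketch below mirrors the old defaults and the values are examples only.

```bash
# Rough local equivalent of the action's integration-test step; the registry and
# namespace here mirror the previously hard-coded defaults and are examples only.
make run-it-in-container BUILD_JOBS=4 \
  IMAGE_REGISTRY=public.ecr.aws \
  IMAGE_NAMESPACE=i8k6a5e1/greptime
```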
@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true

@@ -43,9 +40,8 @@ runs:
brew install protobuf

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}

- name: Start etcd # For integration tests.
@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true

@@ -28,9 +25,8 @@ runs:
- uses: arduino/setup-protoc@v3

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview

@@ -69,7 +65,7 @@ runs:
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: C:\tmp\greptime-*.log
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3

- name: Build greptime binary
@@ -1,13 +1,13 @@
meta:
config: |-
configData: |-
[runtime]
global_rt_size = 4
datanode:
config: |-
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
global_rt_size = 4
global_rt_size = 4
@@ -1,5 +1,5 @@
meta:
config: |-
configData: |-
[runtime]
global_rt_size = 4

@@ -7,7 +7,7 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2

@@ -16,7 +16,7 @@ datanode:
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
configData: |-
[runtime]
global_rt_size = 4
@@ -1,5 +1,5 @@
meta:
config: |-
configData: |-
[runtime]
global_rt_size = 4

@@ -7,12 +7,12 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
global_rt_size = 4
@@ -1,5 +1,5 @@
meta:
config: |-
configData: |-
[runtime]
global_rt_size = 4

@@ -13,7 +13,7 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2

@@ -23,7 +23,7 @@ datanode:
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
configData: |-
[runtime]
global_rt_size = 4
.github/actions/setup-postgres-cluster/action.yml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"

runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}
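With the defaults declared above, the helm command this composite action runs expands roughly as follows (a sketch; the kubeconfig context and chart version are whatever the runner provides).

```bash
# Approximate expansion of the action's helm step using its default inputs:
# 1 replica, PostgreSQL 14.2, 1Gi of storage, namespace "postgres-namespace".
helm upgrade \
  --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
  --set replicaCount=1 \
  --set image.tag=14.2 \
  --set persistence.size=1Gi \
  --set postgresql.username=greptimedb \
  --set postgresql.password=admin \
  --create-namespace \
  -n postgres-namespace
```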
.github/workflows/apidoc.yml (vendored, 7 changed lines)
@@ -12,9 +12,6 @@ on:

name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2024-04-20

jobs:
apidoc:
runs-on: ubuntu-20.04

@@ -23,9 +20,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
.github/workflows/dev-build.yml (vendored, 4 changed lines)
@@ -177,6 +177,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts

@@ -206,6 +208,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

release-images-to-dockerhub:
name: Build and push images to DockerHub
.github/workflows/develop.yml (vendored, 149 changed lines)
@@ -29,9 +29,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-04-20

jobs:
check-typos-and-docs:
name: Check typos and docs

@@ -64,9 +61,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -82,9 +77,7 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -107,9 +100,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs

@@ -145,13 +136,23 @@ jobs:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -169,7 +170,7 @@ jobs:
name: bins
path: .
- name: Unzip binaries
run: |
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB

@@ -193,13 +194,23 @@ jobs:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -250,9 +261,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs

@@ -263,7 +272,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |

@@ -277,7 +286,7 @@ jobs:
artifacts-dir: bin
version: current

distributed-fuzztest:
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci

@@ -285,24 +294,24 @@ jobs:
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind

@@ -314,13 +323,13 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -390,12 +399,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()

@@ -407,13 +416,13 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f

distributed-fuzztest-with-chaos:
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci

@@ -421,12 +430,24 @@ jobs:
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind

@@ -440,13 +461,13 @@ jobs:
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:

@@ -517,12 +538,12 @@ jobs:
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()

@@ -534,7 +555,7 @@ jobs:
- name: Delete cluster
if: success()
shell: bash
run: |
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)

@@ -557,6 +578,10 @@ jobs:
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:

@@ -564,10 +589,6 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs

@@ -587,9 +608,8 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2

@@ -608,9 +628,8 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2

@@ -634,9 +653,8 @@ jobs:
with:
version: "14.0"
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2

@@ -666,6 +684,9 @@ jobs:
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:

@@ -682,7 +703,9 @@ jobs:
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v4
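For reference, the sqlness step above can be reproduced locally once the `bins` artifact has been downloaded next to the checkout; the sketch below simply drops the matrix-supplied options.

```bash
# Unpack the pre-built binaries and run the sqlness suite, mirroring the CI step
# (the matrix-supplied ${{ matrix.mode.opts }} flags are omitted in this sketch).
tar -xvf ./bins.tar.gz
RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
```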
.github/workflows/nightly-build.yml (vendored, 4 changed lines)
@@ -154,6 +154,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts

@@ -173,6 +175,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

release-images-to-dockerhub:
name: Build and push images to DockerHub
.github/workflows/nightly-ci.yml (vendored, 21 changed lines)
@@ -9,9 +9,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-04-20

permissions:
issues: write

@@ -33,6 +30,13 @@ jobs:
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-kind
path: /tmp/kind/
retention-days: 3

sqlness-windows:
name: Sqlness tests on Windows

@@ -45,9 +49,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness

@@ -55,11 +57,11 @@ jobs:
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: C:\tmp\greptime-*.log
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3

test-on-windows:

@@ -78,9 +80,8 @@ jobs:
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
.github/workflows/release-dev-builder-images.yaml (vendored, 133 changed lines)
@@ -1,12 +1,14 @@
name: Release dev-builder images

on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image

@@ -28,22 +30,103 @@ jobs:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT

- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ inputs.version }}
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}

release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}

- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public

- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}

docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}

docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}

docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04

@@ -51,35 +134,39 @@ jobs:
release-dev-builder-images
]
steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}

- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}

- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}

- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
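The `set-version` step above derives the dev-builder image tag from the pinned Rust toolchain date, the short commit SHA, and the build time. A standalone sketch of the same computation, run from the repository root:

```bash
# Recompute the dev-builder image version locally, mirroring the set-version step
# (CI derives the short SHA from ${{ github.sha }} | cut -c1-8).
commitShortSHA=$(git rev-parse --short=8 HEAD)
buildTime=$(date +%Y%m%d%H%M%S)
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' rust-toolchain.toml)
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "$IMAGE_VERSION"   # same shape as the Makefile's pinned tag, e.g. 2024-06-06-b4b105ad-20240827021230
```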
.github/workflows/release.yml (vendored, 17 changed lines)
@@ -82,7 +82,6 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.

@@ -99,6 +98,16 @@ permissions:
contents: write # Allows the action to create a release.

jobs:
check-builder-rust-version:
name: Check rust version in builder
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh

allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}

@@ -183,6 +192,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts

@@ -202,6 +213,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-macos-artifacts:
name: Build macOS artifacts

@@ -240,7 +253,6 @@ jobs:
- uses: ./.github/actions/build-macos-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}

@@ -283,7 +295,6 @@ jobs:
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
Cargo.lock (generated, 502 changed lines): file diff suppressed because it is too large.
Cargo.toml (34 changed lines)
@@ -64,7 +64,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.9.1"
version = "0.9.2"
edition = "2021"
license = "Apache-2.0"

@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can

@@ -104,15 +105,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }

@@ -124,7 +125,7 @@ humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "049171eb16cb4249d8099751a0c46750d1fe88e7" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"

@@ -151,14 +152,18 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = "0.5"
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "0.31"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"

@@ -169,6 +174,7 @@ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "5
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.36", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"

@@ -238,14 +244,14 @@ table = { path = "src/table" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "049171eb16cb4249d8099751a0c46750d1fe88e7"
rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"

[profile.release]
debug = 1

[profile.nightly]
inherits = "release"
strip = true
strip = "debuginfo"
lto = "thin"
debug = false
incremental = false
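The `[profile.nightly]` changes above (thin LTO, strip = "debuginfo", no debug info) take effect when building with that profile; a minimal sketch of such a build, reusing the `--locked` flag the Makefile already passes:

```bash
# Build the greptime binary with the custom "nightly" cargo profile from Cargo.toml.
cargo build --locked --profile nightly --bin greptime
```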
Makefile (13 changed lines)
@@ -8,6 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-b4b105ad-20240827021230
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu

@@ -77,7 +78,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \

@@ -91,7 +92,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \

@@ -105,8 +106,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'

.PHONY: clean
clean: ## Clean the project.

@@ -145,7 +146,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.

@@ -203,7 +204,7 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: start-cluster
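Because `IMAGE_REGISTRY`, `IMAGE_NAMESPACE`, `BASE_IMAGE`, and the new `DEV_BUILDER_IMAGE_TAG` are all `?=` defaults, they can be overridden per invocation; the second command below is only an illustration of pulling the dev-builder from a different registry.

```bash
# Build greptime inside the pinned dev-builder image, using the Makefile defaults.
make build-by-dev-builder

# Illustrative override: pull the dev-builder image from another registry/namespace.
make build-by-dev-builder \
  IMAGE_REGISTRY=public.ecr.aws \
  IMAGE_NAMESPACE=i8k6a5e1/greptime \
  DEV_BUILDER_IMAGE_TAG=2024-06-06-b4b105ad-20240827021230
```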
@@ -150,7 +150,7 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire

## Project Status

The current version has not yet reached the standards for General Availability.
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)

We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.

@@ -172,6 +172,13 @@ In addition, you may:
- Connect with us on [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)

## Commercial Support

If you are running GreptimeDB OSS in your organization, we offer additional
enterprise addons, installation service, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
detail of our commercial license.

## License

GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
@@ -67,6 +67,12 @@
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
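As a worked example, the Kafka-related options documented above map onto a `[wal]` section of the configuration roughly as sketched below; the fragment is illustrative, uses the documented defaults, and assumes provider = "kafka" is the switch the notes refer to.

```bash
# Write an illustrative TOML fragment exercising the documented Kafka WAL options
# (defaults taken from the table above; the file path is just an example).
cat > /tmp/datanode-wal-example.toml <<'EOF'
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
max_batch_bytes = "1MB"
consumer_wait_timeout = "100ms"
EOF
```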
@@ -93,6 +99,7 @@
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
@@ -110,11 +117,12 @@
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -145,17 +153,17 @@
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -230,17 +238,17 @@
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -253,12 +261,13 @@
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
|
||||
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `backend` | String | `EtcdStore` | The datastore for meta server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
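The metasrv rows above correspond to top-level keys in the metasrv TOML. A hedged sketch using the documented defaults (the `round_robin` selector and `EtcdStore` backend reflect the new rows in this diff):

```toml
# Sketch of the metasrv options described above; values are the documented defaults.
data_home = "/tmp/metasrv/"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addr = "127.0.0.1:2379"   # etcd store by default
selector = "round_robin"        # or "lease_based" / "load_based"
use_memory_store = false
enable_region_failover = false  # cluster mode with remote WAL and shared storage only
backend = "EtcdStore"

[runtime]
global_rt_size = 8
compact_rt_size = 4
```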
@@ -279,9 +288,10 @@
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||
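A minimal sketch of the `[wal]` section these rows describe, assuming the Kafka provider and keeping the documented defaults:

```toml
# Kafka-backed WAL metadata managed by metasrv; values mirror the defaults above.
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"  # topics become greptimedb_wal_topic_0, _1, ...
replication_factor = 1
create_topic_timeout = "30s"
backoff_init = "500ms"
```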
@@ -292,17 +302,17 @@
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -323,6 +333,10 @@
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
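The deprecated flat `rpc_*` keys above move under `[grpc]`, which sits alongside the `[http]` block. A sketch with the documented defaults:

```toml
# New-style server sections replacing the deprecated top-level rpc_* keys.
[http]
addr = "127.0.0.1:4000"
timeout = "30s"        # 0 disables the timeout
body_limit = "64MB"    # 0 disables the limit

[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1" # address advertised to the metasrv
```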
@@ -368,6 +382,8 @@
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
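For the default local-file layout, the two storage rows above combine into a very small block; object stores swap the `type` and add credentials (see the examples further below):

```toml
# Default local file storage; values are the documented defaults.
[storage]
data_home = "/tmp/greptimedb/"
type = "File"
```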
@@ -382,6 +398,7 @@
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
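For the `Gcs` type specifically, the credential-related rows combine roughly as below. The `bucket` key is assumed from the usual object-store setup and is not listed in the rows above; all credential values are placeholders:

```toml
# Placeholder GCS settings; usually only one of credential_path / credential is needed.
[storage]
type = "Gcs"
bucket = "my-bucket"                 # assumed key, not part of the rows above
root = "data"
scope = "test"
credential_path = "/path/to/credential.json"
# credential = "base64-credential"   # alternative to credential_path
endpoint = "https://storage.googleapis.com"
```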
@@ -399,11 +416,12 @@
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
@@ -432,17 +450,17 @@
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -477,7 +495,7 @@
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
|
||||
@@ -39,6 +39,18 @@ rpc_max_recv_message_size = "512MB"
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -187,6 +199,32 @@ backoff_base = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Whether to enable WAL index creation.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_index = true
|
||||
|
||||
## The interval for dumping WAL indexes.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
dump_index_interval = "60s"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
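The commented `[storage]` stub above is cut off by the hunk boundary. An uncommented sketch of what an S3 configuration typically looks like; the bucket, credentials and region are placeholders, and the exact key set should be checked against the shipped example config:

```toml
# Hypothetical S3 storage block; replace every placeholder with real values.
[storage]
type = "S3"
bucket = "greptimedb-data"
root = "data"
access_key_id = "<access key id>"
secret_access_key = "<secret access key>"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"
```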
@@ -223,6 +261,7 @@ backoff_deadline = "5mins"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
@@ -294,6 +333,11 @@ scope = "test"
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential= "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
@@ -362,9 +406,13 @@ sst_meta_cache_size = "128MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## If not set, it's default to 1/8 of OS memory.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
@@ -375,7 +423,8 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
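Putting the write-cache keys from this hunk together under `region_engine.mito`, a sketch with the cache switched on and the new `8h` TTL from this diff:

```toml
# Experimental write cache for object-store deployments; illustrative values.
[[region_engine]]
[region_engine.mito]
enable_experimental_write_cache = true  # off by default
experimental_write_cache_path = ""      # empty means {data_home}/write_cache
experimental_write_cache_size = "512MB"
experimental_write_cache_ttl = "8h"
```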
@@ -493,8 +542,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
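The OTLP endpoint change in this hunk (empty string to `http://localhost:4317`) sits inside the `[logging]` block. A sketch of the whole block with tracing enabled, values taken from the documented defaults:

```toml
# Logging with OTLP tracing turned on; endpoint matches the new default in this diff.
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
enable_otlp_tracing = true
otlp_endpoint = "http://localhost:4317"
append_stdout = true

[logging.tracing_sample_ratio]
default_ratio = 1.0   # sample everything; valid range [0, 1]
```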
@@ -516,12 +564,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
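For standalone mode, the hunk above amounts to the following `[export_metrics]` layout; the `greptime_metrics` database must exist before enabling it, and the remote-write variant is shown commented as the alternative:

```toml
# Self-import of internal metrics in standalone mode; create the database first.
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.self_import]
db = "greptime_metrics"

# Alternative for cluster deployments: push via Prometheus remote-write instead.
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
```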
@@ -70,8 +70,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
@@ -177,8 +177,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -200,12 +199,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -7,14 +7,15 @@ bind_addr = "127.0.0.1:3002"
|
||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
|
||||
## Etcd server address.
|
||||
## Store server address default to etcd store.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
|
||||
## Datanode selector type.
|
||||
## - `lease_based` (default value).
|
||||
## - `round_robin` (default value)
|
||||
## - `lease_based`
|
||||
## - `load_based`
|
||||
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
selector = "lease_based"
|
||||
selector = "round_robin"
|
||||
|
||||
## Store data in memory.
|
||||
use_memory_store = false
|
||||
@@ -31,6 +32,9 @@ store_key_prefix = ""
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The datastore for meta server.
|
||||
backend = "EtcdStore"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
@@ -95,7 +99,12 @@ provider = "raft_engine"
|
||||
## The broker endpoints of the Kafka cluster.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Number of topics to be created upon start.
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Number of topics.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
@@ -104,6 +113,7 @@ num_topics = 64
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
@@ -123,6 +133,24 @@ backoff_base = 2
|
||||
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
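The commented stanzas above show the shape of the Kafka authentication settings. Uncommented, with placeholder credentials and certificate paths, they would look like this:

```toml
# Placeholder SASL/TLS settings for a Kafka-backed WAL; only used when provider = "kafka".
[wal.sasl]
type = "SCRAM-SHA-512"   # also: "PLAIN", "SCRAM-SHA-256"
username = "user_kafka"
password = "secret"

[wal.tls]
server_ca_cert_path = "/path/to/server_cert"
client_cert_path = "/path/to/client_cert"
client_key_path = "/path/to/key"
```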
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
@@ -136,8 +164,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -159,12 +186,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
@@ -171,6 +171,34 @@ sync_period = "10s"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Number of topics.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
replication_factor = 1
|
||||
|
||||
## Above which a topic creation operation will be cancelled.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_topic_timeout = "30s"
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -196,6 +224,24 @@ backoff_base = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Metadata storage options.
|
||||
[metadata_store]
|
||||
## Kv file size in bytes.
|
||||
@@ -246,6 +292,7 @@ retry_delay = "500ms"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
@@ -317,6 +364,11 @@ scope = "test"
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
@@ -385,9 +437,13 @@ sst_meta_cache_size = "128MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## If not set, it's default to 1/8 of OS memory.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
@@ -398,7 +454,8 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
@@ -522,8 +579,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -545,12 +601,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
|
||||
51
docs/benchmarks/log/README.md
Normal file
@@ -0,0 +1,51 @@
# Log benchmark configuration

This directory holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.

Here are the versions of the databases used in the benchmark:

| name          | version    |
| :------------ | :--------- |
| GreptimeDB    | v0.9.2     |
| Clickhouse    | 24.9.1.219 |
| Elasticsearch | 8.15.0     |

## Structured model vs. unstructured model

We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.

__Structured model__

The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support in each database. In real scenarios this means the data source (or log producer) has separate fields defined, or has already processed the raw input.

__Unstructured model__

The log data is inserted as a long string, and we then build a fulltext index on these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios this means the log is produced by some kind of middleware and inserted directly into the database.

## Creating tables

See [here](./create_table.sql) for GreptimeDB's and Clickhouse's create table clauses.
The mapping for Elasticsearch is created automatically.

## Vector configuration

We use vector to generate random log data and send inserts to the databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.

## SQLs and payloads

Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.

## Steps to reproduce

0. Decide whether to run the structured model test or the unstructured model test.
1. Build the vector binary (see vector's config file for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run queries against each database. Note: you'll need to update the time range values after data insertion.

## Additional notes

- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
56
docs/benchmarks/log/create_table.sql
Normal file
@@ -0,0 +1,56 @@
|
||||
-- GreptimeDB create table clause
|
||||
-- structured test, use vector to pre-process log data into fields
|
||||
CREATE TABLE IF NOT EXISTS `test_table` (
|
||||
`bytes` Int64 NULL,
|
||||
`http_version` STRING NULL,
|
||||
`ip` STRING NULL,
|
||||
`method` STRING NULL,
|
||||
`path` STRING NULL,
|
||||
`status` SMALLINT UNSIGNED NULL,
|
||||
`user` STRING NULL,
|
||||
`timestamp` TIMESTAMP(3) NOT NULL,
|
||||
PRIMARY KEY (`user`, `path`, `status`),
|
||||
TIME INDEX (`timestamp`)
|
||||
)
|
||||
ENGINE=mito
|
||||
WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
-- unstructured test, build fulltext index on message column
|
||||
CREATE TABLE IF NOT EXISTS `test_table` (
|
||||
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
|
||||
`timestamp` TIMESTAMP(3) NOT NULL,
|
||||
TIME INDEX (`timestamp`)
|
||||
)
|
||||
ENGINE=mito
|
||||
WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
-- Clickhouse create table clause
|
||||
-- structured test
|
||||
CREATE TABLE IF NOT EXISTS test_table
|
||||
(
|
||||
bytes UInt64 NOT NULL,
|
||||
http_version String NOT NULL,
|
||||
ip String NOT NULL,
|
||||
method String NOT NULL,
|
||||
path String NOT NULL,
|
||||
status UInt8 NOT NULL,
|
||||
user String NOT NULL,
|
||||
timestamp String NOT NULL,
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY (user, path, status);
|
||||
|
||||
-- unstructured test
|
||||
SET allow_experimental_full_text_index = true;
|
||||
CREATE TABLE IF NOT EXISTS test_table
|
||||
(
|
||||
message String,
|
||||
timestamp String,
|
||||
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY tuple();
|
||||
199
docs/benchmarks/log/query.md
Normal file
@@ -0,0 +1,199 @@
|
||||
# Query URL and payload for Elastic Search
|
||||
## Count
|
||||
URL: `http://127.0.0.1:9200/_count`
|
||||
|
||||
## Query by timerange
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
|
||||
You can use the following payload to get the full timerange first.
|
||||
```JSON
|
||||
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
|
||||
```
|
||||
|
||||
And then use this payload to query by timerange.
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 1000,
|
||||
"query": {
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-16T04:30:44.000Z",
|
||||
"lte": "2024-08-16T04:51:52.000Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Query by condition
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
### Structured payload
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"user.keyword": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"method.keyword": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"path.keyword": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"http_version.keyword": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"status": "401"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
### Unstructured payload
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "401"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Query by condition and timerange
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
### Structured payload
|
||||
```JSON
|
||||
{
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"user.keyword": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"method.keyword": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"path.keyword": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"http_version.keyword": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"status": "401"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-19T07:03:37.383Z",
|
||||
"lte": "2024-08-19T07:24:58.883Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
### Unstructured payload
|
||||
```JSON
|
||||
{
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "401"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-19T05:16:17.099Z",
|
||||
"lte": "2024-08-19T05:46:02.722Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
50
docs/benchmarks/log/query.sql
Normal file
@@ -0,0 +1,50 @@
|
||||
-- Structured query for GreptimeDB and Clickhouse
|
||||
|
||||
-- query count
|
||||
select count(*) from test_table;
|
||||
|
||||
-- query by timerange. Note: place the timestamp range in the where clause
|
||||
-- GreptimeDB
|
||||
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
|
||||
-- to get the full timestamp range
|
||||
select * from test_table where timestamp between 1723710843619 and 1723711367588;
|
||||
-- Clickhouse
|
||||
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
|
||||
-- to get the full timestamp range
|
||||
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||
|
||||
-- query by condition
|
||||
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
|
||||
|
||||
-- query by condition and timerange
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
|
||||
and timestamp between 1723774396760 and 1723774788760;
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
|
||||
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||
|
||||
-- Unstructured query for GreptimeDB and Clickhouse
|
||||
|
||||
|
||||
-- query by condition
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||
AND (message LIKE '%OPTION%')
|
||||
AND (message LIKE '%/user/booperbot124%')
|
||||
AND (message LIKE '%HTTP/1.1%')
|
||||
AND (message LIKE '%401%');
|
||||
|
||||
-- query by condition and timerange
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
|
||||
and timestamp between 1723710843619 and 1723711367588;
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||
AND (message LIKE '%OPTION%')
|
||||
AND (message LIKE '%/user/booperbot124%')
|
||||
AND (message LIKE '%HTTP/1.1%')
|
||||
AND (message LIKE '%401%')
|
||||
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';
|
||||
57
docs/benchmarks/log/structured_vector.toml
Normal file
@@ -0,0 +1,57 @@
|
||||
# Please note we use patched branch to build vector
|
||||
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
|
||||
|
||||
[sources.demo_logs]
|
||||
type = "demo_logs"
|
||||
format = "apache_common"
|
||||
# interval value = 1 / rps
|
||||
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||
# set to 0 to run as fast as possible
|
||||
interval = 0
|
||||
# total rows to insert
|
||||
count = 100000000
|
||||
lines = [ "line1" ]
|
||||
|
||||
[transforms.parse_logs]
|
||||
type = "remap"
|
||||
inputs = ["demo_logs"]
|
||||
source = '''
|
||||
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
|
||||
|
||||
# Convert timestamp to a standard format
|
||||
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
|
||||
|
||||
# Convert status and bytes to integers
|
||||
.status = to_int!(.status)
|
||||
.bytes = to_int!(.bytes)
|
||||
'''
|
||||
|
||||
[sinks.sink_greptime_logs]
|
||||
type = "greptimedb_logs"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
pipeline_name = "demo_pipeline"
|
||||
compression = "none"
|
||||
inputs = [ "parse_logs" ]
|
||||
endpoint = "http://127.0.0.1:4000"
|
||||
# Batch size for each insertion
|
||||
batch.max_events = 4000
|
||||
|
||||
[sinks.clickhouse]
|
||||
type = "clickhouse"
|
||||
inputs = [ "parse_logs" ]
|
||||
database = "default"
|
||||
endpoint = "http://127.0.0.1:8123"
|
||||
format = "json_each_row"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
|
||||
[sinks.sink_elasticsearch]
|
||||
type = "elasticsearch"
|
||||
inputs = [ "parse_logs" ]
|
||||
api_version = "auto"
|
||||
compression = "none"
|
||||
doc_type = "_doc"
|
||||
endpoints = [ "http://127.0.0.1:9200" ]
|
||||
id_key = "id"
|
||||
mode = "bulk"
|
||||
43
docs/benchmarks/log/unstructured_vector.toml
Normal file
@@ -0,0 +1,43 @@
|
||||
# Please note we use patched branch to build vector
|
||||
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
|
||||
|
||||
[sources.demo_logs]
|
||||
type = "demo_logs"
|
||||
format = "apache_common"
|
||||
# interval value = 1 / rps
|
||||
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||
# set to 0 to run as fast as possible
|
||||
interval = 0
|
||||
# total rows to insert
|
||||
count = 100000000
|
||||
lines = [ "line1" ]
|
||||
|
||||
[sinks.sink_greptime_logs]
|
||||
type = "greptimedb_logs"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
pipeline_name = "demo_pipeline"
|
||||
compression = "none"
|
||||
inputs = [ "demo_logs" ]
|
||||
endpoint = "http://127.0.0.1:4000"
|
||||
# Batch size for each insertion
|
||||
batch.max_events = 500
|
||||
|
||||
[sinks.clickhouse]
|
||||
type = "clickhouse"
|
||||
inputs = [ "demo_logs" ]
|
||||
database = "default"
|
||||
endpoint = "http://127.0.0.1:8123"
|
||||
format = "json_each_row"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
|
||||
[sinks.sink_elasticsearch]
|
||||
type = "elasticsearch"
|
||||
inputs = [ "demo_logs" ]
|
||||
api_version = "auto"
|
||||
compression = "none"
|
||||
doc_type = "_doc"
|
||||
endpoints = [ "http://127.0.0.1:9200" ]
|
||||
id_key = "id"
|
||||
mode = "bulk"
|
||||
58
docs/benchmarks/tsbs/v0.9.1.md
Normal file
@@ -0,0 +1,58 @@
# TSBS benchmark - v0.9.1

## Environment

### Local

|        |                                    |
| ------ | ---------------------------------- |
| CPU    | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB                               |
| Disk   | SOLIDIGM SSDPFKNU010TZ             |
| OS     | Ubuntu 22.04.2 LTS                 |

### Amazon EC2

|         |                         |
| ------- | ----------------------- |
| Machine | c5d.2xlarge             |
| CPU     | 8 core                  |
| Memory  | 16GB                    |
| Disk    | 100GB (GP3)             |
| OS      | Ubuntu Server 24.04 LTS |

## Write performance

| Environment     | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local           | 387697.68            |
| EC2 c5d.2xlarge | 234620.19            |

## Query performance

| Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1         | 21.14      | 14.75                |
| cpu-max-all-8         | 36.79      | 30.69                |
| double-groupby-1      | 529.02     | 987.85               |
| double-groupby-5      | 1064.53    | 1455.95              |
| double-groupby-all    | 1625.33    | 2143.96              |
| groupby-orderby-limit | 529.19     | 1353.49              |
| high-cpu-1            | 12.09      | 8.24                 |
| high-cpu-all          | 3619.47    | 5312.82              |
| lastpoint             | 224.91     | 576.06               |
| single-groupby-1-1-1  | 10.82      | 6.01                 |
| single-groupby-1-1-12 | 11.16      | 7.42                 |
| single-groupby-1-8-1  | 13.50      | 10.20                |
| single-groupby-5-1-1  | 11.99      | 6.70                 |
| single-groupby-5-1-12 | 13.17      | 8.72                 |
| single-groupby-5-8-1  | 16.01      | 12.07                |

`single-groupby-1-1-1` query throughput

| Environment     | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local           | 50                 | 33.04          | 1511.74           |
| Local           | 100                | 67.70          | 1476.14           |
| EC2 c5d.2xlarge | 50                 | 61.93          | 806.97            |
| EC2 c5d.2xlarge | 100                | 126.31         | 791.40            |
@@ -25,7 +25,7 @@ Please ensure the following configuration before importing the dashboard into Gr
|
||||
|
||||
__1. Prometheus scrape config__
|
||||
|
||||
Assign `greptime_pod` label to each host target. We use this label to identify each node instance.
|
||||
Configure Prometheus to scrape the cluster.
|
||||
|
||||
```yml
|
||||
# example config
|
||||
@@ -34,27 +34,15 @@ Assign `greptime_pod` label to each host target. We use this label to identify e
|
||||
scrape_configs:
|
||||
- job_name: metasrv
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: metasrv
|
||||
- targets: ['<metasrv-ip>:<port>']
|
||||
|
||||
- job_name: datanode
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode1
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode2
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode3
|
||||
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
|
||||
|
||||
- job_name: frontend
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: frontend
|
||||
- targets: ['<frontend-ip>:<port>']
|
||||
```
|
||||
|
||||
__2. Grafana config__
|
||||
@@ -63,4 +51,4 @@ Create a Prometheus data source in Grafana before using this dashboard. We use `
|
||||
|
||||
### Usage
|
||||
|
||||
Use `datasource` or `greptime_pod` on the upper-left corner to filter data from certain node.
|
||||
Use `datasource` or `instance` on the upper-left corner to filter data from certain node.
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,2 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-04-20"
|
||||
channel = "nightly-2024-06-06"
|
||||
|
||||
|
||||
42
scripts/check-builder-rust-version.sh
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
|
||||
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
|
||||
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
|
||||
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
|
||||
|
||||
function check_rust_toolchain_version() {
|
||||
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
|
||||
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
|
||||
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
|
||||
|
||||
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
|
||||
if [ -z "$CURRENT_VERSION" ]; then
|
||||
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
|
||||
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Compare the version and the difference should be less than 1 day.
|
||||
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
|
||||
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
|
||||
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
|
||||
|
||||
if [ $date_diff -gt 1 ]; then
|
||||
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
check_rust_toolchain_version
|
||||
@@ -1,62 +1,72 @@
|
||||
#!/bin/sh
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -ue
|
||||
|
||||
OS_TYPE=
|
||||
ARCH_TYPE=
|
||||
|
||||
# Set the GitHub token to avoid GitHub API rate limit.
|
||||
# You can run with `GITHUB_TOKEN`:
|
||||
# GITHUB_TOKEN=<your_token> ./scripts/install.sh
|
||||
GITHUB_TOKEN=${GITHUB_TOKEN:-}
|
||||
|
||||
VERSION=${1:-latest}
|
||||
GITHUB_ORG=GreptimeTeam
|
||||
GITHUB_REPO=greptimedb
|
||||
BIN=greptime
|
||||
|
||||
get_os_type() {
|
||||
os_type="$(uname -s)"
|
||||
function get_os_type() {
|
||||
os_type="$(uname -s)"
|
||||
|
||||
case "$os_type" in
|
||||
case "$os_type" in
|
||||
Darwin)
|
||||
OS_TYPE=darwin
|
||||
;;
|
||||
OS_TYPE=darwin
|
||||
;;
|
||||
Linux)
|
||||
OS_TYPE=linux
|
||||
;;
|
||||
OS_TYPE=linux
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown OS type: $os_type"
|
||||
exit 1
|
||||
esac
|
||||
echo "Error: Unknown OS type: $os_type"
|
||||
exit 1
|
||||
esac
|
||||
}
|
||||
|
||||
get_arch_type() {
|
||||
arch_type="$(uname -m)"
|
||||
function get_arch_type() {
|
||||
arch_type="$(uname -m)"
|
||||
|
||||
case "$arch_type" in
|
||||
case "$arch_type" in
|
||||
arm64)
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
aarch64)
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
x86_64)
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
amd64)
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown CPU type: $arch_type"
|
||||
exit 1
|
||||
esac
|
||||
echo "Error: Unknown CPU type: $arch_type"
|
||||
exit 1
|
||||
esac
|
||||
}
|
||||
|
||||
get_os_type
|
||||
get_arch_type
|
||||
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest nightly version.
|
||||
function download_artifact() {
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest stable released version.
|
||||
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
|
||||
if [ "${VERSION}" = "latest" ]; then
|
||||
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest version."
|
||||
exit 1
|
||||
# To avoid other tools dependency, we choose to use `curl` to get the version metadata and parsed by `sed`.
|
||||
VERSION=$(curl -sL \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
${GITHUB_TOKEN:+-H "Authorization: Bearer $GITHUB_TOKEN"} \
|
||||
"https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest" | sed -n 's/.*"tag_name": "\([^"]*\)".*/\1/p')
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest stable released version."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -73,4 +83,9 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
||||
echo "Run './${BIN} --help' to get started"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
get_os_type
|
||||
get_arch_type
|
||||
download_artifact
|
||||
|
||||
@@ -40,6 +40,7 @@ moka = { workspace = true, features = ["future", "sync"] }
|
||||
partition.workspace = true
|
||||
paste = "1.0"
|
||||
prometheus.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -30,6 +30,7 @@ use pg_namespace::PGNamespace;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
|
||||
use super::memory_table::MemoryTable;
|
||||
use super::utils::tables::u32_column;
|
||||
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
@@ -52,6 +53,9 @@ pub struct PGCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
|
||||
// Workaround to store mapping of schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for PGCatalogProvider {
|
||||
@@ -85,6 +89,7 @@ impl PGCatalogProvider {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
namespace_oid_map: Arc::new(PGNamespaceOidMap::new()),
|
||||
};
|
||||
provider.build_tables();
|
||||
provider
|
||||
@@ -122,10 +127,12 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
@@ -60,14 +61,22 @@ pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +84,7 @@ impl PGClass {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
string_column(RELNAMESPACE),
|
||||
u32_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
@@ -86,6 +95,7 @@ impl PGClass {
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -155,10 +165,11 @@ struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: StringVectorBuilder,
|
||||
relnamespace: UInt32VectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
@@ -168,15 +179,17 @@ impl PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
@@ -217,6 +230,7 @@ impl PGClassBuilder {
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let namespace_oid = self.namespace_oid_map.get_oid(schema);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
@@ -230,7 +244,7 @@ impl PGClassBuilder {
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(schema));
|
||||
self.relnamespace.push(Some(namespace_oid));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub(super) mod oid_map;
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
@@ -25,16 +27,16 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use super::{PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::string_column;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -48,21 +50,29 @@ pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
string_column(OID_COLUMN_NAME),
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
@@ -72,6 +82,7 @@ impl PGNamespace {
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -138,8 +149,9 @@ struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: StringVectorBuilder,
|
||||
oid: UInt32VectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
@@ -148,12 +160,14 @@ impl PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
namespace_oid_map,
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
@@ -178,14 +192,15 @@ impl PGNamespaceBuilder {
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(schema_name)),
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(schema_name));
|
||||
self.oid.push(Some(oid));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
|
||||
100
src/catalog/src/system_schema/pg_catalog/pg_namespace/oid_map.rs
Normal file
@@ -0,0 +1,100 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::hash::BuildHasher;
use std::sync::Arc;

use dashmap::DashMap;
use rustc_hash::FxSeededState;

pub type PGNamespaceOidMapRef = Arc<PGNamespaceOidMap>;

// Workaround to convert schema_name to a numeric id;
// remove this once greptime has a numeric schema id.
pub struct PGNamespaceOidMap {
    oid_map: DashMap<String, u32>,

    // Rust uses SipHasher by default, which provides resistance against DOS attacks.
    // It produces different hash values on each greptime instance, which makes the
    // sqlness tests fail. We need a deterministic hash here to give the same oid to
    // the same schema name on a best-effort basis; DOS attacks are not a concern here.
    hasher: FxSeededState,
}

impl PGNamespaceOidMap {
    pub fn new() -> Self {
        Self {
            oid_map: DashMap::new(),
            hasher: FxSeededState::with_seed(0), // PLEASE DO NOT MODIFY THIS SEED VALUE!!!
        }
    }

    fn oid_is_used(&self, oid: u32) -> bool {
        self.oid_map.iter().any(|e| *e.value() == oid)
    }

    pub fn get_oid(&self, schema_name: &str) -> u32 {
        if let Some(oid) = self.oid_map.get(schema_name) {
            *oid
        } else {
            let mut oid = self.hasher.hash_one(schema_name) as u32;
            while self.oid_is_used(oid) {
                oid = self.hasher.hash_one(oid) as u32;
            }
            self.oid_map.insert(schema_name.to_string(), oid);
            oid
        }
    }
}

#[cfg(test)]
mod tests {

    use super::*;

    #[test]
    fn oid_is_stable() {
        let oid_map_1 = PGNamespaceOidMap::new();
        let oid_map_2 = PGNamespaceOidMap::new();

        let schema = "schema";
        let oid = oid_map_1.get_oid(schema);

        // oid stays stable within the same instance
        assert_eq!(oid, oid_map_1.get_oid(schema));

        // oid stays stable across different instances
        assert_eq!(oid, oid_map_2.get_oid(schema));
    }

    #[test]
    fn oid_collision() {
        let oid_map = PGNamespaceOidMap::new();

        let key1 = "3178510";
        let key2 = "4215648";

        // the two keys collide under the seeded hasher
        assert_eq!(
            oid_map.hasher.hash_one(key1) as u32,
            oid_map.hasher.hash_one(key2) as u32
        );

        // insert them into oid_map
        let oid1 = oid_map.get_oid(key1);
        let oid2 = oid_map.get_oid(key2);

        // they should get different ids
        assert_ne!(oid1, oid2);
    }
}

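To illustrate why the seeded hasher above yields stable oids across processes, here is a minimal standalone sketch. It assumes a rustc-hash version that exposes FxSeededState (as the file above does); the schema name is just an example value.

use std::hash::BuildHasher;

use rustc_hash::FxSeededState;

fn main() {
    // Two hashers built with the same fixed seed produce identical hashes,
    // so independently started instances derive the same oid for a schema.
    let a = FxSeededState::with_seed(0);
    let b = FxSeededState::with_seed(0);
    let schema = "public";
    assert_eq!(a.hash_one(schema) as u32, b.hash_one(schema) as u32);
    println!("oid = {}", a.hash_one(schema) as u32);
}
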
@@ -91,7 +91,7 @@ impl Database {
|
||||
///
|
||||
/// - the name of database when using GreptimeDB standalone or cluster
|
||||
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
|
||||
/// environment
|
||||
/// environment
|
||||
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
catalog: String::default(),
|
||||
|
||||
@@ -16,7 +16,7 @@ use api::v1::flow::{FlowRequest, FlowResponse};
|
||||
use api::v1::region::InsertRequests;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::node_manager::Flownode;
|
||||
use snafu::{location, Location, ResultExt};
|
||||
use snafu::{location, ResultExt};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::Client;
|
||||
|
||||
@@ -33,7 +33,7 @@ use common_telemetry::error;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use prost::Message;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use snafu::{location, Location, OptionExt, ResultExt};
|
||||
use snafu::{location, OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ file-engine.workspace = true
|
||||
flow.workspace = true
|
||||
frontend = { workspace = true, default-features = false }
|
||||
futures.workspace = true
|
||||
human-panic = "1.2.2"
|
||||
human-panic = "2.0"
|
||||
lazy_static.workspace = true
|
||||
meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
@@ -80,7 +80,7 @@ tonic.workspace = true
|
||||
tracing-appender = "0.2"
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
tikv-jemallocator = "0.5"
|
||||
tikv-jemallocator = "0.6"
|
||||
|
||||
[dev-dependencies]
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
|
||||
@@ -62,8 +62,37 @@ enum SubCommand {
|
||||
#[global_allocator]
|
||||
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
fn main() -> Result<()> {
|
||||
use snafu::ResultExt;
|
||||
// Set the stack size to 8MB for the thread so it wouldn't overflow on large stack usage in debug mode
|
||||
// see https://github.com/GreptimeTeam/greptimedb/pull/4317
|
||||
// and https://github.com/rust-lang/rust/issues/34283
|
||||
std::thread::Builder::new()
|
||||
.name("main_spawn".to_string())
|
||||
.stack_size(8 * 1024 * 1024)
|
||||
.spawn(|| {
|
||||
{
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.thread_stack_size(8 * 1024 * 1024)
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("Failed building the Runtime")
|
||||
.block_on(main_body())
|
||||
}
|
||||
})
|
||||
.context(cmd::error::SpawnThreadSnafu)?
|
||||
.join()
|
||||
.expect("Couldn't join on the associated thread")
|
||||
}
|
||||
|
||||
#[cfg(not(debug_assertions))]
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
main_body().await
|
||||
}
|
||||
|
||||
async fn main_body() -> Result<()> {
|
||||
setup_human_panic();
|
||||
start(Command::parse()).await
|
||||
}
|
||||
@@ -110,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
|
||||
}
|
||||
|
||||
fn setup_human_panic() {
|
||||
let metadata = human_panic::Metadata {
|
||||
version: env!("CARGO_PKG_VERSION").into(),
|
||||
name: "GreptimeDB".into(),
|
||||
authors: Default::default(),
|
||||
homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
|
||||
};
|
||||
human_panic::setup_panic!(metadata);
|
||||
human_panic::setup_panic!(
|
||||
human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
|
||||
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
|
||||
);
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
}
|
||||
|
||||
@@ -267,7 +267,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version(), short_version());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Datanode start command: {:#?}", self);
|
||||
info!("Datanode options: {:#?}", opts);
|
||||
|
||||
@@ -305,6 +305,12 @@ pub enum Error {
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to spawn thread"))]
|
||||
SpawnThread {
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Other error"))]
|
||||
Other {
|
||||
source: BoxedError,
|
||||
@@ -395,7 +401,9 @@ impl ErrorExt for Error {
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
Error::StartCatalogManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
@@ -214,7 +215,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version(), short_version());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
info!("Flownode options: {:#?}", opts);
|
||||
@@ -296,11 +297,13 @@ impl StartCommand {
|
||||
Arc::new(executor),
|
||||
);
|
||||
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
opts,
|
||||
Plugins::new(),
|
||||
table_metadata_manager,
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager,
|
||||
)
|
||||
.with_heartbeat_task(heartbeat_task);
|
||||
|
||||
|
||||
@@ -261,7 +261,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.clone(),
|
||||
);
|
||||
log_versions(version(), short_version());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
info!("Frontend options: {:#?}", opts);
|
||||
|
||||
@@ -30,7 +30,7 @@ pub mod standalone;

lazy_static::lazy_static! {
    static ref APP_VERSION: prometheus::IntGaugeVec =
        prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["short_version", "version"]).unwrap();
        prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
}

#[async_trait]
@@ -74,16 +74,16 @@ pub trait App: Send {
}

/// Log the versions of the application, and the arguments passed to the cli.
/// `version_string` should be the same as the output of cli "--version";
/// and the `app_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version_string: &str, app_version: &str) {
/// `version` should be the same as the output of cli "--version";
/// and the `short_version` is the short version of the codes, often consist of git branch and commit.
pub fn log_versions(version: &str, short_version: &str, app: &str) {
    // Report app version as gauge.
    APP_VERSION
        .with_label_values(&[env!("CARGO_PKG_VERSION"), app_version])
        .with_label_values(&[env!("CARGO_PKG_VERSION"), short_version, app])
        .inc();

    // Log version and argument flags.
    info!("GreptimeDB version: {}", version_string);
    info!("GreptimeDB version: {}", version);

    log_env_flags();
}

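For readers unfamiliar with this kind of metric: a version gauge carries its information in the labels rather than the value. A minimal sketch of the same pattern, assuming only the prometheus and lazy_static crates (metric name and label values here are placeholders, not the project's real module):

use prometheus::{register_int_gauge_vec, IntGaugeVec};

lazy_static::lazy_static! {
    static ref APP_VERSION_GAUGE: IntGaugeVec = register_int_gauge_vec!(
        "app_version_demo",
        "app version reported through labels",
        &["version", "short_version", "app"]
    )
    .unwrap();
}

fn report_version(version: &str, short_version: &str, app: &str) {
    // The gauge value only counts reports; scrapers read the label set.
    APP_VERSION_GAUGE
        .with_label_values(&[version, short_version, app])
        .inc();
}

fn main() {
    report_version("0.10.0", "abc1234", "greptime-standalone");
}
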
@@ -22,6 +22,7 @@ use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_version::{short_version, version};
|
||||
use meta_srv::bootstrap::MetasrvInstance;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use snafu::ResultExt;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
@@ -137,6 +138,9 @@ struct StartCommand {
|
||||
/// The max operations per txn
|
||||
#[clap(long)]
|
||||
max_txn_ops: Option<usize>,
|
||||
/// The database backend.
|
||||
#[clap(long, value_enum)]
|
||||
backend: Option<BackendImpl>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -219,6 +223,12 @@ impl StartCommand {
|
||||
opts.max_txn_ops = max_txn_ops;
|
||||
}
|
||||
|
||||
if let Some(backend) = &self.backend {
|
||||
opts.backend.clone_from(backend);
|
||||
} else {
|
||||
opts.backend = BackendImpl::default()
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
opts.http.disable_dashboard = true;
|
||||
|
||||
@@ -234,7 +244,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
None,
|
||||
);
|
||||
log_versions(version(), short_version());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
@@ -178,6 +178,16 @@ impl Configurable for StandaloneOptions {
|
||||
}
|
||||
}
|
||||
|
||||
/// The [`StandaloneOptions`] is only defined in cmd crate,
|
||||
/// we don't want to make `frontend` depends on it, so impl [`Into`]
|
||||
/// rather than [`From`].
|
||||
#[allow(clippy::from_over_into)]
|
||||
impl Into<FrontendOptions> for StandaloneOptions {
|
||||
fn into(self) -> FrontendOptions {
|
||||
self.frontend_options()
|
||||
}
|
||||
}
|
||||
|
||||
impl StandaloneOptions {
|
||||
pub fn frontend_options(&self) -> FrontendOptions {
|
||||
let cloned_opts = self.clone();
|
||||
@@ -415,7 +425,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
None,
|
||||
);
|
||||
log_versions(version(), short_version());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!("Standalone options: {opts:#?}");
|
||||
@@ -476,11 +486,13 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
Default::default(),
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager.clone(),
|
||||
);
|
||||
let flownode = Arc::new(
|
||||
flow_builder
|
||||
@@ -508,10 +520,9 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
|
||||
opts.wal.into(),
|
||||
opts.wal.clone().into(),
|
||||
kv_backend.clone(),
|
||||
));
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
|
||||
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
table_id_sequence,
|
||||
wal_options_allocator.clone(),
|
||||
@@ -532,7 +543,7 @@ impl StartCommand {
|
||||
.await?;
|
||||
|
||||
let mut frontend = FrontendBuilder::new(
|
||||
fe_opts.clone(),
|
||||
fe_opts,
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager.clone(),
|
||||
@@ -560,7 +571,7 @@ impl StartCommand {
|
||||
|
||||
let (tx, _rx) = broadcast::channel(1);
|
||||
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), plugins)
|
||||
let servers = Services::new(opts, Arc::new(frontend.clone()), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -22,7 +22,7 @@ use common_grpc::channel_manager::{
|
||||
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
};
|
||||
use common_runtime::global::RuntimeOptions;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
@@ -82,13 +82,14 @@ fn test_load_datanode_example_config() {
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
],
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some("".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -136,7 +137,7 @@ fn test_load_frontend_example_config() {
|
||||
}),
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some("".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -169,12 +170,12 @@ fn test_load_metasrv_example_config() {
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::LeaseBased,
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some("".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -217,7 +218,9 @@ fn test_load_standalone_example_config() {
|
||||
sst_meta_cache_size: ReadableSize::mb(128),
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
selector_result_cache_size: ReadableSize::mb(512),
|
||||
max_background_jobs: 4,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
@@ -228,7 +231,7 @@ fn test_load_standalone_example_config() {
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some("".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
|
||||
@@ -48,19 +48,19 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// The database name may come from different sources:
///
/// - MySQL `schema` name in MySQL protocol login request: it's optional and user
///   and switch database using `USE` command
/// - Postgres `database` parameter in Postgres wire protocol, required
/// - HTTP RESTful API: the database parameter, optional
/// - gRPC: the dbname field in header, optional but has a higher priority than
///   original catalog/schema
///
/// When database name is provided, we attempt to parse catalog and schema from
/// it. We assume the format `[<catalog>-]<schema>`:
///
/// - If `[<catalog>-]` part is not provided, we use whole database name as
///   schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
///   `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
    match parse_optional_catalog_and_schema_from_db_string(db) {
        (Some(catalog), schema) => (catalog, schema),

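As a companion to the doc comment above, a minimal self-contained sketch of the `[<catalog>-]<schema>` split. The DEFAULT_CATALOG constant is a placeholder for illustration; the real function delegates to parse_optional_catalog_and_schema_from_db_string and resolves the default elsewhere.

// Hypothetical default catalog, for illustration only.
const DEFAULT_CATALOG: &str = "greptime";

fn parse_db_string(db: &str) -> (String, String) {
    match db.split_once('-') {
        // "<catalog>-<schema>": split on the first `-`.
        Some((catalog, schema)) if !catalog.is_empty() && !schema.is_empty() => {
            (catalog.to_string(), schema.to_string())
        }
        // Plain "<schema>": the whole string is the schema name.
        _ => (DEFAULT_CATALOG.to_string(), db.to_string()),
    }
}

fn main() {
    assert_eq!(
        parse_db_string("mycatalog-public"),
        ("mycatalog".to_string(), "public".to_string())
    );
    assert_eq!(
        parse_db_string("public"),
        (DEFAULT_CATALOG.to_string(), "public".to_string())
    );
}
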
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use object_store::services::Fs;
|
||||
use object_store::util::DefaultLoggingInterceptor;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -22,13 +23,9 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let builder = Fs::default();
|
||||
let object_store = ObjectStore::new(builder.root(root))
|
||||
.context(BuildBackendSnafu)?
|
||||
.layer(
|
||||
object_store::layers::LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some("debug"))
|
||||
.expect("input error level must be valid"),
|
||||
)
|
||||
.layer(object_store::layers::LoggingLayer::new(
|
||||
DefaultLoggingInterceptor,
|
||||
))
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
|
||||
.finish();
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::services::S3;
|
||||
use object_store::util::DefaultLoggingInterceptor;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -84,13 +85,9 @@ pub fn build_s3_backend(
|
||||
// TODO(weny): Consider finding a better way to eliminate duplicate code.
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::BuildBackendSnafu)?
|
||||
.layer(
|
||||
object_store::layers::LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some("debug"))
|
||||
.expect("input error level must be valid"),
|
||||
)
|
||||
.layer(object_store::layers::LoggingLayer::new(
|
||||
DefaultLoggingInterceptor,
|
||||
))
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer::new(true))
|
||||
.finish())
|
||||
|
||||
@@ -7,6 +7,10 @@ license.workspace = true
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = ["geo"]
|
||||
geo = ["geohash", "h3o"]
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
@@ -23,6 +27,8 @@ common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
@@ -39,3 +45,4 @@ table.workspace = true
|
||||
[dev-dependencies]
|
||||
ron = "0.7"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -110,7 +110,7 @@ mod test {
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_flush_flow_metadata() {
|
||||
@@ -130,8 +130,8 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_flow_service() {
|
||||
#[tokio::test]
|
||||
async fn test_missing_flow_service() {
|
||||
let f = FlushFlowFunction;
|
||||
|
||||
let args = vec!["flow_name"];
|
||||
@@ -140,7 +140,7 @@ mod test {
|
||||
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing FlowServiceHandler, not expected",
|
||||
result.to_string()
|
||||
|
||||
@@ -32,7 +32,7 @@ pub struct FunctionContext {
|
||||
|
||||
impl FunctionContext {
|
||||
/// Create a mock [`FunctionContext`] for test.
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
#[cfg(test)]
|
||||
pub fn mock() -> Self {
|
||||
Self {
|
||||
query_ctx: QueryContextBuilder::default().build().into(),
|
||||
@@ -56,8 +56,10 @@ pub trait Function: fmt::Display + Sync + Send {
    /// Returns the name of the function, should be unique.
    fn name(&self) -> &str;

    /// The returned data type of function execution.
    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType>;

    /// The signature of function.
    fn signature(&self) -> Signature;

    /// Evaluate the function, e.g. run/execute the function.
@@ -65,3 +67,22 @@ pub trait Function: fmt::Display + Sync + Send {
}

pub type FunctionRef = Arc<dyn Function>;

/// Async Scalar function trait
#[async_trait::async_trait]
pub trait AsyncFunction: fmt::Display + Sync + Send {
    /// Returns the name of the function, should be unique.
    fn name(&self) -> &str;

    /// The returned data type of function execution.
    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType>;

    /// The signature of function.
    fn signature(&self) -> Signature;

    /// Evaluate the function, e.g. run/execute the function.
    /// TODO(dennis): simplify the signature and refactor all the admin functions.
    async fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef>;
}

pub type AsyncFunctionRef = Arc<dyn AsyncFunction>;

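To make the new trait concrete, here is a hedged sketch of an implementation. EchoFunction is hypothetical, the trait is re-declared in a simplified form (only name and eval; the real trait also has return_type and signature), and the vector/result types are plain stand-ins so the example stays self-contained. It assumes the async-trait and tokio crates.

use std::fmt;
use std::sync::Arc;

use async_trait::async_trait;

// Simplified stand-ins for the crate's VectorRef and Result types.
type VectorRef = Arc<Vec<String>>;
type Result<T> = std::result::Result<T, String>;

#[async_trait]
trait AsyncFunction: fmt::Display + Sync + Send {
    fn name(&self) -> &str;
    async fn eval(&self, columns: &[VectorRef]) -> Result<VectorRef>;
}

/// Hypothetical async scalar function that echoes its first column.
struct EchoFunction;

impl fmt::Display for EchoFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "ECHO")
    }
}

#[async_trait]
impl AsyncFunction for EchoFunction {
    fn name(&self) -> &str {
        "echo"
    }

    async fn eval(&self, columns: &[VectorRef]) -> Result<VectorRef> {
        // An async body may await I/O (e.g. a meta-service call) before returning.
        columns
            .first()
            .cloned()
            .ok_or_else(|| "missing column".to_string())
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let f = EchoFunction;
    let col: VectorRef = Arc::new(vec!["hello".to_string()]);
    let out = f.eval(&[col]).await?;
    println!("{} -> {:?}", f.name(), out);
    Ok(())
}
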
@@ -18,7 +18,7 @@ use std::sync::{Arc, RwLock};
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
use crate::function::FunctionRef;
|
||||
use crate::function::{AsyncFunctionRef, FunctionRef};
|
||||
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
|
||||
use crate::scalars::date::DateFunction;
|
||||
use crate::scalars::expression::ExpressionFunction;
|
||||
@@ -32,6 +32,7 @@ use crate::table::TableFunction;
|
||||
#[derive(Default)]
|
||||
pub struct FunctionRegistry {
|
||||
functions: RwLock<HashMap<String, FunctionRef>>,
|
||||
async_functions: RwLock<HashMap<String, AsyncFunctionRef>>,
|
||||
aggregate_functions: RwLock<HashMap<String, AggregateFunctionMetaRef>>,
|
||||
}
|
||||
|
||||
@@ -44,6 +45,27 @@ impl FunctionRegistry {
|
||||
.insert(func.name().to_string(), func);
|
||||
}
|
||||
|
||||
pub fn register_async(&self, func: AsyncFunctionRef) {
|
||||
let _ = self
|
||||
.async_functions
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(func.name().to_string(), func);
|
||||
}
|
||||
|
||||
pub fn get_async_function(&self, name: &str) -> Option<AsyncFunctionRef> {
|
||||
self.async_functions.read().unwrap().get(name).cloned()
|
||||
}
|
||||
|
||||
pub fn async_functions(&self) -> Vec<AsyncFunctionRef> {
|
||||
self.async_functions
|
||||
.read()
|
||||
.unwrap()
|
||||
.values()
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
|
||||
let _ = self
|
||||
.aggregate_functions
|
||||
@@ -94,6 +116,10 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
SystemFunction::register(&function_registry);
|
||||
TableFunction::register(&function_registry);
|
||||
|
||||
// Geo functions
|
||||
#[cfg(feature = "geo")]
|
||||
crate::scalars::geo::GeoFunctions::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
pub mod aggregate;
|
||||
pub(crate) mod date;
|
||||
pub mod expression;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod matches;
|
||||
pub mod math;
|
||||
pub mod numpy;
|
||||
|
||||
@@ -75,7 +75,7 @@ where
|
||||
// to keep the not_greater length == floor+1
|
||||
// so to ensure the peek of the not_greater is array[floor]
|
||||
// and the peek of the greater is array[floor+1]
|
||||
let p = if let Some(p) = self.p { p } else { 0.0_f64 };
|
||||
let p = self.p.unwrap_or(0.0_f64);
|
||||
let floor = (((self.n - 1) as f64) * p / (100_f64)).floor();
|
||||
if value <= *self.not_greater.peek().unwrap() {
|
||||
self.not_greater.push(value);
|
||||
|
||||
31
src/common/function/src/scalars/geo.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod geohash;
|
||||
mod h3;
|
||||
|
||||
use geohash::GeohashFunction;
|
||||
use h3::H3Function;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct GeoFunctions;
|
||||
|
||||
impl GeoFunctions {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(GeohashFunction));
|
||||
registry.register(Arc::new(H3Function));
|
||||
}
|
||||
}
|
||||
135
src/common/function/src/scalars/geo/geohash.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_error::ext::{BoxedError, PlainError};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
|
||||
use geohash::Coord;
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
/// Function that return geohash string for a given geospatial coordinate.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GeohashFunction;
|
||||
|
||||
const NAME: &str = "geohash";
|
||||
|
||||
impl Function for GeohashFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
let mut signatures = Vec::new();
|
||||
for coord_type in &[
|
||||
ConcreteDataType::float32_datatype(),
|
||||
ConcreteDataType::float64_datatype(),
|
||||
] {
|
||||
for resolution_type in &[
|
||||
ConcreteDataType::int8_datatype(),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
ConcreteDataType::uint16_datatype(),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
] {
|
||||
signatures.push(TypeSignature::Exact(vec![
|
||||
// latitude
|
||||
coord_type.clone(),
|
||||
// longitude
|
||||
coord_type.clone(),
|
||||
// resolution
|
||||
resolution_type.clone(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
Signature::one_of(signatures, Volatility::Stable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 3, provided : {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let lat_vec = &columns[0];
|
||||
let lon_vec = &columns[1];
|
||||
let resolution_vec = &columns[2];
|
||||
|
||||
let size = lat_vec.len();
|
||||
let mut results = StringVectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let lat = lat_vec.get(i).as_f64_lossy();
|
||||
let lon = lon_vec.get(i).as_f64_lossy();
|
||||
let r = match resolution_vec.get(i) {
|
||||
Value::Int8(v) => v as usize,
|
||||
Value::Int16(v) => v as usize,
|
||||
Value::Int32(v) => v as usize,
|
||||
Value::Int64(v) => v as usize,
|
||||
Value::UInt8(v) => v as usize,
|
||||
Value::UInt16(v) => v as usize,
|
||||
Value::UInt32(v) => v as usize,
|
||||
Value::UInt64(v) => v as usize,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let result = match (lat, lon) {
|
||||
(Some(lat), Some(lon)) => {
|
||||
let coord = Coord { x: lon, y: lat };
|
||||
let encoded = geohash::encode(coord, r)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("Geohash error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
Some(encoded)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
results.push(result.as_deref());
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GeohashFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", NAME)
|
||||
}
|
||||
}
|
||||
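The core of the eval loop above is a single call into the geohash crate. A minimal standalone sketch of that conversion (coordinates are example values; assumes the geohash = "0.13" dependency added above):

use geohash::{encode, Coord};

fn main() {
    // Note the axis mapping used by the function above: x = longitude, y = latitude.
    let coord = Coord {
        x: -122.3321_f64,
        y: 47.6062_f64,
    };
    // The third SQL argument maps to the geohash length (resolution).
    let hash = encode(coord, 9).expect("valid coordinate and resolution");
    println!("geohash = {hash}");
}
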
143
src/common/function/src/scalars/geo/h3.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_error::ext::{BoxedError, PlainError};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
|
||||
use h3o::{LatLng, Resolution};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
/// Function that return h3 encoding string for a given geospatial coordinate.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct H3Function;
|
||||
|
||||
const NAME: &str = "h3";
|
||||
|
||||
impl Function for H3Function {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
let mut signatures = Vec::new();
|
||||
for coord_type in &[
|
||||
ConcreteDataType::float32_datatype(),
|
||||
ConcreteDataType::float64_datatype(),
|
||||
] {
|
||||
for resolution_type in &[
|
||||
ConcreteDataType::int8_datatype(),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
ConcreteDataType::uint16_datatype(),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
] {
|
||||
signatures.push(TypeSignature::Exact(vec![
|
||||
// latitude
|
||||
coord_type.clone(),
|
||||
// longitude
|
||||
coord_type.clone(),
|
||||
// resolution
|
||||
resolution_type.clone(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
Signature::one_of(signatures, Volatility::Stable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 3, provided : {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let lat_vec = &columns[0];
|
||||
let lon_vec = &columns[1];
|
||||
let resolution_vec = &columns[2];
|
||||
|
||||
let size = lat_vec.len();
|
||||
let mut results = StringVectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let lat = lat_vec.get(i).as_f64_lossy();
|
||||
let lon = lon_vec.get(i).as_f64_lossy();
|
||||
let r = match resolution_vec.get(i) {
|
||||
Value::Int8(v) => v as u8,
|
||||
Value::Int16(v) => v as u8,
|
||||
Value::Int32(v) => v as u8,
|
||||
Value::Int64(v) => v as u8,
|
||||
Value::UInt8(v) => v,
|
||||
Value::UInt16(v) => v as u8,
|
||||
Value::UInt32(v) => v as u8,
|
||||
Value::UInt64(v) => v as u8,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let result = match (lat, lon) {
|
||||
(Some(lat), Some(lon)) => {
|
||||
let coord = LatLng::new(lat, lon)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("H3 error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
let r = Resolution::try_from(r as u8)
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
format!("H3 error: {}", e),
|
||||
StatusCode::EngineExecuteQuery,
|
||||
))
|
||||
})
|
||||
.context(error::ExecuteSnafu)?;
|
||||
let encoded = coord.to_cell(r).to_string();
|
||||
Some(encoded)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
results.push(result.as_deref());
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for H3Function {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", NAME)
|
||||
}
|
||||
}
|
||||
@@ -28,7 +28,7 @@ pub struct FunctionState {
|
||||
|
||||
impl FunctionState {
|
||||
/// Create a mock [`FunctionState`] for test.
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
#[cfg(test)]
|
||||
pub fn mock() -> Self {
|
||||
use std::sync::Arc;
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ mod version;
|
||||
use std::sync::Arc;
|
||||
|
||||
use build::BuildFunction;
|
||||
use database::DatabaseFunction;
|
||||
use database::{CurrentSchemaFunction, DatabaseFunction};
|
||||
use pg_catalog::PGCatalogFunction;
|
||||
use procedure_state::ProcedureStateFunction;
|
||||
use timezone::TimezoneFunction;
|
||||
@@ -37,8 +37,9 @@ impl SystemFunction {
|
||||
registry.register(Arc::new(BuildFunction));
|
||||
registry.register(Arc::new(VersionFunction));
|
||||
registry.register(Arc::new(DatabaseFunction));
|
||||
registry.register(Arc::new(CurrentSchemaFunction));
|
||||
registry.register(Arc::new(TimezoneFunction));
|
||||
registry.register(Arc::new(ProcedureStateFunction));
|
||||
registry.register_async(Arc::new(ProcedureStateFunction));
|
||||
PGCatalogFunction::register(registry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,11 +26,35 @@ use crate::function::{Function, FunctionContext};
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DatabaseFunction;
|
||||
|
||||
const NAME: &str = "database";
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct CurrentSchemaFunction;
|
||||
|
||||
const DATABASE_FUNCTION_NAME: &str = "database";
|
||||
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
|
||||
|
||||
impl Function for DatabaseFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
DATABASE_FUNCTION_NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(0, vec![], Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
let db = func_ctx.query_ctx.current_schema();
|
||||
|
||||
Ok(Arc::new(StringVector::from_slice(&[&db])) as _)
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for CurrentSchemaFunction {
|
||||
fn name(&self) -> &str {
|
||||
CURRENT_SCHEMA_FUNCTION_NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
@@ -54,6 +78,12 @@ impl fmt::Display for DatabaseFunction {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CurrentSchemaFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "CURRENT_SCHEMA")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -14,11 +14,13 @@
|
||||
|
||||
mod pg_get_userbyid;
|
||||
mod table_is_visible;
|
||||
mod version;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use pg_get_userbyid::PGGetUserByIdFunction;
|
||||
use table_is_visible::PGTableIsVisibleFunction;
|
||||
use version::PGVersionFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
@@ -35,5 +37,6 @@ impl PGCatalogFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(PGTableIsVisibleFunction));
|
||||
registry.register(Arc::new(PGGetUserByIdFunction));
|
||||
registry.register(Arc::new(PGVersionFunction));
|
||||
}
|
||||
}
|
||||
|
||||
54
src/common/function/src/system/pg_catalog/version.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::{env, fmt};
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::vectors::{StringVector, VectorRef};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub(crate) struct PGVersionFunction;
|
||||
|
||||
impl fmt::Display for PGVersionFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, crate::pg_catalog_func_fullname!("VERSION"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for PGVersionFunction {
|
||||
fn name(&self) -> &str {
|
||||
crate::pg_catalog_func_fullname!("version")
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(vec![], Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
let result = StringVector::from(vec![format!(
|
||||
"PostgreSQL 16.3 GreptimeDB {}",
|
||||
env!("CARGO_PKG_VERSION")
|
||||
)]);
|
||||
Ok(Arc::new(result))
|
||||
}
|
||||
}
|
||||
@@ -96,7 +96,7 @@ mod tests {
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_procedure_state_misc() {
|
||||
@@ -114,8 +114,8 @@ mod tests {
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_procedure_service() {
|
||||
#[tokio::test]
|
||||
async fn test_missing_procedure_service() {
|
||||
let f = ProcedureStateFunction;
|
||||
|
||||
let args = vec!["pid"];
|
||||
@@ -125,15 +125,15 @@ mod tests {
|
||||
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing ProcedureServiceHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_procedure_state() {
|
||||
#[tokio::test]
|
||||
async fn test_procedure_state() {
|
||||
let f = ProcedureStateFunction;
|
||||
|
||||
let args = vec!["pid"];
|
||||
@@ -143,7 +143,7 @@ mod tests {
|
||||
.map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec![
|
||||
"{\"status\":\"Done\",\"error\":\"OK\"}",
|
||||
|
||||
@@ -31,11 +31,11 @@ pub(crate) struct TableFunction;
|
||||
impl TableFunction {
|
||||
/// Register all table functions to [`FunctionRegistry`].
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(MigrateRegionFunction));
|
||||
registry.register(Arc::new(FlushRegionFunction));
|
||||
registry.register(Arc::new(CompactRegionFunction));
|
||||
registry.register(Arc::new(FlushTableFunction));
|
||||
registry.register(Arc::new(CompactTableFunction));
|
||||
registry.register(Arc::new(FlushFlowFunction));
|
||||
registry.register_async(Arc::new(MigrateRegionFunction));
|
||||
registry.register_async(Arc::new(FlushRegionFunction));
|
||||
registry.register_async(Arc::new(CompactRegionFunction));
|
||||
registry.register_async(Arc::new(FlushTableFunction));
|
||||
registry.register_async(Arc::new(CompactTableFunction));
|
||||
registry.register_async(Arc::new(FlushFlowFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ mod tests {
|
||||
use datatypes::vectors::UInt64Vector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
macro_rules! define_region_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
@@ -97,8 +97,8 @@ mod tests {
|
||||
} if valid_types == ConcreteDataType::numerics()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name _missing_table_mutation>]() {
|
||||
#[tokio::test]
|
||||
async fn [<test_ $name _missing_table_mutation>]() {
|
||||
let f = $func;
|
||||
|
||||
let args = vec![99];
|
||||
@@ -108,15 +108,15 @@ mod tests {
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing TableMutationHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name>]() {
|
||||
#[tokio::test]
|
||||
async fn [<test_ $name>]() {
|
||||
let f = $func;
|
||||
|
||||
|
||||
@@ -127,7 +127,7 @@ mod tests {
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
@@ -33,6 +33,8 @@ use crate::handlers::TableMutationHandlerRef;
|
||||
|
||||
/// Compact type: strict window.
|
||||
const COMPACT_TYPE_STRICT_WINDOW: &str = "strict_window";
|
||||
/// Compact type: strict window (short name).
|
||||
const COMPACT_TYPE_STRICT_WINDOW_SHORT: &str = "swcs";
|
||||
|
||||
#[admin_fn(
|
||||
name = FlushTableFunction,
|
||||
@@ -168,8 +170,12 @@ fn parse_compact_params(
|
||||
})
|
||||
}
|
||||
|
||||
/// Parses the compaction strategy type. For `strict_window` or `swcs`, strict window compaction is chosen;
/// otherwise regular (TWCS) compaction is used.
|
||||
fn parse_compact_type(type_str: &str, option: Option<&str>) -> Result<compact_request::Options> {
|
||||
if type_str.eq_ignore_ascii_case(COMPACT_TYPE_STRICT_WINDOW) {
|
||||
if type_str.eq_ignore_ascii_case(COMPACT_TYPE_STRICT_WINDOW)
|
||||
| type_str.eq_ignore_ascii_case(COMPACT_TYPE_STRICT_WINDOW_SHORT)
|
||||
{
|
||||
let window_seconds = option
|
||||
.map(|v| {
|
||||
i64::from_str(v).map_err(|_| {
|
||||
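A minimal standalone sketch of the dispatch described above: `strict_window` or its short name `swcs` selects strict-window compaction with a window given in seconds, and anything else falls back to the regular (TWCS) strategy. The enum and function names below are illustrative stand-ins, not the crate's own types, and the error handling is simplified.

#[derive(Debug, PartialEq)]
enum CompactKind {
    StrictWindow { window_seconds: i64 },
    Regular,
}

// Hypothetical mirror of parse_compact_type: same dispatch, simplified fallback.
fn pick_compact_kind(type_str: &str, option: Option<&str>) -> CompactKind {
    if type_str.eq_ignore_ascii_case("strict_window") || type_str.eq_ignore_ascii_case("swcs") {
        // A missing or unparsable window falls back to 0 here; the real function reports an error.
        let window_seconds = option.and_then(|v| v.parse::<i64>().ok()).unwrap_or(0);
        CompactKind::StrictWindow { window_seconds }
    } else {
        CompactKind::Regular
    }
}

fn main() {
    assert_eq!(
        pick_compact_kind("swcs", Some("120")),
        CompactKind::StrictWindow { window_seconds: 120 }
    );
    assert_eq!(pick_compact_kind("regular", None), CompactKind::Regular);
}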
@@ -204,7 +210,7 @@ mod tests {
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
macro_rules! define_table_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
@@ -224,8 +230,8 @@ mod tests {
|
||||
} if valid_types == vec![ConcreteDataType::string_datatype()]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name _missing_table_mutation>]() {
|
||||
#[tokio::test]
|
||||
async fn [<test_ $name _missing_table_mutation>]() {
|
||||
let f = $func;
|
||||
|
||||
let args = vec!["test"];
|
||||
@@ -235,15 +241,15 @@ mod tests {
|
||||
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing TableMutationHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn [<test_ $name>]() {
|
||||
#[tokio::test]
|
||||
async fn [<test_ $name>]() {
|
||||
let f = $func;
|
||||
|
||||
|
||||
@@ -254,7 +260,7 @@ mod tests {
|
||||
.map(|arg| Arc::new(StringVector::from(vec![arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([42]));
|
||||
assert_eq!(expect, result);
|
||||
@@ -350,6 +356,17 @@ mod tests {
|
||||
compact_options: Options::Regular(Default::default()),
|
||||
},
|
||||
),
|
||||
(
|
||||
&["table", "swcs", "120"],
|
||||
CompactTableRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "table".to_string(),
|
||||
compact_options: Options::StrictWindow(StrictWindow {
|
||||
window_seconds: 120,
|
||||
}),
|
||||
},
|
||||
),
|
||||
]);
|
||||
|
||||
assert!(parse_compact_params(
|
||||
|
||||
@@ -123,7 +123,7 @@ mod tests {
|
||||
use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_migrate_region_misc() {
|
||||
@@ -140,8 +140,8 @@ mod tests {
|
||||
} if sigs.len() == 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_procedure_service() {
|
||||
#[tokio::test]
|
||||
async fn test_missing_procedure_service() {
|
||||
let f = MigrateRegionFunction;
|
||||
|
||||
let args = vec![1, 1, 1];
|
||||
@@ -151,15 +151,15 @@ mod tests {
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::default(), &args).unwrap_err();
|
||||
let result = f.eval(FunctionContext::default(), &args).await.unwrap_err();
|
||||
assert_eq!(
|
||||
"Missing ProcedureServiceHandler, not expected",
|
||||
result.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrate_region() {
|
||||
#[tokio::test]
|
||||
async fn test_migrate_region() {
|
||||
let f = MigrateRegionFunction;
|
||||
|
||||
let args = vec![1, 1, 1];
|
||||
@@ -169,7 +169,7 @@ mod tests {
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).unwrap();
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
|
||||
assert_eq!(expect, result);
|
||||
|
||||
@@ -187,7 +187,8 @@ fn build_struct(
|
||||
}
|
||||
|
||||
|
||||
impl crate::function::Function for #name {
|
||||
#[async_trait::async_trait]
|
||||
impl crate::function::AsyncFunction for #name {
|
||||
fn name(&self) -> &'static str {
|
||||
#display_name
|
||||
}
|
||||
@@ -200,7 +201,7 @@ fn build_struct(
|
||||
#sig_fn()
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
|
||||
async fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
|
||||
// Ensure under the `greptime` catalog for security
|
||||
crate::ensure_greptime!(func_ctx);
|
||||
|
||||
@@ -212,51 +213,36 @@ fn build_struct(
|
||||
};
|
||||
let columns = Vec::from(columns);
|
||||
|
||||
// TODO(dennis): DataFusion doesn't support async UDF currently
|
||||
std::thread::spawn(move || {
|
||||
use snafu::OptionExt;
|
||||
use datatypes::data_type::DataType;
|
||||
use snafu::OptionExt;
|
||||
use datatypes::data_type::DataType;
|
||||
|
||||
let query_ctx = &func_ctx.query_ctx;
|
||||
let handler = func_ctx
|
||||
.state
|
||||
.#handler
|
||||
.as_ref()
|
||||
.context(#snafu_type)?;
|
||||
let query_ctx = &func_ctx.query_ctx;
|
||||
let handler = func_ctx
|
||||
.state
|
||||
.#handler
|
||||
.as_ref()
|
||||
.context(#snafu_type)?;
|
||||
|
||||
let mut builder = store_api::storage::ConcreteDataType::#ret()
|
||||
.create_mutable_vector(rows_num);
|
||||
let mut builder = store_api::storage::ConcreteDataType::#ret()
|
||||
.create_mutable_vector(rows_num);
|
||||
|
||||
if columns_num == 0 {
|
||||
let result = common_runtime::block_on_global(async move {
|
||||
#fn_name(handler, query_ctx, &[]).await
|
||||
})?;
|
||||
if columns_num == 0 {
|
||||
let result = #fn_name(handler, query_ctx, &[]).await?;
|
||||
|
||||
builder.push_value_ref(result.as_value_ref());
|
||||
} else {
|
||||
for i in 0..rows_num {
|
||||
let args: Vec<_> = columns.iter()
|
||||
.map(|vector| vector.get_ref(i))
|
||||
.collect();
|
||||
|
||||
let result = #fn_name(handler, query_ctx, &args).await?;
|
||||
|
||||
builder.push_value_ref(result.as_value_ref());
|
||||
} else {
|
||||
for i in 0..rows_num {
|
||||
let args: Vec<_> = columns.iter()
|
||||
.map(|vector| vector.get_ref(i))
|
||||
.collect();
|
||||
|
||||
let result = common_runtime::block_on_global(async move {
|
||||
#fn_name(handler, query_ctx, &args).await
|
||||
})?;
|
||||
|
||||
builder.push_value_ref(result.as_value_ref());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(builder.to_vector())
|
||||
})
|
||||
.join()
|
||||
.map_err(|e| {
|
||||
common_telemetry::error!(e; "Join thread error");
|
||||
common_query::error::Error::ThreadJoin {
|
||||
location: snafu::Location::default(),
|
||||
}
|
||||
})?
|
||||
|
||||
Ok(builder.to_vector())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -76,6 +76,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument,
|
||||
/// - `&QueryContextRef` as the second argument, and
|
||||
/// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
|
||||
///
|
||||
/// Return type must be `common_query::error::Result<Value>`.
|
||||
///
|
||||
/// # Example
/// See `common/function/src/system/procedure_state.rs`.
|
||||
|
||||
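A shape-only sketch of a function that the `#[admin_fn]` macro can wrap, following the argument order spelled out in the doc comment above. The type aliases are local stand-ins so the snippet compiles on its own (the real handler, query-context, and value types live in this crate and `datatypes`), and tokio is assumed only to give the sketch a runnable entry point.

use std::sync::Arc;

// Stand-ins for the real types; only the argument shape and the Result<Value> return type matter.
type ProcedureServiceHandlerRef = Arc<()>;
type QueryContextRef = Arc<()>;
type ValueRef<'a> = &'a str;
type Value = String;
type Result<T> = std::result::Result<T, String>;

// Handler first, query context second, then the per-row SQL argument values.
async fn my_admin_fn(
    _handler: &ProcedureServiceHandlerRef,
    _query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    Ok(params.first().copied().unwrap_or("<none>").to_string())
}

#[tokio::main]
async fn main() {
    let out = my_admin_fn(&Arc::new(()), &Arc::new(()), &["pid"]).await;
    assert_eq!(out.unwrap(), "pid");
}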
@@ -15,8 +15,8 @@ tempfile = "3.4"
|
||||
tokio.workspace = true
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats"] }
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies.tikv-jemalloc-sys]
|
||||
features = ["stats", "profiling", "unprefixed_malloc_on_supported_platforms"]
|
||||
version = "0.5"
|
||||
version = "0.6"
|
||||
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
pg_kvbackend = ["dep:tokio-postgres"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -56,6 +57,7 @@ store-api.workspace = true
|
||||
strum.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-postgres = { workspace = true, optional = true }
|
||||
tonic.workspace = true
|
||||
typetag = "0.2"
|
||||
|
||||
|
||||
@@ -227,7 +227,7 @@ impl Procedure for DropTableProcedure {
|
||||
}
|
||||
|
||||
fn rollback_supported(&self) -> bool {
|
||||
!matches!(self.data.state, DropTableState::Prepare)
|
||||
!matches!(self.data.state, DropTableState::Prepare) && self.data.allow_rollback
|
||||
}
|
||||
|
||||
async fn rollback(&mut self, _: &ProcedureContext) -> ProcedureResult<()> {
|
||||
@@ -256,6 +256,8 @@ pub struct DropTableData {
|
||||
pub task: DropTableTask,
|
||||
pub physical_region_routes: Vec<RegionRoute>,
|
||||
pub physical_table_id: Option<TableId>,
|
||||
#[serde(default)]
|
||||
pub allow_rollback: bool,
|
||||
}
|
||||
|
||||
impl DropTableData {
|
||||
@@ -266,6 +268,7 @@ impl DropTableData {
|
||||
task,
|
||||
physical_region_routes: vec![],
|
||||
physical_table_id: None,
|
||||
allow_rollback: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,8 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::format_full_table_name;
|
||||
use snafu::OptionExt;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::error::Result;
|
||||
use crate::error::{self, Result};
|
||||
|
||||
impl DropTableProcedure {
|
||||
/// Fetches the table info and physical table route.
|
||||
@@ -29,6 +33,23 @@ impl DropTableProcedure {
|
||||
self.data.physical_region_routes = physical_table_route_value.region_routes;
|
||||
self.data.physical_table_id = Some(physical_table_id);
|
||||
|
||||
if physical_table_id == self.data.table_id() {
|
||||
let table_info_value = self
|
||||
.context
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(task.table_id)
|
||||
.await?
|
||||
.with_context(|| error::TableInfoNotFoundSnafu {
|
||||
table: format_full_table_name(&task.catalog, &task.schema, &task.table),
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let engine = table_info_value.table_info.meta.engine;
|
||||
// Allow rollback only for metric-engine physical tables, i.e. when dropping the metric physical table fails.
|
||||
self.data.allow_rollback = engine.as_str() == METRIC_ENGINE_NAME
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ use std::collections::HashMap;
|
||||
use api::v1::meta::Partition;
|
||||
use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_procedure::Status;
|
||||
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME};
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
|
||||
@@ -130,6 +131,11 @@ pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {
|
||||
.time_index("ts")
|
||||
.primary_keys(["host".into()])
|
||||
.table_name(name)
|
||||
.engine(METRIC_ENGINE_NAME)
|
||||
.table_options(HashMap::from([(
|
||||
LOGICAL_TABLE_METADATA_KEY.to_string(),
|
||||
"phy".to_string(),
|
||||
)]))
|
||||
.build()
|
||||
.unwrap()
|
||||
.into();
|
||||
@@ -166,6 +172,7 @@ pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
|
||||
.time_index("ts")
|
||||
.primary_keys(["value".into()])
|
||||
.table_name(name)
|
||||
.engine(METRIC_ENGINE_NAME)
|
||||
.build()
|
||||
.unwrap()
|
||||
.into();
|
||||
|
||||
@@ -127,7 +127,7 @@ pub fn build_raw_table_info_from_expr(expr: &CreateTableExpr) -> RawTableInfo {
|
||||
engine: expr.engine.to_string(),
|
||||
next_column_id: expr.column_defs.len() as u32,
|
||||
region_numbers: vec![],
|
||||
options: TableOptions::default(),
|
||||
options: TableOptions::try_from_iter(&expr.table_options).unwrap(),
|
||||
created_on: DateTime::default(),
|
||||
partition_key_indices: vec![],
|
||||
},
|
||||
|
||||
@@ -91,6 +91,7 @@ async fn test_on_prepare_table() {
|
||||
// Drop if exists
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
assert!(!procedure.rollback_supported());
|
||||
|
||||
let task = new_drop_table_task(table_name, table_id, false);
|
||||
// Drop table
|
||||
@@ -224,9 +225,12 @@ async fn test_on_rollback() {
|
||||
let task = new_drop_table_task("phy_table", physical_table_id, false);
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
assert!(procedure.rollback_supported());
|
||||
procedure.on_delete_metadata().await.unwrap();
|
||||
assert!(procedure.rollback_supported());
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
// Rollback again
|
||||
assert!(procedure.rollback_supported());
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
let kvs = kv_backend.dump();
|
||||
assert_eq!(kvs, expected_kvs);
|
||||
@@ -236,12 +240,7 @@ async fn test_on_rollback() {
|
||||
let task = new_drop_table_task("foo", table_ids[0], false);
|
||||
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.on_delete_metadata().await.unwrap();
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
// Rollback again
|
||||
procedure.rollback(&ctx).await.unwrap();
|
||||
let kvs = kv_backend.dump();
|
||||
assert_eq!(kvs, expected_kvs);
|
||||
assert!(!procedure.rollback_supported());
|
||||
}
|
||||
|
||||
fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool) -> DropTableTask {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use common_catalog::consts::METRIC_ENGINE;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::Error as ProcedureError;
|
||||
use snafu::{ensure, location, Location, OptionExt};
|
||||
use snafu::{ensure, location, OptionExt};
|
||||
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
|
||||
use table::metadata::TableId;
|
||||
|
||||
|
||||
@@ -499,6 +499,13 @@ pub enum Error {
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create TLS Config"))]
|
||||
TlsConfig {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_wal::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to resolve Kafka broker endpoint."))]
|
||||
ResolveKafkaEndpoint { source: common_wal::error::Error },
|
||||
|
||||
@@ -636,6 +643,15 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse {} from str to utf8", name))]
|
||||
StrFromUtf8 {
|
||||
name: String,
|
||||
#[snafu(source)]
|
||||
error: std::str::Utf8Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Value not exists"))]
|
||||
ValueNotExist {
|
||||
#[snafu(implicit)]
|
||||
@@ -644,6 +660,24 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to get cache"))]
|
||||
GetCache { source: Arc<Error> },
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to execute via Postgres"))]
|
||||
PostgresExecution {
|
||||
#[snafu(source)]
|
||||
error: tokio_postgres::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to connect to Postgres"))]
|
||||
ConnectPostgres {
|
||||
#[snafu(source)]
|
||||
error: tokio_postgres::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -697,7 +731,8 @@ impl ErrorExt for Error {
|
||||
| UnexpectedLogicalRouteTable { .. }
|
||||
| ProcedureOutput { .. }
|
||||
| FromUtf8 { .. }
|
||||
| MetadataCorruption { .. } => StatusCode::Unexpected,
|
||||
| MetadataCorruption { .. }
|
||||
| StrFromUtf8 { .. } => StatusCode::Unexpected,
|
||||
|
||||
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } | RenameTable { .. } => {
|
||||
StatusCode::Internal
|
||||
@@ -714,7 +749,8 @@ impl ErrorExt for Error {
|
||||
| AlterLogicalTablesInvalidArguments { .. }
|
||||
| CreateLogicalTablesInvalidArguments { .. }
|
||||
| MismatchPrefix { .. }
|
||||
| DelimiterNotFound { .. } => StatusCode::InvalidArguments,
|
||||
| DelimiterNotFound { .. }
|
||||
| TlsConfig { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
FlowNotFound { .. } => StatusCode::FlowNotFound,
|
||||
FlowRouteNotFound { .. } => StatusCode::Unexpected,
|
||||
@@ -741,6 +777,11 @@ impl ErrorExt for Error {
|
||||
| ParseNum { .. }
|
||||
| InvalidRole { .. }
|
||||
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
PostgresExecution { .. } => StatusCode::Internal,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
ConnectPostgres { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -153,6 +153,9 @@ pub struct UpgradeRegion {
|
||||
/// it's helpful to verify whether the leader region is ready.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub wait_for_replay_timeout: Option<Duration>,
|
||||
/// The hint for replaying memtable.
|
||||
#[serde(default)]
|
||||
pub location_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
|
||||
@@ -31,6 +31,8 @@ use crate::rpc::KeyValue;
|
||||
pub mod chroot;
|
||||
pub mod etcd;
|
||||
pub mod memory;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
pub mod postgres;
|
||||
pub mod test;
|
||||
pub mod txn;
|
||||
|
||||
|
||||
src/common/meta/src/kv_backend/postgres.rs (new file, 626 lines)
@@ -0,0 +1,626 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use snafu::ResultExt;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::{Client, NoTls};
|
||||
|
||||
use super::{KvBackend, TxnService};
|
||||
use crate::error::{ConnectPostgresSnafu, Error, PostgresExecutionSnafu, Result, StrFromUtf8Snafu};
|
||||
use crate::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
/// Postgres backend store for metasrv.
|
||||
pub struct PgStore {
|
||||
// TODO: Consider using sqlx crate.
|
||||
client: Client,
|
||||
}
|
||||
|
||||
const EMPTY: &[u8] = &[0];
|
||||
|
||||
// TODO: allow users to configure metadata table name.
|
||||
const METADKV_CREATION: &str =
|
||||
"CREATE TABLE IF NOT EXISTS greptime_metakv(k varchar PRIMARY KEY, v varchar)";
|
||||
|
||||
const FULL_TABLE_SCAN: &str = "SELECT k, v FROM greptime_metakv $1 ORDER BY K";
|
||||
|
||||
const POINT_GET: &str = "SELECT k, v FROM greptime_metakv WHERE k = $1";
|
||||
|
||||
const PREFIX_SCAN: &str = "SELECT k, v FROM greptime_metakv WHERE k LIKE $1 ORDER BY K";
|
||||
|
||||
const RANGE_SCAN_LEFT_BOUNDED: &str = "SELECT k, v FROM greptime_metakv WHERE k >= $1 ORDER BY K";
|
||||
|
||||
const RANGE_SCAN_FULL_RANGE: &str =
|
||||
"SELECT k, v FROM greptime_metakv WHERE k >= $1 AND K < $2 ORDER BY K";
|
||||
|
||||
const FULL_TABLE_DELETE: &str = "DELETE FROM greptime_metakv RETURNING k,v";
|
||||
|
||||
const POINT_DELETE: &str = "DELETE FROM greptime_metakv WHERE K = $1 RETURNING k,v;";
|
||||
|
||||
const PREFIX_DELETE: &str = "DELETE FROM greptime_metakv WHERE k LIKE $1 RETURNING k,v;";
|
||||
|
||||
const RANGE_DELETE_LEFT_BOUNDED: &str = "DELETE FROM greptime_metakv WHERE k >= $1 RETURNING k,v;";
|
||||
|
||||
const RANGE_DELETE_FULL_RANGE: &str =
|
||||
"DELETE FROM greptime_metakv WHERE k >= $1 AND K < $2 RETURNING k,v;";
|
||||
|
||||
const CAS: &str = r#"
|
||||
WITH prev AS (
|
||||
SELECT k,v FROM greptime_metakv WHERE k = $1 AND v = $2
|
||||
), update AS (
|
||||
UPDATE greptime_metakv
|
||||
SET k=$1,
|
||||
v=$2
|
||||
WHERE
|
||||
k=$1 AND v=$3
|
||||
)
|
||||
|
||||
SELECT k, v FROM prev;
|
||||
"#;
|
||||
|
||||
const PUT_IF_NOT_EXISTS: &str = r#"
|
||||
WITH prev AS (
|
||||
select k,v from greptime_metakv where k = $1
|
||||
), insert AS (
|
||||
INSERT INTO greptime_metakv
|
||||
VALUES ($1, $2)
|
||||
ON CONFLICT (k) DO NOTHING
|
||||
)
|
||||
|
||||
SELECT k, v FROM prev;"#;
|
||||
|
||||
impl PgStore {
|
||||
/// Creates a Postgres-backed [`KvBackendRef`] from a connection URL.
|
||||
pub async fn with_url(url: &str) -> Result<KvBackendRef> {
|
||||
// TODO: support tls.
|
||||
let (client, conn) = tokio_postgres::connect(url, NoTls)
|
||||
.await
|
||||
.context(ConnectPostgresSnafu)?;
|
||||
tokio::spawn(async move { conn.await.context(ConnectPostgresSnafu) });
|
||||
Self::with_pg_client(client).await
|
||||
}
|
||||
|
||||
/// Creates a Postgres-backed [`KvBackendRef`] from an existing tokio-postgres [`Client`].
|
||||
pub async fn with_pg_client(client: Client) -> Result<KvBackendRef> {
|
||||
// Ensure the Postgres metadata backend is ready to use: create the
// greptime_metakv table if it does not already exist.
|
||||
client
|
||||
.execute(METADKV_CREATION, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
Ok(Arc::new(Self { client }))
|
||||
}
|
||||
|
||||
async fn put_if_not_exists(&self, key: &str, value: &str) -> Result<bool> {
|
||||
let res = self
|
||||
.client
|
||||
.query(PUT_IF_NOT_EXISTS, &[&key, &value])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
Ok(res.is_empty())
|
||||
}
|
||||
}
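A hedged usage sketch of the constructors above. It assumes the crate is available as `common_meta` with the `pg_kvbackend` feature enabled, that the request structs derive `Default`, and that a Postgres instance is reachable at the URL in `GT_POSTGRES_ENDPOINTS` (the same variable the tests at the bottom of this file use); the demo key and value are illustrative. `with_url`, `put`, and `range` are the APIs shown in this file.

use common_meta::kv_backend::postgres::PgStore;
use common_meta::rpc::store::{PutRequest, RangeRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = std::env::var("GT_POSTGRES_ENDPOINTS")?;
    let kv = PgStore::with_url(&url).await?;

    // Store one key, then read it back with a point get (empty range_end).
    kv.put(PutRequest {
        key: b"__demo/key".to_vec(),
        value: b"hello".to_vec(),
        ..Default::default()
    })
    .await?;

    let resp = kv
        .range(RangeRequest {
            key: b"__demo/key".to_vec(),
            ..Default::default()
        })
        .await?;
    println!("fetched {} kv pairs", resp.kvs.len());
    Ok(())
}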
|
||||
|
||||
fn select_range_template(req: &RangeRequest) -> &str {
|
||||
if req.range_end.is_empty() {
|
||||
return POINT_GET;
|
||||
}
|
||||
if req.key == EMPTY && req.range_end == EMPTY {
|
||||
FULL_TABLE_SCAN
|
||||
} else if req.range_end == EMPTY {
|
||||
RANGE_SCAN_LEFT_BOUNDED
|
||||
} else if is_prefix_range(&req.key, &req.range_end) {
|
||||
PREFIX_SCAN
|
||||
} else {
|
||||
RANGE_SCAN_FULL_RANGE
|
||||
}
|
||||
}
|
||||
|
||||
fn select_range_delete_template(req: &DeleteRangeRequest) -> &str {
|
||||
if req.range_end.is_empty() {
|
||||
return POINT_DELETE;
|
||||
}
|
||||
if req.key == EMPTY && req.range_end == EMPTY {
|
||||
FULL_TABLE_DELETE
|
||||
} else if req.range_end == EMPTY {
|
||||
RANGE_DELETE_LEFT_BOUNDED
|
||||
} else if is_prefix_range(&req.key, &req.range_end) {
|
||||
PREFIX_DELETE
|
||||
} else {
|
||||
RANGE_DELETE_FULL_RANGE
|
||||
}
|
||||
}
|
||||
|
||||
// Generate dynamic parameterized sql for batch get.
|
||||
fn generate_batch_get_query(key_len: usize) -> String {
|
||||
let in_placeholders: Vec<String> = (1..=key_len).map(|i| format!("${}", i)).collect();
|
||||
let in_clause = in_placeholders.join(", ");
|
||||
format!(
|
||||
"SELECT k, v FROM greptime_metakv WHERE k in ({});",
|
||||
in_clause
|
||||
)
|
||||
}
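To make the generated placeholder layout concrete, here is a self-contained re-statement of the helper above together with the expected output for three keys (the function is re-declared locally only so the snippet runs on its own):

// Mirrors generate_batch_get_query above: one positional parameter per key.
fn batch_get_sql(key_len: usize) -> String {
    let placeholders: Vec<String> = (1..=key_len).map(|i| format!("${}", i)).collect();
    format!(
        "SELECT k, v FROM greptime_metakv WHERE k in ({});",
        placeholders.join(", ")
    )
}

fn main() {
    assert_eq!(
        batch_get_sql(3),
        "SELECT k, v FROM greptime_metakv WHERE k in ($1, $2, $3);"
    );
}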
|
||||
|
||||
// Generate dynamic parameterized sql for batch delete.
|
||||
fn generate_batch_delete_query(key_len: usize) -> String {
|
||||
let in_placeholders: Vec<String> = (1..=key_len).map(|i| format!("${}", i)).collect();
|
||||
let in_clause = in_placeholders.join(", ");
|
||||
format!(
|
||||
"DELETE FROM greptime_metakv WHERE k in ({}) RETURNING k, v;",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
// Generate dynamic parameterized sql for batch upsert.
|
||||
fn generate_batch_upsert_query(kv_len: usize) -> String {
|
||||
let in_placeholders: Vec<String> = (1..=kv_len).map(|i| format!("${}", i)).collect();
|
||||
let in_clause = in_placeholders.join(", ");
|
||||
let mut param_index = kv_len + 1;
|
||||
let mut values_placeholders = Vec::new();
|
||||
for _ in 0..kv_len {
|
||||
values_placeholders.push(format!("(${0}, ${1})", param_index, param_index + 1));
|
||||
param_index += 2;
|
||||
}
|
||||
let values_clause = values_placeholders.join(", ");
|
||||
|
||||
format!(
|
||||
r#"
|
||||
WITH prev AS (
|
||||
SELECT k,v FROM greptime_metakv WHERE k IN ({in_clause})
|
||||
), update AS (
|
||||
INSERT INTO greptime_metakv (k, v) VALUES
|
||||
{values_clause}
|
||||
ON CONFLICT (
|
||||
k
|
||||
) DO UPDATE SET
|
||||
v = excluded.v
|
||||
)
|
||||
|
||||
SELECT k, v FROM prev;
|
||||
"#
|
||||
)
|
||||
}
|
||||
|
||||
// Trim null byte at the end and convert bytes to string.
|
||||
fn process_bytes<'a>(data: &'a [u8], name: &str) -> Result<&'a str> {
|
||||
let mut len = data.len();
|
||||
// remove trailing null bytes to avoid error in postgres encoding.
|
||||
while len > 0 && data[len - 1] == 0 {
|
||||
len -= 1;
|
||||
}
|
||||
let res = std::str::from_utf8(&data[0..len]).context(StrFromUtf8Snafu { name })?;
|
||||
Ok(res)
|
||||
}
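A standalone illustration of what `process_bytes` does with a key carrying a trailing 0 byte: the backend uses a single 0 byte as its "empty" sentinel, and Postgres text parameters cannot contain NUL bytes, so trailing NULs are stripped before the string is handed to tokio-postgres. The key below is illustrative.

fn main() {
    let data: &[u8] = b"greptime/demo/key\0";
    let mut len = data.len();
    // Drop trailing 0 bytes, exactly as process_bytes does.
    while len > 0 && data[len - 1] == 0 {
        len -= 1;
    }
    let trimmed = std::str::from_utf8(&data[..len]).expect("valid utf-8");
    assert_eq!(trimmed, "greptime/demo/key");
}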
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for PgStore {
|
||||
fn name(&self) -> &str {
|
||||
"Postgres"
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
|
||||
let mut params = vec![];
|
||||
let template = select_range_template(&req);
|
||||
if req.key != EMPTY {
|
||||
let key = process_bytes(&req.key, "rangeKey")?;
|
||||
if template == PREFIX_SCAN {
|
||||
let prefix = format!("{key}%");
|
||||
params.push(Cow::Owned(prefix))
|
||||
} else {
|
||||
params.push(Cow::Borrowed(key))
|
||||
}
|
||||
}
|
||||
if template == RANGE_SCAN_FULL_RANGE && req.range_end != EMPTY {
|
||||
let range_end = process_bytes(&req.range_end, "rangeEnd")?;
|
||||
params.push(Cow::Borrowed(range_end));
|
||||
}
|
||||
let limit = req.limit as usize;
|
||||
let limit_cause = match limit > 0 {
|
||||
true => format!(" LIMIT {};", limit + 1),
|
||||
false => ";".to_string(),
|
||||
};
|
||||
let template = format!("{}{}", template, limit_cause);
|
||||
let params: Vec<&(dyn ToSql + Sync)> = params
|
||||
.iter()
|
||||
.map(|x| match x {
|
||||
Cow::Borrowed(borrowed) => borrowed as &(dyn ToSql + Sync),
|
||||
Cow::Owned(owned) => owned as &(dyn ToSql + Sync),
|
||||
})
|
||||
.collect();
|
||||
let res = self
|
||||
.client
|
||||
.query(&template, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
let kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
if req.keys_only {
|
||||
return KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: vec![],
|
||||
};
|
||||
}
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
if limit == 0 || limit > kvs.len() {
|
||||
return Ok(RangeResponse { kvs, more: false });
|
||||
}
|
||||
let (filtered_kvs, _) = kvs.split_at(limit);
|
||||
Ok(RangeResponse {
|
||||
kvs: filtered_kvs.to_vec(),
|
||||
more: kvs.len() > limit,
|
||||
})
|
||||
}
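The `more` flag above relies on a small trick worth spelling out: the SQL runs with `LIMIT limit + 1`, so one extra row reveals whether the range was truncated without a second query, and the result is then cut back to `limit`. A toy, self-contained version:

fn main() {
    let limit = 2usize;
    // Pretend the query ran with LIMIT 3 (= limit + 1) and returned three rows.
    let fetched = vec!["a", "b", "c"];
    let more = fetched.len() > limit;
    let kvs: Vec<_> = fetched.into_iter().take(limit).collect();
    assert!(more);
    assert_eq!(kvs, vec!["a", "b"]);
}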
|
||||
|
||||
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
|
||||
let kv = KeyValue {
|
||||
key: req.key,
|
||||
value: req.value,
|
||||
};
|
||||
let mut res = self
|
||||
.batch_put(BatchPutRequest {
|
||||
kvs: vec![kv],
|
||||
prev_kv: req.prev_kv,
|
||||
})
|
||||
.await?;
|
||||
|
||||
if !res.prev_kvs.is_empty() {
|
||||
return Ok(PutResponse {
|
||||
prev_kv: Some(res.prev_kvs.remove(0)),
|
||||
});
|
||||
}
|
||||
Ok(PutResponse { prev_kv: None })
|
||||
}
|
||||
|
||||
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
|
||||
let mut in_params = Vec::with_capacity(req.kvs.len());
|
||||
let mut values_params = Vec::with_capacity(req.kvs.len() * 2);
|
||||
|
||||
for kv in &req.kvs {
|
||||
let processed_key = process_bytes(&kv.key, "BatchPutRequestKey")?;
|
||||
in_params.push(processed_key);
|
||||
|
||||
let processed_value = process_bytes(&kv.value, "BatchPutRequestValue")?;
|
||||
values_params.push(processed_key);
|
||||
values_params.push(processed_value);
|
||||
}
|
||||
in_params.extend(values_params);
|
||||
let params: Vec<&(dyn ToSql + Sync)> =
|
||||
in_params.iter().map(|x| x as &(dyn ToSql + Sync)).collect();
|
||||
|
||||
let query = generate_batch_upsert_query(req.kvs.len());
|
||||
let res = self
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
if req.prev_kv {
|
||||
let kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
if !kvs.is_empty() {
|
||||
return Ok(BatchPutResponse { prev_kvs: kvs });
|
||||
}
|
||||
}
|
||||
Ok(BatchPutResponse { prev_kvs: vec![] })
|
||||
}
|
||||
|
||||
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
|
||||
if req.keys.is_empty() {
|
||||
return Ok(BatchGetResponse { kvs: vec![] });
|
||||
}
|
||||
let query = generate_batch_get_query(req.keys.len());
|
||||
let value_params = req
|
||||
.keys
|
||||
.iter()
|
||||
.map(|k| process_bytes(k, "BatchGetRequestKey"))
|
||||
.collect::<Result<Vec<&str>>>()?;
|
||||
let params: Vec<&(dyn ToSql + Sync)> = value_params
|
||||
.iter()
|
||||
.map(|x| x as &(dyn ToSql + Sync))
|
||||
.collect();
|
||||
let res = self
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
let kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(BatchGetResponse { kvs })
|
||||
}
|
||||
|
||||
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
|
||||
let mut params = vec![];
|
||||
let template = select_range_delete_template(&req);
|
||||
if req.key != EMPTY {
|
||||
let key = process_bytes(&req.key, "deleteRangeKey")?;
|
||||
if template == PREFIX_DELETE {
|
||||
let prefix = format!("{key}%");
|
||||
params.push(Cow::Owned(prefix));
|
||||
} else {
|
||||
params.push(Cow::Borrowed(key));
|
||||
}
|
||||
}
|
||||
if template == RANGE_DELETE_FULL_RANGE && req.range_end != EMPTY {
|
||||
let range_end = process_bytes(&req.range_end, "deleteRangeEnd")?;
|
||||
params.push(Cow::Borrowed(range_end));
|
||||
}
|
||||
let params: Vec<&(dyn ToSql + Sync)> = params
|
||||
.iter()
|
||||
.map(|x| match x {
|
||||
Cow::Borrowed(borrowed) => borrowed as &(dyn ToSql + Sync),
|
||||
Cow::Owned(owned) => owned as &(dyn ToSql + Sync),
|
||||
})
|
||||
.collect();
|
||||
|
||||
let res = self
|
||||
.client
|
||||
.query(template, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
let deleted = res.len() as i64;
|
||||
if !req.prev_kv {
|
||||
return Ok({
|
||||
DeleteRangeResponse {
|
||||
deleted,
|
||||
prev_kvs: vec![],
|
||||
}
|
||||
});
|
||||
}
|
||||
let kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(DeleteRangeResponse {
|
||||
deleted,
|
||||
prev_kvs: kvs,
|
||||
})
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
if req.keys.is_empty() {
|
||||
return Ok(BatchDeleteResponse { prev_kvs: vec![] });
|
||||
}
|
||||
let query = generate_batch_delete_query(req.keys.len());
|
||||
let value_params = req
|
||||
.keys
|
||||
.iter()
|
||||
.map(|k| process_bytes(k, "BatchDeleteRequestKey"))
|
||||
.collect::<Result<Vec<&str>>>()?;
|
||||
let params: Vec<&(dyn ToSql + Sync)> = value_params
|
||||
.iter()
|
||||
.map(|x| x as &(dyn ToSql + Sync))
|
||||
.collect();
|
||||
let res = self
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
if !req.prev_kv {
|
||||
return Ok(BatchDeleteResponse { prev_kvs: vec![] });
|
||||
}
|
||||
let kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(BatchDeleteResponse { prev_kvs: kvs })
|
||||
}
|
||||
|
||||
async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
|
||||
let key = process_bytes(&req.key, "CASKey")?;
|
||||
let value = process_bytes(&req.value, "CASValue")?;
|
||||
if req.expect.is_empty() {
|
||||
let put_res = self.put_if_not_exists(key, value).await?;
|
||||
return Ok(CompareAndPutResponse {
|
||||
success: put_res,
|
||||
prev_kv: None,
|
||||
});
|
||||
}
|
||||
let expect = process_bytes(&req.expect, "CASExpect")?;
|
||||
|
||||
let res = self
|
||||
.client
|
||||
.query(CAS, &[&key, &value, &expect])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
match res.is_empty() {
|
||||
true => Ok(CompareAndPutResponse {
|
||||
success: false,
|
||||
prev_kv: None,
|
||||
}),
|
||||
false => {
|
||||
let mut kvs: Vec<KeyValue> = res
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let key: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
KeyValue {
|
||||
key: key.into_bytes(),
|
||||
value: value.into_bytes(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(CompareAndPutResponse {
|
||||
success: true,
|
||||
prev_kv: Some(kvs.remove(0)),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TxnService for PgStore {
|
||||
type Error = Error;
|
||||
|
||||
async fn txn(&self, _txn: KvTxn) -> Result<KvTxnResponse> {
|
||||
// TODO: implement txn for pg kv backend.
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
unreachable!("postgres backend does not support max_txn_ops!")
|
||||
}
|
||||
}
|
||||
|
||||
fn is_prefix_range(start: &[u8], end: &[u8]) -> bool {
|
||||
if start.len() != end.len() {
|
||||
return false;
|
||||
}
|
||||
let l = start.len();
|
||||
let same_prefix = start[0..l - 1] == end[0..l - 1];
|
||||
if let (Some(rhs), Some(lhs)) = (start.last(), end.last()) {
|
||||
return same_prefix && (*rhs + 1) == *lhs;
|
||||
}
|
||||
false
|
||||
}
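A standalone example of the convention `is_prefix_range` encodes: a range whose end key equals the start key with its last byte incremented covers exactly the keys sharing that prefix, which is why such ranges can be served by the LIKE 'prefix%' templates above. The keys are illustrative.

fn main() {
    // '0' is '/' + 1, so ["app/", "app0") covers every key beginning with "app/".
    let start = b"app/";
    let end = b"app0";
    let same_prefix = start[..start.len() - 1] == end[..end.len() - 1];
    let last_byte_incremented = start.last().map(|b| b + 1) == end.last().copied();
    assert!(same_prefix && last_byte_incremented); // is_prefix_range(start, end) would return true
}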
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::kv_backend::test::{
|
||||
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
|
||||
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
|
||||
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
|
||||
unprepare_kv,
|
||||
};
|
||||
|
||||
async fn build_pg_kv_backend() -> Option<PgStore> {
|
||||
let endpoints = std::env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
|
||||
if endpoints.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (client, connection) = tokio_postgres::connect(&endpoints, NoTls).await.unwrap();
|
||||
tokio::spawn(connection);
|
||||
let _ = client.execute(METADKV_CREATION, &[]).await;
|
||||
Some(PgStore { client })
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_put() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let prefix = b"put/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_put_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let prefix = b"range/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_range_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range_2() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_get() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let prefix = b"batchGet/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_get_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_compare_and_put() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let kv_backend = Arc::new(kv_backend);
|
||||
test_kv_compare_and_put_with_prefix(kv_backend, b"compareAndPut/".to_vec()).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_range() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let prefix = b"deleteRange/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_delete() {
|
||||
if let Some(kv_backend) = build_pg_kv_backend().await {
|
||||
let prefix = b"batchDelete/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -172,8 +172,8 @@ impl From<TableLock> for StringKey {
|
||||
///
|
||||
/// Note:
|
||||
/// - Allows modification the corresponding region's [TableRouteValue](crate::key::table_route::TableRouteValue),
|
||||
/// [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if
|
||||
/// it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write].
|
||||
/// [TableDatanodeValue](crate::key::datanode_table::DatanodeTableValue) even if
|
||||
/// it acquires the [RegionLock::Write] only without acquiring the [TableLock::Write].
|
||||
///
|
||||
/// - Should acquire [TableLock] of the table at same procedure.
|
||||
///
|
||||
|
||||
@@ -123,7 +123,7 @@ pub fn prepare_wal_options(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_wal::config::kafka::common::KafkaTopicConfig;
|
||||
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
|
||||
use common_wal::config::kafka::MetasrvKafkaConfig;
|
||||
use common_wal::test_util::run_test_with_kafka_wal;
|
||||
|
||||
@@ -166,7 +166,10 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
let config = MetasrvKafkaConfig {
|
||||
broker_endpoints,
|
||||
connection: KafkaConnectionConfig {
|
||||
broker_endpoints,
|
||||
..Default::default()
|
||||
},
|
||||
kafka_topic,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -30,7 +30,7 @@ use snafu::{ensure, ResultExt};
|
||||
use crate::error::{
|
||||
BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, BuildKafkaPartitionClientSnafu,
|
||||
CreateKafkaWalTopicSnafu, DecodeJsonSnafu, EncodeJsonSnafu, InvalidNumTopicsSnafu,
|
||||
ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result,
|
||||
ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result, TlsConfigSnafu,
|
||||
};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
@@ -76,6 +76,10 @@ impl TopicManager {
|
||||
/// The initializer first tries to restore persisted topics from the kv backend.
|
||||
/// If not enough topics retrieved, the initializer will try to contact the Kafka cluster and request creating more topics.
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
// Skip creating topics.
|
||||
if !self.config.auto_create_topics {
|
||||
return Ok(());
|
||||
}
|
||||
let num_topics = self.config.kafka_topic.num_topics;
|
||||
ensure!(num_topics > 0, InvalidNumTopicsSnafu { num_topics });
|
||||
|
||||
@@ -117,15 +121,22 @@ impl TopicManager {
|
||||
base: self.config.backoff.base as f64,
|
||||
deadline: self.config.backoff.deadline,
|
||||
};
|
||||
let broker_endpoints = common_wal::resolve_to_ipv4(&self.config.broker_endpoints)
|
||||
.await
|
||||
.context(ResolveKafkaEndpointSnafu)?;
|
||||
let client = ClientBuilder::new(broker_endpoints)
|
||||
.backoff_config(backoff_config)
|
||||
let broker_endpoints =
|
||||
common_wal::resolve_to_ipv4(&self.config.connection.broker_endpoints)
|
||||
.await
|
||||
.context(ResolveKafkaEndpointSnafu)?;
|
||||
let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(backoff_config);
|
||||
if let Some(sasl) = &self.config.connection.sasl {
|
||||
builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
|
||||
};
|
||||
if let Some(tls) = &self.config.connection.tls {
|
||||
builder = builder.tls_config(tls.to_tls_config().await.context(TlsConfigSnafu)?)
|
||||
};
|
||||
let client = builder
|
||||
.build()
|
||||
.await
|
||||
.with_context(|_| BuildKafkaClientSnafu {
|
||||
broker_endpoints: self.config.broker_endpoints.clone(),
|
||||
broker_endpoints: self.config.connection.broker_endpoints.clone(),
|
||||
})?;
|
||||
|
||||
let control_client = client
|
||||
@@ -242,7 +253,7 @@ impl TopicManager {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_wal::config::kafka::common::KafkaTopicConfig;
|
||||
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
|
||||
use common_wal::test_util::run_test_with_kafka_wal;
|
||||
|
||||
use super::*;
|
||||
@@ -289,7 +300,10 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
let config = MetasrvKafkaConfig {
|
||||
broker_endpoints,
|
||||
connection: KafkaConnectionConfig {
|
||||
broker_endpoints,
|
||||
..Default::default()
|
||||
},
|
||||
kafka_topic,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -13,7 +13,7 @@ workspace = true
|
||||
[dependencies]
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
backon = "0.4"
|
||||
backon = "1"
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -51,7 +51,7 @@ const META_TTL: Duration = Duration::from_secs(60 * 10);
|
||||
/// [Notify] is not a condition variable, we can't guarantee the waiters are notified
|
||||
/// if they didn't call `notified()` before we signal the notify. So we
|
||||
/// 1. use dedicated notify for each condition, such as waiting for a lock, waiting
|
||||
/// for children;
|
||||
/// for children;
|
||||
/// 2. always use `notify_one` and ensure there are only one waiter.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ProcedureMeta {
|
||||
|
||||
@@ -373,7 +373,7 @@ impl Runner {
|
||||
procedure,
|
||||
manager_ctx: self.manager_ctx.clone(),
|
||||
step,
|
||||
exponential_builder: self.exponential_builder.clone(),
|
||||
exponential_builder: self.exponential_builder,
|
||||
store: self.store.clone(),
|
||||
rolling_back: false,
|
||||
};
|
||||
|
||||
@@ -31,4 +31,5 @@ store-api.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-base.workspace = true
|
||||
futures-util.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.