Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: 241 commits, release/v0 ... v0.14.0-ni
@@ -3,3 +3,12 @@ linker = "aarch64-linux-gnu-gcc"
  [alias]
  sqlness = "run --bin sqlness-runner --"
+
+ [unstable.git]
+ shallow_index = true
+ shallow_deps = true
+
+ [unstable.gitoxide]
+ fetch = true
+ checkout = true
+ list_files = true
+ internal_use_git2 = false
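The `[alias]` entry above means the sqlness integration-test runner can be launched directly through cargo. A minimal local invocation, assuming that alias, could look like the following; `--preserve-state` is the flag the Windows CI job passes via `SQLNESS_OPTS`:

```shell
# Expands to `cargo run --bin sqlness-runner -- <args>` via the alias.
cargo sqlness

# Keep the test database state around for debugging failed cases.
cargo sqlness --preserve-state
```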
@@ -41,7 +41,14 @@ runs:
  username: ${{ inputs.dockerhub-image-registry-username }}
  password: ${{ inputs.dockerhub-image-registry-token }}

- - name: Build and push dev-builder-ubuntu image
+ - name: Set up qemu for multi-platform builds
+ uses: docker/setup-qemu-action@v3
+ with:
+ platforms: linux/amd64,linux/arm64
+ # The latest version will lead to segmentation fault.
+ image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+ - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
  shell: bash
  if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
  run: |
@@ -52,7 +59,7 @@ runs:
  IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
  DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

- - name: Build and push dev-builder-centos image
+ - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
  shell: bash
  if: ${{ inputs.build-dev-builder-centos == 'true' }}
  run: |
@@ -69,8 +76,7 @@ runs:
  run: |
  make dev-builder \
  BASE_IMAGE=android \
  BUILDX_MULTI_PLATFORM_BUILD=amd64 \
  IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
  IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
- DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
- docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+ DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
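The new step registers QEMU binfmt handlers so a single amd64 runner can build both amd64 and arm64 dev-builder images. A rough local equivalent, assuming a Docker host with buildx and using the same pinned binfmt image (the tag value below is only a placeholder), would be:

```shell
# Register an arm64 emulator; the workflow pins this image because the latest binfmt build can segfault.
docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install arm64

# Then build and push the dev-builder image for both platforms via the Makefile target.
make dev-builder BUILDX_MULTI_PLATFORM_BUILD=all \
  IMAGE_REGISTRY=docker.io IMAGE_NAMESPACE=greptime DEV_BUILDER_IMAGE_TAG=local-test
```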
@@ -52,7 +52,7 @@ runs:
  uses: ./.github/actions/build-greptime-binary
  with:
  base-image: ubuntu
- features: servers/dashboard,pg_kvbackend
+ features: servers/dashboard,pg_kvbackend,mysql_kvbackend
  cargo-profile: ${{ inputs.cargo-profile }}
  artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
  version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
  if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
  with:
  base-image: centos
- features: servers/dashboard,pg_kvbackend
+ features: servers/dashboard,pg_kvbackend,mysql_kvbackend
  cargo-profile: ${{ inputs.cargo-profile }}
  artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
  version: ${{ inputs.version }}
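The release artifacts now enable the MySQL metadata backend alongside PostgreSQL. A roughly equivalent local build, using the feature list from the diff (the profile and exact flags are illustrative), is:

```shell
# Build the greptime binary with the same feature set as the Linux release artifacts.
cargo build --release --bin greptime \
  --features "servers/dashboard,pg_kvbackend,mysql_kvbackend"
```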
@@ -47,7 +47,6 @@ runs:
  shell: pwsh
  run: make test sqlness-test
  env:
  RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
  RUST_BACKTRACE: 1
  SQLNESS_OPTS: "--preserve-state"
@@ -8,7 +8,7 @@ inputs:
  default: 2
  description: "Number of Datanode replicas"
  meta-replicas:
- default: 1
+ default: 2
  description: "Number of Metasrv replicas"
  image-registry:
  default: "docker.io"
@@ -2,13 +2,13 @@ meta:
  configData: |-
  [runtime]
  global_rt_size = 4

  [wal]
  provider = "kafka"
  broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
- num_topics = 3
+ auto_prune_topic_records = true

  [datanode]
  [datanode.client]
  timeout = "120s"
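One way to verify that the metasrv is actually writing its WAL to the broker configured above is to list the topics it creates. The command below is only an illustrative check, run from a pod or host that can reach the in-cluster broker; the exact topic names depend on the configured topic name prefix:

```shell
# List topics on the Kafka broker used for the remote WAL; the GreptimeDB WAL
# topics should appear here once the cluster starts writing.
kafka-topics.sh --bootstrap-server kafka.kafka-cluster.svc.cluster.local:9092 --list
```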
.github/actions/start-runner/action.yml (2 changed lines)

@@ -56,7 +56,7 @@ runs:

  - name: Start EC2 runner
  if: startsWith(inputs.runner, 'ec2')
- uses: machulav/ec2-github-runner@v2
+ uses: machulav/ec2-github-runner@v2.3.8
  id: start-linux-arm64-ec2-runner
  with:
  mode: start
.github/actions/stop-runner/action.yml (2 changed lines)

@@ -33,7 +33,7 @@ runs:

  - name: Stop EC2 runner
  if: ${{ inputs.label && inputs.ec2-instance-id }}
- uses: machulav/ec2-github-runner@v2
+ uses: machulav/ec2-github-runner@v2.3.8
  with:
  mode: stop
  label: ${{ inputs.label }}
.github/scripts/create-version.sh (12 changed lines)

@@ -25,7 +25,7 @@ function create_version() {
  fi

  # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
- # It will be like 'nigtly-20230808-7d0d8dc6'.
+ # It will be like 'nightly-20230808-7d0d8dc6'.
  if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
  echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
  exit 0
@@ -60,9 +60,9 @@ function create_version() {
  }

  # You can run as following examples:
- # GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
- # GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
- # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
- # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
- # GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+ # GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+ # GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+ # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+ # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+ # GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
  create_version
.github/workflows/apidoc.yml (2 changed lines)

@@ -14,7 +14,7 @@ name: Build API docs

  jobs:
  apidoc:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - uses: actions/checkout@v4
  with:
.github/workflows/dev-build.yml (29 changed lines)

@@ -16,11 +16,11 @@ on:
  description: The runner uses to build linux-amd64 artifacts
  default: ec2-c6i.4xlarge-amd64
  options:
- - ubuntu-20.04
- - ubuntu-20.04-8-cores
- - ubuntu-20.04-16-cores
- - ubuntu-20.04-32-cores
- - ubuntu-20.04-64-cores
+ - ubuntu-22.04
+ - ubuntu-22.04-8-cores
+ - ubuntu-22.04-16-cores
+ - ubuntu-22.04-32-cores
+ - ubuntu-22.04-64-cores
  - ec2-c6i.xlarge-amd64 # 4C8G
  - ec2-c6i.2xlarge-amd64 # 8C16G
  - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -83,7 +83,7 @@ jobs:
  allocate-runners:
  name: Allocate runners
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
  linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -218,7 +218,7 @@ jobs:
  build-linux-amd64-artifacts,
  build-linux-arm64-artifacts,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  build-result: ${{ steps.set-build-result.outputs.build-result }}
  steps:
@@ -238,6 +238,13 @@ jobs:
  version: ${{ needs.allocate-runners.outputs.version }}
  push-latest-tag: false # Don't push the latest tag to registry.
  dev-mode: true # Only build the standard images.
+
+ - name: Echo Docker image tag to step summary
+ run: |
+ echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+ echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY

  - name: Set build result
  id: set-build-result
@@ -251,7 +258,7 @@ jobs:
  allocate-runners,
  release-images-to-dockerhub,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  continue-on-error: true
  steps:
  - uses: actions/checkout@v4
@@ -283,7 +290,7 @@ jobs:
  name: Stop linux-amd64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-amd64-artifacts,
@@ -309,7 +316,7 @@ jobs:
  name: Stop linux-arm64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-arm64-artifacts,
@@ -337,7 +344,7 @@ jobs:
  needs: [
  release-images-to-dockerhub
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  permissions:
  issues: write
.github/workflows/develop.yml (37 changed lines)

@@ -23,7 +23,7 @@ concurrency:
  jobs:
  check-typos-and-docs:
  name: Check typos and docs
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - uses: actions/checkout@v4
  with:
@@ -36,7 +36,7 @@ jobs:
  || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

  license-header-check:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  name: Check License Header
  steps:
  - uses: actions/checkout@v4
@@ -49,7 +49,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [ ubuntu-20.04 ]
+ os: [ ubuntu-latest ]
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -72,7 +72,7 @@ jobs:

  toml:
  name: Toml Check
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -89,7 +89,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [ ubuntu-20.04 ]
+ os: [ ubuntu-latest ]
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -111,7 +111,7 @@ jobs:
  - name: Build greptime binaries
  shell: bash
  # `cargo gc` will invoke `cargo build` with specified args
- run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
+ run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
  - name: Pack greptime binaries
  shell: bash
  run: |
@@ -248,7 +248,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [ ubuntu-20.04 ]
+ os: [ ubuntu-latest ]
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -270,7 +270,7 @@ jobs:
  - name: Build greptime bianry
  shell: bash
  # `cargo gc` will invoke `cargo build` with specified args
- run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
+ run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
  - name: Pack greptime binary
  shell: bash
  run: |
@@ -568,7 +568,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [ ubuntu-20.04 ]
+ os: [ ubuntu-latest ]
  mode:
  - name: "Basic"
  opts: ""
@@ -576,9 +576,12 @@ jobs:
  - name: "Remote WAL"
  opts: "-w kafka -k 127.0.0.1:9092"
  kafka: true
- - name: "Pg Kvbackend"
+ - name: "PostgreSQL KvBackend"
  opts: "--setup-pg"
  kafka: false
+ - name: "MySQL Kvbackend"
+ opts: "--setup-mysql"
+ kafka: false
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
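The sqlness matrix above now covers a MySQL-backed metadata store in addition to PostgreSQL and remote WAL. Locally, the same modes can be driven through the `cargo sqlness` alias; the flags below are the ones the matrix passes and assume the corresponding PostgreSQL/MySQL/Kafka services are already running:

```shell
# Basic mode (no external services).
cargo sqlness

# Metadata store on PostgreSQL or MySQL, mirroring the CI matrix entries.
cargo sqlness --setup-pg
cargo sqlness --setup-mysql

# Remote WAL mode against a local Kafka broker.
cargo sqlness -w kafka -k 127.0.0.1:9092
```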
@@ -607,7 +610,7 @@ jobs:

  fmt:
  name: Rustfmt
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -624,7 +627,7 @@ jobs:

  clippy:
  name: Clippy
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
@@ -687,7 +690,7 @@ jobs:
  working-directory: tests-integration/fixtures
  run: docker compose up -d --wait
  - name: Run nextest cases
- run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
+ run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
  env:
  CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
  RUST_BACKTRACE: 1
@@ -704,13 +707,14 @@ jobs:
  GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
  GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
  GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+ GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
  GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
  GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
  UNITTEST_LOG_DIR: "__unittest_logs"

  coverage:
  if: github.event_name == 'merge_group'
- runs-on: ubuntu-20.04-8-cores
+ runs-on: ubuntu-22.04-8-cores
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
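To reproduce the unit/integration test job locally with the new MySQL metadata backend, the workflow's recipe translates roughly to the following (endpoints copied from the CI env block; the fixtures compose file is assumed to provide MySQL, PostgreSQL, etcd, Kafka and MinIO):

```shell
# Start the test dependencies declared in the repository fixtures.
(cd tests-integration/fixtures && docker compose up -d --wait)

# Run the suite with the MySQL kv-backend feature, pointing it at the fixture endpoint.
GT_MYSQL_ENDPOINTS=mysql://greptimedb:admin@127.0.0.1:3306/mysql \
  cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
```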
@@ -739,7 +743,7 @@ jobs:
  working-directory: tests-integration/fixtures
  run: docker compose up -d --wait
  - name: Run nextest cases
- run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
+ run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
  env:
  CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
  RUST_BACKTRACE: 1
@@ -755,6 +759,7 @@ jobs:
  GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
  GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
  GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+ GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
  GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
  GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
  UNITTEST_LOG_DIR: "__unittest_logs"
@@ -770,7 +775,7 @@ jobs:
  # compat:
  # name: Compatibility Test
  # needs: build
- # runs-on: ubuntu-20.04
+ # runs-on: ubuntu-22.04
  # timeout-minutes: 60
  # steps:
  # - uses: actions/checkout@v4
.github/workflows/docbot.yml (6 changed lines)

@@ -3,9 +3,13 @@ on:
  pull_request_target:
  types: [opened, edited]

+ concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
  jobs:
  docbot:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  permissions:
  pull-requests: write
  contents: read
.github/workflows/docs.yml (16 changed lines)

@@ -31,7 +31,7 @@ name: CI
  jobs:
  typos:
  name: Spell Check with Typos
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - uses: actions/checkout@v4
  with:
@@ -39,7 +39,7 @@ jobs:
  - uses: crate-ci/typos@master

  license-header-check:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  name: Check License Header
  steps:
  - uses: actions/checkout@v4
@@ -49,29 +49,29 @@ jobs:

  check:
  name: Check
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - run: 'echo "No action required"'

  fmt:
  name: Rustfmt
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - run: 'echo "No action required"'

  clippy:
  name: Clippy
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - run: 'echo "No action required"'

  coverage:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - run: 'echo "No action required"'

  test:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - run: 'echo "No action required"'

@@ -80,7 +80,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [ ubuntu-20.04 ]
+ os: [ ubuntu-latest ]
  mode:
  - name: "Basic"
  - name: "Remote WAL"
.github/workflows/grafana.yml (new file, 52 lines)

@@ -0,0 +1,52 @@
+ name: Check Grafana Panels
+
+ on:
+ pull_request:
+ branches:
+ - main
+ paths:
+ - 'grafana/**' # Trigger only when files under the grafana/ directory change
+
+ jobs:
+ check-panels:
+ runs-on: ubuntu-latest
+
+ steps:
+ # Check out the repository
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Install jq (required for the script)
+ - name: Install jq
+ run: sudo apt-get install -y jq
+
+ # Make the check.sh script executable
+ - name: Make check.sh executable
+ run: chmod +x grafana/check.sh
+
+ # Run the check.sh script
+ - name: Run check.sh
+ run: ./grafana/check.sh
+
+ # Only run summary.sh for pull_request events (not for merge queues or final pushes)
+ - name: Check if this is a pull request
+ id: check-pr
+ run: |
+ if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+ echo "is_pull_request=true" >> $GITHUB_OUTPUT
+ else
+ echo "is_pull_request=false" >> $GITHUB_OUTPUT
+ fi
+
+ # Make the summary.sh script executable
+ - name: Make summary.sh executable
+ if: steps.check-pr.outputs.is_pull_request == 'true'
+ run: chmod +x grafana/summary.sh
+
+ # Run the summary.sh script and add its output to the GitHub Job Summary
+ - name: Run summary.sh and add to Job Summary
+ if: steps.check-pr.outputs.is_pull_request == 'true'
+ run: |
+ SUMMARY=$(./grafana/summary.sh)
+ echo "### Summary of Grafana Panels" >> $GITHUB_STEP_SUMMARY
+ echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY
.github/workflows/nightly-build.yml (22 changed lines)

@@ -14,11 +14,11 @@ on:
  description: The runner uses to build linux-amd64 artifacts
  default: ec2-c6i.4xlarge-amd64
  options:
- - ubuntu-20.04
- - ubuntu-20.04-8-cores
- - ubuntu-20.04-16-cores
- - ubuntu-20.04-32-cores
- - ubuntu-20.04-64-cores
+ - ubuntu-22.04
+ - ubuntu-22.04-8-cores
+ - ubuntu-22.04-16-cores
+ - ubuntu-22.04-32-cores
+ - ubuntu-22.04-64-cores
  - ec2-c6i.xlarge-amd64 # 4C8G
  - ec2-c6i.2xlarge-amd64 # 8C16G
  - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
  allocate-runners:
  name: Allocate runners
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
  linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -182,7 +182,7 @@ jobs:
  build-linux-amd64-artifacts,
  build-linux-arm64-artifacts,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
  steps:
@@ -214,7 +214,7 @@ jobs:
  allocate-runners,
  release-images-to-dockerhub,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  # When we push to ACR, it's easy to fail due to some unknown network issues.
  # However, we don't want to fail the whole workflow because of this.
  # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -249,7 +249,7 @@ jobs:
  name: Stop linux-amd64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-amd64-artifacts,
@@ -275,7 +275,7 @@ jobs:
  name: Stop linux-arm64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-arm64-artifacts,
@@ -303,7 +303,7 @@ jobs:
  needs: [
  release-images-to-dockerhub
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  permissions:
  issues: write
  env:
.github/workflows/nightly-ci.yml (7 changed lines)

@@ -13,7 +13,7 @@ jobs:
  sqlness-test:
  name: Run sqlness test
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
  steps:
  - name: Checkout
  uses: actions/checkout@v4
@@ -107,7 +107,6 @@ jobs:
  CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
  RUST_BACKTRACE: 1
  CARGO_INCREMENTAL: 0
  RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
  GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
  GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
  GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
@@ -133,7 +132,7 @@ jobs:
  name: Check status
  needs: [sqlness-test, sqlness-windows, test-on-windows]
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  check-result: ${{ steps.set-check-result.outputs.check-result }}
  steps:
@@ -146,7 +145,7 @@ jobs:
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
  name: Send notification to Greptime team
  needs: [check-status]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  env:
  SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
  steps:
@@ -29,7 +29,7 @@ jobs:
  release-dev-builder-images:
  name: Release dev builder images
  if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
- runs-on: ubuntu-20.04-16-cores
+ runs-on: ubuntu-latest
  outputs:
  version: ${{ steps.set-version.outputs.version }}
  steps:
@@ -63,7 +63,7 @@ jobs:

  release-dev-builder-images-ecr:
  name: Release dev builder images to AWS ECR
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  release-dev-builder-images
  ]
@@ -148,7 +148,7 @@ jobs:

  release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
  name: Release dev builder images to CN region
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  release-dev-builder-images
  ]
.github/workflows/release.yml (32 changed lines)

@@ -18,11 +18,11 @@ on:
  description: The runner uses to build linux-amd64 artifacts
  default: ec2-c6i.4xlarge-amd64
  options:
- - ubuntu-20.04
- - ubuntu-20.04-8-cores
- - ubuntu-20.04-16-cores
- - ubuntu-20.04-32-cores
- - ubuntu-20.04-64-cores
+ - ubuntu-22.04
+ - ubuntu-22.04-8-cores
+ - ubuntu-22.04-16-cores
+ - ubuntu-22.04-32-cores
+ - ubuntu-22.04-64-cores
  - ec2-c6i.xlarge-amd64 # 4C8G
  - ec2-c6i.2xlarge-amd64 # 8C16G
  - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -91,13 +91,13 @@ env:
  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
- NEXT_RELEASE_VERSION: v0.12.0
+ NEXT_RELEASE_VERSION: v0.14.0

  jobs:
  allocate-runners:
  name: Allocate runners
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  outputs:
  linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
  linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -299,7 +299,7 @@ jobs:
  build-linux-amd64-artifacts,
  build-linux-arm64-artifacts,
  ]
- runs-on: ubuntu-2004-16-cores
+ runs-on: ubuntu-latest
  outputs:
  build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
  steps:
@@ -317,7 +317,7 @@ jobs:
  image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
  image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
  version: ${{ needs.allocate-runners.outputs.version }}
- push-latest-tag: true
+ push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

  - name: Set build image result
  id: set-build-image-result
@@ -335,7 +335,7 @@ jobs:
  build-windows-artifacts,
  release-images-to-dockerhub,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  # When we push to ACR, it's easy to fail due to some unknown network issues.
  # However, we don't want to fail the whole workflow because of this.
  # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -364,7 +364,7 @@ jobs:
  dev-mode: false
  upload-to-s3: true
  update-version-info: true
- push-latest-tag: true
+ push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

  publish-github-release:
  name: Create GitHub release and upload artifacts
@@ -377,7 +377,7 @@ jobs:
  build-windows-artifacts,
  release-images-to-dockerhub,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  steps:
  - uses: actions/checkout@v4
  with:
@@ -396,7 +396,7 @@ jobs:
  name: Stop linux-amd64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-amd64-artifacts,
@@ -422,7 +422,7 @@ jobs:
  name: Stop linux-arm64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  needs: [
  allocate-runners,
  build-linux-arm64-artifacts,
@@ -448,7 +448,7 @@ jobs:
  name: Bump doc version
  if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
  needs: [allocate-runners]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
  permissions:
  issues: write # Allows the action to create issues for cyborg.
@@ -475,7 +475,7 @@ jobs:
  build-macos-artifacts,
  build-windows-artifacts,
  ]
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
  permissions:
  issues: write # Allows the action to create issues for cyborg.
.github/workflows/semantic-pull-request.yml (6 changed lines)

@@ -7,9 +7,13 @@ on:
  - reopened
  - edited

+ concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
  jobs:
  check:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
  timeout-minutes: 10
  steps:
  - uses: actions/checkout@v4
.gitignore (3 changed lines)

@@ -54,3 +54,6 @@ tests-fuzz/corpus/
  # Nix
  .direnv
  .envrc
+
+ ## default data home
+ greptimedb_data
AUTHOR.md (21 changed lines)

@@ -3,30 +3,28 @@
  ## Individual Committers (in alphabetical order)

  * [CookiePieWw](https://github.com/CookiePieWw)
- * [KKould](https://github.com/KKould)
- * [NiwakaDev](https://github.com/NiwakaDev)
  * [etolbakov](https://github.com/etolbakov)
  * [irenjj](https://github.com/irenjj)
- * [tisonkun](https://github.com/tisonkun)
+ * [KKould](https://github.com/KKould)
+ * [Lanqing Yang](https://github.com/lyang24)
+ * [NiwakaDev](https://github.com/NiwakaDev)
+ * [tisonkun](https://github.com/tisonkun)

  ## Team Members (in alphabetical order)

- * [Breeze-P](https://github.com/Breeze-P)
- * [GrepTime](https://github.com/GrepTime)
- * [MichaelScofield](https://github.com/MichaelScofield)
- * [Wenjie0329](https://github.com/Wenjie0329)
- * [WenyXu](https://github.com/WenyXu)
- * [ZonaHex](https://github.com/ZonaHex)
  * [apdong2022](https://github.com/apdong2022)
  * [beryl678](https://github.com/beryl678)
+ * [Breeze-P](https://github.com/Breeze-P)
  * [daviderli614](https://github.com/daviderli614)
  * [discord9](https://github.com/discord9)
  * [evenyag](https://github.com/evenyag)
  * [fengjiachun](https://github.com/fengjiachun)
  * [fengys1996](https://github.com/fengys1996)
+ * [GrepTime](https://github.com/GrepTime)
  * [holalengyu](https://github.com/holalengyu)
  * [killme2008](https://github.com/killme2008)
+ * [MichaelScofield](https://github.com/MichaelScofield)
  * [nicecui](https://github.com/nicecui)
  * [paomian](https://github.com/paomian)
  * [shuiyisong](https://github.com/shuiyisong)
@@ -34,11 +32,14 @@
  * [sunng87](https://github.com/sunng87)
  * [v0y4g3r](https://github.com/v0y4g3r)
  * [waynexia](https://github.com/waynexia)
+ * [Wenjie0329](https://github.com/Wenjie0329)
+ * [WenyXu](https://github.com/WenyXu)
  * [xtang](https://github.com/xtang)
  * [zhaoyingnan01](https://github.com/zhaoyingnan01)
  * [zhongzc](https://github.com/zhongzc)
+ * [ZonaHex](https://github.com/ZonaHex)
  * [zyy17](https://github.com/zyy17)

  ## All Contributors

- [](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
+ To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
Cargo.lock (generated, 2175 changed lines): diff suppressed because it is too large.
Cargo.toml (100 changed lines)

@@ -29,6 +29,7 @@ members = [
  "src/common/query",
  "src/common/recordbatch",
  "src/common/runtime",
+ "src/common/session",
  "src/common/substrait",
  "src/common/telemetry",
  "src/common/test-util",
@@ -67,7 +68,7 @@ members = [
  resolver = "2"

  [workspace.package]
- version = "0.12.0"
+ version = "0.14.0"
  edition = "2021"
  license = "Apache-2.0"

@@ -76,7 +77,6 @@ clippy.print_stdout = "warn"
  clippy.print_stderr = "warn"
  clippy.dbg_macro = "warn"
  clippy.implicit_clone = "warn"
  clippy.readonly_write_lock = "allow"
  rust.unknown_lints = "deny"
  rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

@@ -88,20 +88,20 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
  #
  # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
  ahash = { version = "0.8", features = ["compile-time-rng"] }
- aquamarine = "0.3"
- arrow = { version = "53.0.0", features = ["prettyprint"] }
- arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
- arrow-flight = "53.0"
- arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
- arrow-schema = { version = "53.0", features = ["serde"] }
+ aquamarine = "0.6"
+ arrow = { version = "54.2", features = ["prettyprint"] }
+ arrow-array = { version = "54.2", default-features = false, features = ["chrono-tz"] }
+ arrow-flight = "54.2"
+ arrow-ipc = { version = "54.2", default-features = false, features = ["lz4", "zstd"] }
+ arrow-schema = { version = "54.2", features = ["serde"] }
  async-stream = "0.3"
  async-trait = "0.1"
  # Remember to update axum-extra, axum-macros when updating axum
  axum = "0.8"
  axum-extra = "0.10"
- axum-macros = "0.4"
+ axum-macros = "0.5"
  backon = "1"
- base64 = "0.21"
+ base64 = "0.22"
  bigdecimal = "0.4.2"
  bitflags = "2.4.1"
  bytemuck = "1.12"
@@ -111,42 +111,43 @@ chrono-tz = "0.10.1"
  clap = { version = "4.4", features = ["derive"] }
  config = "0.13.0"
  crossbeam-utils = "0.8"
- dashmap = "5.4"
- datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- deadpool = "0.10"
- deadpool-postgres = "0.12"
- derive_builder = "0.12"
+ dashmap = "6.1"
+ datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+ deadpool = "0.12"
+ deadpool-postgres = "0.14"
+ derive_builder = "0.20"
  dotenv = "0.15"
  etcd-client = "0.14"
  fst = "0.4.7"
  futures = "0.3"
  futures-util = "0.3"
- greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a25adc8a01340231121646d8f0a29d0e92f45461" }
+ greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b6d9cffd43c4e6358805a798f17e03e232994b82" }
  hex = "0.4"
  http = "1"
  humantime = "2.1"
  humantime-serde = "1.1"
  hyper = "1.1"
  hyper-util = "0.1"
- itertools = "0.10"
+ itertools = "0.14"
  jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
  lazy_static = "1.4"
  local-ip-address = "0.6"
  loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
  meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
- mockall = "0.11.4"
+ mockall = "0.13"
  moka = "0.12"
  nalgebra = "0.33"
- notify = "6.1"
+ notify = "8.0"
  num_cpus = "1.16"
  object_store_opendal = "0.50"
  once_cell = "1.18"
  opentelemetry-proto = { version = "0.27", features = [
  "gen-tonic",
@@ -156,17 +157,15 @@ opentelemetry-proto = { version = "0.27", features = [
  "logs",
  ] }
  parking_lot = "0.12"
- parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
+ parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
  paste = "1.0"
  pin-project = "1.0"
  prometheus = { version = "0.13.3", features = ["process"] }
- promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [
- "ser",
- ], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
+ promql-parser = { version = "0.5.1", features = ["ser"] }
  prost = "0.13"
  raft-engine = { version = "0.4.1", default-features = false }
- rand = "0.8"
- ratelimit = "0.9"
+ rand = "0.9"
+ ratelimit = "0.10"
  regex = "1.8"
  regex-automata = "0.4"
  reqwest = { version = "0.12", default-features = false, features = [
@@ -178,29 +177,36 @@ reqwest = { version = "0.12", default-features = false, features = [
  rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
  "transport-tls",
  ] }
- rstest = "0.21"
+ rstest = "0.25"
  rstest_reuse = "0.7"
  rust_decimal = "1.33"
  rustc-hash = "2.0"
- rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
+ # It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
+ rustls = { version = "0.23.25", default-features = false }
  serde = { version = "1.0", features = ["derive"] }
  serde_json = { version = "1.0", features = ["float_roundtrip"] }
  serde_with = "3"
- shadow-rs = "0.38"
+ shadow-rs = "1.1"
  simd-json = "0.15"
  similar-asserts = "1.6.0"
  smallvec = { version = "1", features = ["serde"] }
  snafu = "0.8"
- sysinfo = "0.30"
- # on branch v0.52.x
- sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
+ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
  "visitor",
  "serde",
- ] } # on branch v0.44.x
- strum = { version = "0.25", features = ["derive"] }
+ ] } # branch = "v0.54.x"
+ sqlx = { version = "0.8", features = [
+ "runtime-tokio-rustls",
+ "mysql",
+ "postgres",
+ "chrono",
+ ] }
+ strum = { version = "0.27", features = ["derive"] }
+ sysinfo = "0.33"
  tempfile = "3"
  tokio = { version = "1.40", features = ["full"] }
  tokio-postgres = "0.7"
- tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
+ tokio-rustls = { version = "0.26.2", default-features = false }
  tokio-stream = "0.1"
  tokio-util = { version = "0.7", features = ["io-util", "compat"] }
  toml = "0.8.8"
@@ -243,6 +249,7 @@ common-procedure-test = { path = "src/common/procedure-test" }
  common-query = { path = "src/common/query" }
  common-recordbatch = { path = "src/common/recordbatch" }
  common-runtime = { path = "src/common/runtime" }
+ common-session = { path = "src/common/session" }
  common-telemetry = { path = "src/common/telemetry" }
  common-test-util = { path = "src/common/test-util" }
  common-time = { path = "src/common/time" }
@@ -275,15 +282,6 @@ store-api = { path = "src/store-api" }
  substrait = { path = "src/common/substrait" }
  table = { path = "src/table" }

- [patch.crates-io]
- # change all rustls dependencies to use our fork to default to `ring` to make it "just work"
- hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
- rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
- tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
- # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
- # see https://github.com/aws/aws-lc-rs/pull/526
- # aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }

  [workspace.dependencies.meter-macros]
  git = "https://github.com/GreptimeTeam/greptime-meter.git"
  rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
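After a coordinated dependency bump like the arrow/parquet 53 to 54.2 move above, one quick sanity check is that the workspace does not end up resolving two major versions of the same crate. This is only an illustrative check, not part of the repository's tooling:

```shell
# Report crates that appear at multiple versions in the dependency graph;
# arrow and parquet should each resolve to a single 54.x release after the bump.
cargo tree --duplicates | grep -E "^(arrow|parquet)" || echo "no duplicate arrow/parquet versions"
```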
Makefile (9 changed lines)

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
  IMAGE_REGISTRY ?= docker.io
  IMAGE_NAMESPACE ?= greptime
  IMAGE_TAG ?= latest
- DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
+ DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
  BUILDX_MULTI_PLATFORM_BUILD ?= false
  BUILDX_BUILDER_NAME ?= gtbuilder
  BASE_IMAGE ?= ubuntu
@@ -32,6 +32,10 @@ ifneq ($(strip $(BUILD_JOBS)),)
  NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
  endif

+ ifneq ($(strip $(BUILD_JOBS)),)
+ SQLNESS_OPTS += --jobs ${BUILD_JOBS}
+ endif
+
  ifneq ($(strip $(CARGO_PROFILE)),)
  CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
  endif
@@ -60,6 +64,8 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
  BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
  else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
  BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
+ else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
+ BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
  else
  BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
  endif
@@ -191,6 +197,7 @@ fix-clippy: ## Fix clippy violations.
  fmt-check: ## Check code format.
  cargo fmt --all -- --check
  python3 scripts/check-snafu.py
+ python3 scripts/check-super-imports.py

  .PHONY: start-etcd
  start-etcd: ## Start single node etcd for testing purpose.
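With the new block above, the BUILD_JOBS variable now limits sqlness parallelism as well as nextest build jobs. An illustrative invocation (target names taken from the workflows that run `make test sqlness-test`):

```shell
# Run unit tests and the sqlness suite with bounded parallelism;
# BUILD_JOBS feeds both NEXTEST_OPTS (--build-jobs) and SQLNESS_OPTS (--jobs).
make test sqlness-test BUILD_JOBS=4
```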
README.md (34 changed lines)

@@ -6,7 +6,7 @@
  </picture>
  </p>

- <h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>
+ <h2 align="center">Unified & Cost-Effective Observability Database for Metrics, Logs, and Events</h2>

  <div align="center">
  <h3 align="center">
@@ -62,31 +62,35 @@

  ## Introduction

- **GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.
+ **GreptimeDB** is an open-source, cloud-native, unified & cost-effective observability database for **Metrics**, **Logs**, and **Traces**. You can gain real-time insights from Edge to Cloud at Any Scale.

  ## News

+ **[GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)**

  ## Why GreptimeDB

- Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:
+ Our core developers have been building observability data platforms for years. Based on our best practices, GreptimeDB was born to give you:

- * **Unified Processing of Metrics, Logs, and Events**
+ * **Unified Processing of Observability Data**

- GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
+ A unified database that treats metrics, logs, and traces as timestamped wide events with context, supporting [SQL](https://docs.greptime.com/user-guide/query-data/sql)/[PromQL](https://docs.greptime.com/user-guide/query-data/promql) queries and [stream processing](https://docs.greptime.com/user-guide/flow-computation/overview) to simplify complex data stacks.
+
+ * **High Performance and Cost-effective**
+
+ Written in Rust, combines a distributed query engine with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skip data, and vector) and optimized columnar storage to deliver sub-second responses on petabyte-scale data and high-cost efficiency.
+
+ * **Cloud-native Distributed Database**
+
+ Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.

- * **Performance and Cost-effective**
+ * **Developer-Friendly**

- Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).
+ Access standardized SQL/PromQL interfaces through built-in web dashboard, REST API, and MySQL/PostgreSQL protocols. Supports widely adopted data ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview) for seamless migration and integration.

- * **Cloud-Edge Collaboration**
+ * **Flexible Deployment Options**

- GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
-
- * **Multi-protocol Ingestion, SQL & PromQL Ready**
-
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
+ Deploy GreptimeDB anywhere from ARM-based edge devices to cloud environments with unified APIs and bandwidth-efficient data synchronization. Query edge and cloud data seamlessly through identical APIs. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).

  For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
@@ -112,7 +116,7 @@ Start a GreptimeDB container with:

  ```shell
  docker run -p 127.0.0.1:4000-4003:4000-4003 \
- -v "$(pwd)/greptimedb:/tmp/greptimedb" \
+ -v "$(pwd)/greptimedb:./greptimedb_data" \
  --name greptime --rm \
  greptime/greptimedb:latest standalone start \
  --http-addr 0.0.0.0:4000 \
@@ -229,3 +233,5 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
  - GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
  - [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
  - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
+
+ <img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>
@@ -12,7 +12,6 @@

  | Key | Type | Default | Descriptions |
  | --- | -----| ------- | ----------- |
- | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
  | `default_timezone` | String | Unset | The default timezone of the server. |
  | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
  | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
@@ -24,7 +23,7 @@
  | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
  | `http` | -- | -- | The HTTP server options. |
  | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
- | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+ | `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
  | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
  | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
  | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
@@ -86,10 +85,6 @@
  | `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
  | `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
  | `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
- | `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
- | `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
- | `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
- | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
  | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
  | `metadata_store` | -- | -- | Metadata storage options. |
  | `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
@@ -98,10 +93,13 @@
  | `procedure` | -- | -- | Procedure storage options. |
  | `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
  | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
+ | `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
+ | `flow` | -- | -- | flow engine options. |
+ | `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
  | `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -181,7 +179,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -222,7 +220,7 @@
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
@@ -274,12 +272,14 @@
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -308,7 +308,7 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
@@ -319,6 +319,7 @@
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
@@ -327,6 +328,7 @@
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto-split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comment out `max_metadata_value_size` to not split large values (no limit). |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
@@ -341,17 +343,16 @@
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.auto_prune_interval` | String | `0s` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries. |
|
||||
| `wal.trigger_flush_threshold` | Integer | `0` | The threshold to trigger a flush operation of a region in automatically WAL pruning.<br/>Metasrv will send a flush request to flush the region when:<br/>`trigger_flush_threshold` + `prunable_entry_id` < `max_prunable_entry_id`<br/>where:<br/>- `prunable_entry_id` is the maximum entry id that can be pruned of the region.<br/>- `max_prunable_entry_id` is the maximum prunable entry id among all regions in the same topic.<br/>Set to `0` to disable the flush operation. |
|
||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -380,7 +381,6 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
@@ -389,7 +389,7 @@
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
@@ -433,15 +433,13 @@
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -521,7 +519,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -550,7 +548,6 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
@@ -562,7 +559,7 @@
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
@@ -578,7 +575,7 @@
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The datanode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
node_id = 42
|
||||
@@ -27,7 +24,7 @@ max_concurrent_queries = 0
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -119,7 +116,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -169,22 +166,6 @@ max_batch_bytes = "1MB"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Whether to enable WAL index creation.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_index = true
|
||||
@@ -231,6 +212,7 @@ overwrite_entry_start_id = false
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -261,10 +243,16 @@ overwrite_entry_start_id = false
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -617,7 +605,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the flownode. It can be `standalone` or `distributed`.
|
||||
mode = "distributed"
|
||||
|
||||
## The flownode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
node_id = 14
|
||||
@@ -30,7 +27,7 @@ max_send_message_size = "512MB"
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -76,7 +73,7 @@ retry_interval = "3s"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
@@ -121,4 +118,3 @@ sample_ratio = 1.0
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ retry_interval = "3s"
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -179,6 +179,12 @@ metadata_cache_ttl = "10m"
|
||||
# TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
@@ -189,7 +195,7 @@ tcp_nodelay = true
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
## The working home directory.
|
||||
data_home = "/tmp/metasrv/"
|
||||
data_home = "./greptimedb_data/metasrv/"
|
||||
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
@@ -50,6 +50,9 @@ use_memory_store = false
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## Max allowed idle time before removing node info from metasrv memory.
|
||||
node_max_idle_time = "24hours"
|
||||
|
||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
@@ -76,6 +79,11 @@ retry_delay = "500ms"
|
||||
## Comment out `max_metadata_value_size` to not split large values (no limit).
|
||||
max_metadata_value_size = "1500KiB"
|
||||
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
@@ -122,6 +130,22 @@ broker_endpoints = ["127.0.0.1:9092"]
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Interval of automatic WAL pruning.
## Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.
|
||||
auto_prune_interval = "0s"
|
||||
|
||||
## The threshold to trigger a flush operation of a region in automatically WAL pruning.
|
||||
## Metasrv will send a flush request to flush the region when:
|
||||
## `trigger_flush_threshold` + `prunable_entry_id` < `max_prunable_entry_id`
|
||||
## where:
|
||||
## - `prunable_entry_id` is the maximum entry id that can be pruned of the region.
|
||||
## - `max_prunable_entry_id` is the maximum prunable entry id among all regions in the same topic.
|
||||
## Set to `0` to disable the flush operation.
|
||||
trigger_flush_threshold = 0
|
||||
|
||||
## Concurrent task limit for automatic WAL pruning.
|
||||
auto_prune_parallelism = 10
|
||||
|
||||
## Number of topics.
|
||||
num_topics = 64
|
||||
|
||||
@@ -141,17 +165,6 @@ replication_factor = 1
|
||||
|
||||
## Above which a topic creation operation will be cancelled.
|
||||
create_topic_timeout = "30s"
|
||||
## The initial backoff for kafka clients.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff for kafka clients.
|
||||
backoff_max = "10s"
|
||||
|
||||
## Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
backoff_base = 2
|
||||
|
||||
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
@@ -174,7 +187,7 @@ backoff_deadline = "5mins"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
@@ -34,7 +31,7 @@ max_concurrent_queries = 0
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -164,7 +161,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -242,22 +239,6 @@ max_batch_bytes = "1MB"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Ignore missing entries during read WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
##
|
||||
@@ -302,6 +283,10 @@ purge_interval = "1m"
|
||||
max_retry_times = 3
|
||||
## Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
@@ -318,6 +303,7 @@ retry_delay = "500ms"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -348,10 +334,16 @@ retry_delay = "500ms"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -704,7 +696,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:20.04 as builder
|
||||
FROM ubuntu:22.04 as builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:22.04
|
||||
FROM ubuntu:latest
|
||||
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:20.04
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||
ARG DOCKER_BUILD_ROOT=.
|
||||
@@ -41,7 +41,7 @@ RUN mv protoc3/include/* /usr/local/include/
|
||||
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
||||
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
|
||||
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
||||
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
|
||||
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
||||
# it can be a different user that have prepared the submodules.
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
# Use the legacy glibc 2.28.
|
||||
FROM ubuntu:18.10
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
|
||||
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
|
||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
|
||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
tzdata \
|
||||
curl \
|
||||
ca-certificates \
|
||||
git \
|
||||
build-essential \
|
||||
unzip \
|
||||
pkg-config
|
||||
|
||||
# Install protoc.
|
||||
ENV PROTOC_VERSION=29.3
|
||||
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
||||
elif [ "$(uname -m)" = "aarch64" ]; then \
|
||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
|
||||
else \
|
||||
echo "Unsupported architecture"; exit 1; \
|
||||
fi && \
|
||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
|
||||
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
|
||||
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
|
||||
rm -f ${PROTOC_ZIP}
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
docker/dev-builder/ubuntu/Dockerfile-20.04 (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||
ARG DOCKER_BUILD_ROOT=.
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
tzdata \
|
||||
curl \
|
||||
unzip \
|
||||
ca-certificates \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
RUN echo "target platform: $TARGETPLATFORM"
|
||||
|
||||
ARG PROTOBUF_VERSION=29.3
|
||||
|
||||
# Install protobuf, because the one in the apt is too old (v3.12).
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
|
||||
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
|
||||
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
|
||||
fi
|
||||
RUN mv protoc3/bin/* /usr/local/bin/
|
||||
RUN mv protoc3/include/* /usr/local/include/
|
||||
|
||||
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
||||
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
|
||||
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
||||
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
|
||||
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
|
||||
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
||||
# it can be a different user that have prepared the submodules.
|
||||
RUN git config --global --add safe.directory '*'
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
# compile from source take too long, so we use the precompiled binary instead
|
||||
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
|
||||
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
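For reference, a hedged sketch of building this dev-builder image. `RUST_TOOLCHAIN` and `DOCKER_BUILD_ROOT` are the build args declared in the Dockerfile above; the toolchain value and image tag are placeholders:

```bash
# Build from the repository root; the toolchain pin and tag are illustrative.
docker build \
  -f docker/dev-builder/ubuntu/Dockerfile-20.04 \
  --build-arg RUST_TOOLCHAIN=<rust-toolchain> \
  --build-arg DOCKER_BUILD_ROOT=. \
  -t greptimedb-dev-builder:ubuntu-20.04 .
```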
@@ -25,7 +25,7 @@ services:
|
||||
- --initial-cluster-state=new
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
- ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
|
||||
interval: 5s
|
||||
@@ -68,12 +68,13 @@ services:
|
||||
- datanode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --data-home=/greptimedb_data
|
||||
- --rpc-bind-addr=0.0.0.0:3001
|
||||
- --rpc-server-addr=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
- ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
|
||||
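A hedged sketch of bringing up this compose stack; the compose file path is a placeholder for whichever file this diff belongs to:

```bash
# Start the etcd/metasrv/datanode services defined in the compose file,
# then follow the datanode logs. <compose-file>.yml is a placeholder path.
docker compose -f <compose-file>.yml up -d
docker compose -f <compose-file>.yml logs -f datanode0
```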
docs/benchmarks/tsbs/v0.12.0.md (new file, 40 lines)
@@ -0,0 +1,40 @@
# TSBS benchmark - v0.12.0

## Environment

### Amazon EC2

| | |
|---------|-------------------------|
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 100GB (GP3) |
| OS | Ubuntu Server 24.04 LTS |

## Write performance

| Environment | Ingest rate (rows/s) |
|-----------------|----------------------|
| EC2 c5d.2xlarge | 326839.28 |

## Query performance

| Query type | EC2 c5d.2xlarge (ms) |
|-----------------------|----------------------|
| cpu-max-all-1 | 12.46 |
| cpu-max-all-8 | 24.20 |
| double-groupby-1 | 673.08 |
| double-groupby-5 | 963.99 |
| double-groupby-all | 1330.05 |
| groupby-orderby-limit | 952.46 |
| high-cpu-1 | 5.08 |
| high-cpu-all | 4638.57 |
| lastpoint | 591.02 |
| single-groupby-1-1-1 | 4.06 |
| single-groupby-1-1-12 | 4.73 |
| single-groupby-1-8-1 | 8.23 |
| single-groupby-5-1-1 | 4.61 |
| single-groupby-5-1-12 | 5.61 |
| single-groupby-5-8-1 | 9.74 |
|
||||
# Profile memory usage of GreptimeDB
|
||||
|
||||
This crate provides an easy approach to dump memory profiling info.
|
||||
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](docs/how-to/memory-profile-scripts).
|
||||
|
||||
## Prerequisites
|
||||
### jemalloc
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
This document introduces how to write fuzz tests in GreptimeDB.
|
||||
|
||||
## What is a fuzz test
|
||||
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
|
||||
|
||||
## Why we need them
|
||||
- Find bugs by leveraging random generation
|
||||
@@ -13,7 +13,7 @@ Fuzz test is tool that leverage deterministic random generation to assist in fin
|
||||
All fuzz test-related resources are located in the `/tests-fuzz` directory.
|
||||
There are two types of resources: (1) fundamental components and (2) test targets.
|
||||
|
||||
### Fundamental components
|
||||
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.
|
||||
|
||||
### Test targets
|
||||
@@ -21,25 +21,25 @@ They are located in the `/tests-fuzz/targets` directory, with each file represen
|
||||
|
||||
Figure 1 illustrates how the fundamental components of the fuzz test provide the ability to generate random SQL. They utilize a Random Number Generator (Rng) to generate the Intermediate Representation (IR), then employ a DialectTranslator to produce the specified dialects for different protocols. Finally, the fuzz tests send the generated SQL via the specified protocol and verify that the execution results meet expectations.
```
Rng
 |
 v
ExprGenerator
 |
 v
Intermediate representation (IR)
 |
 +----------------------+----------------------+
 |                      |                      |
 v                      v                      v
MySQLTranslator   PostgreSQLTranslator   OtherDialectTranslator
 |                      |                      |
 v                      v                      v
SQL(MySQL Dialect)    .....                  .....
 |
 v
```
@@ -133,4 +133,4 @@ fuzz_target!(|input: FuzzInput| {
|
||||
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
|
||||
```
|
||||
|
||||
For more details, please refer to this [document](/tests-fuzz/README.md).
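As a complement to the run command above, a hedged sketch for discovering targets. It assumes `cargo fuzz list` accepts `--fuzz-dir` the same way `cargo fuzz run` does; the target name is a placeholder:

```bash
# List the available fuzz targets, then run one of them.
cargo fuzz list --fuzz-dir tests-fuzz
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```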
|
||||
|
||||
docs/how-to/memory-profile-scripts/scripts/README.md (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
# Memory Analysis Process
This section guides you through the process of analyzing memory usage of GreptimeDB. A combined example run is sketched after the list below.

1. Get the `jeprof` tool script; see the next section ("Getting the `jeprof` tool") for details.

2. After starting `greptimedb` (with the env var `MALLOC_CONF=prof:true`), execute the `dump.sh` script with the PID of the `greptimedb` process as an argument. It continuously monitors memory usage and captures profiles when growth exceeds a threshold (e.g. +20MB within 10 minutes), writing `greptime-{timestamp}.gprof` files.

3. With 2-3 gprof files, run `gen_flamegraph.sh` in the same environment to generate flame graphs showing memory allocation call stacks.

4. **NOTE:** The `gen_flamegraph.sh` script requires `jeprof` and optionally `flamegraph.pl` to be in the current directory. If you need to generate the flame graph right away, run the `get_flamegraph_tool.sh` script, which downloads the flame graph generation tool `flamegraph.pl` to the current directory.
   The usage of `gen_flamegraph.sh` is:

   `Usage: ./gen_flamegraph.sh <binary_path> <gprof_directory>`
   where `<binary_path>` is the path to the greptimedb binary and `<gprof_directory>` is the directory containing the gprof files (the directory `dump.sh` is dumping profiles to).
   Example call: `./gen_flamegraph.sh ./greptime .`

   Generating the flame graphs might take a few minutes. The generated flame graphs are located in the `<gprof_directory>/flamegraphs` directory. If no `flamegraph.pl` is found, the directory will only contain `.collapse` files, which is also fine.
5. Send the generated flame graphs (the entire `<gprof_directory>/flamegraphs` folder) to developers for further analysis.
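Putting the steps together, a sketch of a typical session. The binary path, working directory, and PID lookup are placeholders; `MALLOC_CONF=prof:true` and the script arguments come from the steps above:

```bash
# 1. Start greptimedb with jemalloc profiling enabled.
MALLOC_CONF=prof:true ./greptime standalone start &

# 2. In another terminal, monitor it and dump profiles on memory growth.
./dump.sh "$(pgrep -x greptime)"

# 3. Once a few greptime-*.gprof files exist, build the flame graphs.
./gen_flamegraph.sh ./greptime .
```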
|
||||
|
||||
|
||||
## Getting the `jeprof` tool
|
||||
There are three ways to get `jeprof`, listed here from simple to complex. Using any one of them is fine, as long as it is done in the same environment that `greptimedb` will run on:
|
||||
1. If you are compiling greptimedb from source, then `jeprof` is already produced during compilation. After running `cargo build`, execute `find_compiled_jeprof.sh`. This will copy `jeprof` to the current directory.
|
||||
2. Or, if you have the Rust toolchain installed locally, simply follow these commands:
|
||||
```bash
|
||||
cargo new get_jeprof
|
||||
cd get_jeprof
|
||||
```
|
||||
Then add this dependency to `Cargo.toml`:
|
||||
```toml
|
||||
[dependencies]
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats"] }
|
||||
```
|
||||
then run:
|
||||
```bash
|
||||
cargo build
|
||||
```
|
||||
After that, the `jeprof` tool has been produced. Now run `find_compiled_jeprof.sh` in the current directory; it will copy `jeprof` to the current directory.
|
||||
|
||||
3. Compile jemalloc from source. First clone the repo and check out this commit:
|
||||
```bash
|
||||
git clone https://github.com/tikv/jemalloc.git
|
||||
cd jemalloc
|
||||
git checkout e13ca993e8ccb9ba9847cc330696e02839f328f7
|
||||
```
|
||||
then run:
|
||||
```bash
|
||||
./configure
|
||||
make
|
||||
```
|
||||
`jeprof` will then be in the `.bin/` directory; copy it to the current directory.
|
||||
docs/how-to/memory-profile-scripts/scripts/dump.sh (new executable file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Monitors greptime process memory usage every 10 minutes
|
||||
# Triggers memory profile capture via `curl -X POST localhost:4000/debug/prof/mem > greptime-{timestamp}.gprof`
|
||||
# when memory increases by more than 20MB since last check
|
||||
# Generated profiles can be analyzed using flame graphs as described in `how-to-profile-memory.md`
|
||||
# (jeprof is compiled with the database - see documentation)
|
||||
# Alternative: Share binaries + profiles for analysis (Docker images preferred)
|
||||
|
||||
# Threshold in Kilobytes (20 MB)
|
||||
threshold_kb=$((20 * 1024))
|
||||
sleep_interval=$((10 * 60))
|
||||
|
||||
# Variable to store the last measured memory usage in KB
|
||||
last_mem_kb=0
|
||||
|
||||
echo "Starting memory monitoring for 'greptime' process..."
|
||||
|
||||
while true; do
|
||||
|
||||
# Check if PID is provided as an argument
|
||||
if [ -z "$1" ]; then
|
||||
echo "$(date): PID must be provided as a command-line argument."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pid="$1"
|
||||
|
||||
# Validate that the PID is a number
|
||||
if ! [[ "$pid" =~ ^[0-9]+$ ]]; then
|
||||
echo "$(date): Invalid PID: '$pid'. PID must be a number."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get the current Resident Set Size (RSS) in Kilobytes
|
||||
current_mem_kb=$(ps -o rss= -p "$pid")
|
||||
|
||||
# Check if ps command was successful and returned a number
|
||||
if ! [[ "$current_mem_kb" =~ ^[0-9]+$ ]]; then
|
||||
echo "$(date): Failed to get memory usage for PID $pid. Skipping check."
|
||||
# Keep last_mem_kb to avoid false positives if the process briefly becomes unreadable.
# Sleep before retrying so a missing/unreadable process doesn't turn this into a busy loop.
sleep $sleep_interval
continue
|
||||
fi
|
||||
|
||||
echo "$(date): Current memory usage for PID $pid: ${current_mem_kb} KB"
|
||||
|
||||
# Compare with the last measurement
|
||||
# if it's the first run, also do a baseline dump just to make sure we can dump
|
||||
|
||||
diff_kb=$((current_mem_kb - last_mem_kb))
|
||||
echo "$(date): Memory usage change since last check: ${diff_kb} KB"
|
||||
|
||||
if [ "$diff_kb" -gt "$threshold_kb" ]; then
|
||||
echo "$(date): Memory increase (${diff_kb} KB) exceeded threshold (${threshold_kb} KB). Dumping profile..."
|
||||
timestamp=$(date +%Y%m%d%H%M%S)
|
||||
profile_file="greptime-${timestamp}.gprof"
|
||||
# Execute curl and capture output to file
|
||||
if curl -sf -X POST localhost:4000/debug/prof/mem > "$profile_file"; then
|
||||
echo "$(date): Memory profile saved to $profile_file"
|
||||
else
|
||||
echo "$(date): Failed to dump memory profile (curl exit code: $?)."
|
||||
# Remove the potentially empty/failed profile file
|
||||
rm -f "$profile_file"
|
||||
fi
|
||||
else
|
||||
echo "$(date): Memory increase (${diff_kb} KB) is within the threshold (${threshold_kb} KB)."
|
||||
fi
|
||||
|
||||
|
||||
# Update the last memory usage
|
||||
last_mem_kb=$current_mem_kb
|
||||
|
||||
# Wait for the next check (sleep_interval is 10 minutes by default)
|
||||
echo "$(date): Sleeping for $sleep_interval seconds..."
|
||||
sleep $sleep_interval
|
||||
done
|
||||
|
||||
echo "Memory monitoring script stopped." # This line might not be reached in normal operation
|
||||
docs/how-to/memory-profile-scripts/scripts/find_compiled_jeprof.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Locates compiled jeprof binary (memory analysis tool) after cargo build
|
||||
# Copies it to current directory from target/ build directories
|
||||
|
||||
JPROF_PATH=$(find . -name 'jeprof' -print -quit)
|
||||
if [ -n "$JPROF_PATH" ]; then
|
||||
echo "Found jeprof at $JPROF_PATH"
|
||||
cp "$JPROF_PATH" .
|
||||
chmod +x jeprof
|
||||
echo "Copied jeprof to current directory and made it executable."
|
||||
else
|
||||
echo "jeprof not found"
|
||||
exit 1
|
||||
fi
|
||||
docs/how-to/memory-profile-scripts/scripts/gen_flamegraph.sh (new executable file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
#!/bin/bash

# Generate flame graphs from a series of `.gprof` files.
# First argument: path to the binary executable.
# Second argument: path to the directory containing the gprof files.
# Requires `jeprof` and `flamegraph.pl` in the current directory.
# What this script essentially does is:
#   ./jeprof <binary> <gprof> --collapse | ./flamegraph.pl > <output>
# For differential analysis between consecutive profiles:
#   ./jeprof <binary> --base <gprof1> <gprof2> --collapse | ./flamegraph.pl > <output_diff>

set -e # Exit immediately if a command exits with a non-zero status.

# Check for required tools
if [ ! -f "./jeprof" ]; then
    echo "Error: jeprof not found in the current directory."
    exit 1
fi

if [ ! -f "./flamegraph.pl" ]; then
    echo "Error: flamegraph.pl not found in the current directory."
    exit 1
fi

# Check arguments
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <binary_path> <gprof_directory>"
    exit 1
fi

BINARY_PATH=$1
GPROF_DIR=$2
OUTPUT_DIR="${GPROF_DIR}/flamegraphs" # Store outputs in a subdirectory

if [ ! -f "$BINARY_PATH" ]; then
    echo "Error: Binary file not found at $BINARY_PATH"
    exit 1
fi

if [ ! -d "$GPROF_DIR" ]; then
    echo "Error: gprof directory not found at $GPROF_DIR"
    exit 1
fi

mkdir -p "$OUTPUT_DIR"
echo "Generating flamegraphs in $OUTPUT_DIR"

# Find and sort the gprof files.
# Use find + sort -V for a natural sort of version numbers if present in filenames.
# Use null-terminated strings for safety with find/sort.
mapfile -d $'\0' gprof_files < <(find "$GPROF_DIR" -maxdepth 1 -name '*.gprof' -print0 | sort -zV)

if [ ${#gprof_files[@]} -eq 0 ]; then
    echo "No .gprof files found in $GPROF_DIR"
    exit 0
fi

prev_gprof=""

# Generate flamegraphs
for gprof_file in "${gprof_files[@]}"; do
    # Skip empty entries if any
    if [ -z "$gprof_file" ]; then
        continue
    fi

    filename=$(basename "$gprof_file" .gprof)
    output_collapse="${OUTPUT_DIR}/${filename}.collapse"
    output_svg="${OUTPUT_DIR}/${filename}.svg"
    echo "Generating collapse file for $gprof_file -> $output_collapse"
    ./jeprof "$BINARY_PATH" "$gprof_file" --collapse > "$output_collapse"
    echo "Generating flamegraph for $gprof_file -> $output_svg"
    ./flamegraph.pl "$output_collapse" > "$output_svg" || true

    # Generate a diff flamegraph if this is not the first file
    if [ -n "$prev_gprof" ]; then
        prev_filename=$(basename "$prev_gprof" .gprof)
        diff_output_collapse="${OUTPUT_DIR}/${prev_filename}_vs_${filename}_diff.collapse"
        diff_output_svg="${OUTPUT_DIR}/${prev_filename}_vs_${filename}_diff.svg"
        echo "Generating diff collapse file for $prev_gprof vs $gprof_file -> $diff_output_collapse"
        ./jeprof "$BINARY_PATH" --base "$prev_gprof" "$gprof_file" --collapse > "$diff_output_collapse"
        echo "Generating diff flamegraph for $prev_gprof vs $gprof_file -> $diff_output_svg"
        ./flamegraph.pl "$diff_output_collapse" > "$diff_output_svg" || true
    fi

    prev_gprof="$gprof_file"
done

echo "Flamegraph generation complete."
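A hedged usage example; `./greptime` and `./profiles` are placeholder paths for the binary and the directory of collected `.gprof` files:

```bash
./gen_flamegraph.sh ./greptime ./profiles
# Per-profile SVGs plus diff SVGs for consecutive profiles end up in ./profiles/flamegraphs/
```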
44
docs/how-to/memory-profile-scripts/scripts/gen_from_collapse.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

# Generate flame graphs from .collapse files.
# Argument: path to the directory containing the collapse files.
# Requires `flamegraph.pl` in the current directory.

# Check if flamegraph.pl exists
if [ ! -f "./flamegraph.pl" ]; then
    echo "Error: flamegraph.pl not found in the current directory."
    exit 1
fi

# Check if a directory argument is provided
if [ -z "$1" ]; then
    echo "Usage: $0 <collapse_directory>"
    exit 1
fi

COLLAPSE_DIR=$1

# Check if the provided argument is a directory
if [ ! -d "$COLLAPSE_DIR" ]; then
    echo "Error: '$COLLAPSE_DIR' is not a valid directory."
    exit 1
fi

echo "Generating flame graphs from collapse files in '$COLLAPSE_DIR'..."

# Find and process each .collapse file
find "$COLLAPSE_DIR" -maxdepth 1 -name "*.collapse" -print0 | while IFS= read -r -d $'\0' collapse_file; do
    if [ -f "$collapse_file" ]; then
        # Construct the output SVG filename
        svg_file="${collapse_file%.collapse}.svg"
        echo "Generating $svg_file from $collapse_file..."
        ./flamegraph.pl "$collapse_file" > "$svg_file"
        if [ $? -ne 0 ]; then
            echo "Error generating flame graph for $collapse_file"
        else
            echo "Successfully generated $svg_file"
        fi
    fi
done

echo "Flame graph generation complete."
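Usage sketch, with a placeholder directory:

```bash
./gen_from_collapse.sh ./profiles/flamegraphs
```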
6
docs/how-to/memory-profile-scripts/scripts/get_flamegraph_tool.sh
Executable file
@@ -0,0 +1,6 @@
#!/bin/bash

# Download flamegraph.pl (the flame graph generation script) into the current directory.

curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
chmod +x ./flamegraph.pl
77
docs/rfcs/2025-02-06-remote-wal-purge.md
Normal file
@@ -0,0 +1,77 @@
---
Feature Name: Remote WAL Purge
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/5474
Date: 2025-02-06
Author: "Yuhan Wang <profsyb@gmail.com>"
---

# Summary

This RFC proposes a method for purging remote WAL entries in the database.

# Motivation

Currently only local WAL entries are purged on flush; remote WAL entries are never cleaned up.

# Details

```mermaid
sequenceDiagram
    Region0->>Kafka: Last entry id of the topic in use
    Region0->>WALPruner: Heartbeat with last entry id
    WALPruner->>+WALPruner: Time Loop
    WALPruner->>+ProcedureManager: Submit purge procedure
    ProcedureManager->>Region0: Flush request
    ProcedureManager->>Kafka: Prune WAL entries
    Region0->>Region0: Flush
```

## Steps

### Before purge

Before purging remote WAL, metasrv needs to know:

1. The `last_entry_id` of each region.
2. The `kafka_topic_last_entry_id`, i.e. the last entry id of the topic in use. It can be lazily updated and is needed when a region has an empty memtable.
3. The Kafka topics that each region uses.

These states are maintained through:
1. Heartbeat: the datanode sends `last_entry_id` to metasrv in heartbeats. For a region with an empty memtable, `last_entry_id` should equal `kafka_topic_last_entry_id`.
2. Metasrv maintains a topic-region map to know which region uses which topic.

`kafka_topic_last_entry_id` is maintained by the region itself. The region updates the value after `k` heartbeats if the memtable is empty.
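A minimal sketch of the bookkeeping this implies on the metasrv side; the type and field names below are illustrative only, not the actual GreptimeDB definitions:

```rust
use std::collections::HashMap;

// Hypothetical aliases for illustration.
type RegionId = u64;
type EntryId = u64;
type Topic = String;

/// Illustrative state tracked for WAL pruning.
#[derive(Default)]
struct WalPruneState {
    /// Latest WAL entry id reported by each region via heartbeat.
    last_entry_ids: HashMap<RegionId, EntryId>,
    /// Regions served by each Kafka topic (the topic-region map).
    topic_regions: HashMap<Topic, Vec<RegionId>>,
    /// Highest entry id already purged for each topic, persisted in the kvbackend.
    last_purged_entry_ids: HashMap<Topic, EntryId>,
}
```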
### Purge procedure

We can handle locks better by reusing the existing procedure framework; the flow is quite similar to the region migration procedure.

Periodically, metasrv submits a purge procedure to the ProcedureManager. The purge applies to all topics.

The procedure is divided into the following stages:

1. Preparation:
   - Retrieve the `last_entry_id` of each region from the kvbackend.
   - Choose regions with a relatively small `last_entry_id` as candidate regions, i.e. regions that need to receive a flush request.
2. Communication:
   - Send flush requests to the candidate regions.
3. Purge (sketched below):
   - Choose the proper entry id to delete up to for each topic. It should be the smallest `last_entry_id - 1` among all regions using the topic.
   - Delete the legacy entries in Kafka.
   - Store the `last_purged_entry_id` in the kvbackend. It should be locked to prevent other regions from replaying the purged entries.
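The pruning decision in stage 3, sketched with the illustrative types from the earlier snippet (not the actual procedure code):

```rust
/// Returns the entry id up to which a topic's WAL can be pruned, or `None`
/// if the topic has no regions or no reported entry ids yet.
fn prune_target(state: &WalPruneState, topic: &str) -> Option<EntryId> {
    let regions = state.topic_regions.get(topic)?;
    regions
        .iter()
        .filter_map(|region| state.last_entry_ids.get(region))
        // The prune point is bounded by the slowest region: the smallest
        // `last_entry_id - 1` among all regions using this topic.
        .map(|id| id.saturating_sub(1))
        .min()
}
```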
### After purge

After a purge, some regions may have a `last_entry_id` smaller than the entry we just deleted. This is legal since we only delete entries that are no longer needed.

When restarting, a region should query the `last_purged_entry_id` from metasrv and replay from `min(last_entry_id, last_purged_entry_id)`.

### Error handling

No persisted procedure state is needed since all states are maintained in the kvbackend.

Retry when retrieving metadata from the kvbackend fails.

# Alternatives

The purge could be triggered by the size of the WAL entries instead of a fixed period of time, which may be more efficient.
19
grafana/check.sh
Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")

# Use jq to check for panels with empty or missing descriptions
invalid_panels=$(cat "$BASEDIR/greptimedb-cluster.json" | jq -r '
  .panels[]
  | select((.type == "stats" or .type == "timeseries") and (.description == "" or .description == null))
')

# Check if any invalid panels were found
if [[ -n "$invalid_panels" ]]; then
    echo "Error: The following panels have empty or missing descriptions:"
    echo "$invalid_panels"
    exit 1
else
    echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
    exit 0
fi
File diff suppressed because it is too large
11
grafana/summary.sh
Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")
echo '| Title | Description | Expressions |
|---|---|---|'

cat "$BASEDIR/greptimedb-cluster.json" | jq -r '
  .panels |
  map(select(.type == "stat" or .type == "timeseries")) |
  .[] | "| \(.title) | \(.description | gsub("\n"; "<br>")) | \(.targets | map(.expr // .rawSql | "`\(.|gsub("\n"; "<br>"))`") | join("<br>")) |"
'
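A possible invocation, writing the generated markdown table of panel titles, descriptions, and expressions to a file; the output path is only an example:

```bash
./grafana/summary.sh > grafana/dashboard-summary.md
```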
74
scripts/check-super-imports.py
Normal file
@@ -0,0 +1,74 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
from multiprocessing import Pool


def find_rust_files(directory):
    rust_files = []
    for root, _, files in os.walk(directory):
        # Skip files with "test" in the path
        if "test" in root.lower():
            continue

        for file in files:
            # Skip files with "test" in the filename
            if "test" in file.lower():
                continue

            if file.endswith(".rs"):
                rust_files.append(os.path.join(root, file))
    return rust_files


def check_file_for_super_import(file_path):
    with open(file_path, "r") as file:
        lines = file.readlines()

    violations = []
    for line_number, line in enumerate(lines, 1):
        # Check for "use super::" without leading tab
        if line.startswith("use super::"):
            violations.append((line_number, line.strip()))

    if violations:
        return file_path, violations
    return None


def main():
    rust_files = find_rust_files(".")

    with Pool() as pool:
        results = pool.map(check_file_for_super_import, rust_files)

    # Filter out None results
    violations = [result for result in results if result]

    if violations:
        print("Found 'use super::' without leading tab in the following files:")
        counter = 1
        for file_path, file_violations in violations:
            for line_number, line in file_violations:
                print(f"{counter:>5} {file_path}:{line_number} - {line}")
                counter += 1
        raise SystemExit(1)
    else:
        print("No 'use super::' without leading tab found. All files are compliant.")


if __name__ == "__main__":
    main()
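A usage sketch, assuming it is run from the repository root (how CI wires it up is not shown here):

```bash
python3 scripts/check-super-imports.py
# Exits with status 1 and lists the offending locations if any top-level `use super::` import is found.
```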
@@ -19,9 +19,7 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
|
||||
use common_decimal::Decimal128;
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{
|
||||
Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
|
||||
};
|
||||
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
|
||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::types::{
|
||||
@@ -29,8 +27,8 @@ use datatypes::types::{
|
||||
};
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
|
||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
BinaryVector, BooleanVector, DateVector, Decimal128Vector, Float32Vector, Float64Vector,
|
||||
Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||
@@ -118,7 +116,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ColumnDataType::Json => ConcreteDataType::json_datatype(),
|
||||
ColumnDataType::String => ConcreteDataType::string_datatype(),
|
||||
ColumnDataType::Date => ConcreteDataType::date_datatype(),
|
||||
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
|
||||
ColumnDataType::Datetime => ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ColumnDataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
|
||||
ColumnDataType::TimestampMillisecond => {
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
@@ -271,7 +269,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
|
||||
ConcreteDataType::String(_) => ColumnDataType::String,
|
||||
ConcreteDataType::Date(_) => ColumnDataType::Date,
|
||||
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
|
||||
ConcreteDataType::Timestamp(t) => match t {
|
||||
TimestampType::Second(_) => ColumnDataType::TimestampSecond,
|
||||
TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
|
||||
@@ -476,7 +473,6 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
|
||||
Value::Binary(val) => values.binary_values.push(val.to_vec()),
|
||||
Value::Date(val) => values.date_values.push(val.val()),
|
||||
Value::DateTime(val) => values.datetime_values.push(val.val()),
|
||||
Value::Timestamp(val) => match val.unit() {
|
||||
TimeUnit::Second => values.timestamp_second_values.push(val.value()),
|
||||
TimeUnit::Millisecond => values.timestamp_millisecond_values.push(val.value()),
|
||||
@@ -577,12 +573,11 @@ pub fn pb_value_to_value_ref<'a>(
|
||||
ValueData::BinaryValue(bytes) => ValueRef::Binary(bytes.as_slice()),
|
||||
ValueData::StringValue(string) => ValueRef::String(string.as_str()),
|
||||
ValueData::DateValue(d) => ValueRef::Date(Date::from(*d)),
|
||||
ValueData::DatetimeValue(d) => ValueRef::DateTime(DateTime::new(*d)),
|
||||
ValueData::TimestampSecondValue(t) => ValueRef::Timestamp(Timestamp::new_second(*t)),
|
||||
ValueData::TimestampMillisecondValue(t) => {
|
||||
ValueRef::Timestamp(Timestamp::new_millisecond(*t))
|
||||
}
|
||||
ValueData::TimestampMicrosecondValue(t) => {
|
||||
ValueData::DatetimeValue(t) | ValueData::TimestampMicrosecondValue(t) => {
|
||||
ValueRef::Timestamp(Timestamp::new_microsecond(*t))
|
||||
}
|
||||
ValueData::TimestampNanosecondValue(t) => {
|
||||
@@ -651,7 +646,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
ConcreteDataType::Binary(_) => Arc::new(BinaryVector::from(values.binary_values)),
|
||||
ConcreteDataType::String(_) => Arc::new(StringVector::from_vec(values.string_values)),
|
||||
ConcreteDataType::Date(_) => Arc::new(DateVector::from_vec(values.date_values)),
|
||||
ConcreteDataType::DateTime(_) => Arc::new(DateTimeVector::from_vec(values.datetime_values)),
|
||||
ConcreteDataType::Timestamp(unit) => match unit {
|
||||
TimestampType::Second(_) => Arc::new(TimestampSecondVector::from_vec(
|
||||
values.timestamp_second_values,
|
||||
@@ -787,11 +781,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
.into_iter()
|
||||
.map(|val| val.into())
|
||||
.collect(),
|
||||
ConcreteDataType::DateTime(_) => values
|
||||
.datetime_values
|
||||
.into_iter()
|
||||
.map(|v| Value::DateTime(v.into()))
|
||||
.collect(),
|
||||
ConcreteDataType::Date(_) => values
|
||||
.date_values
|
||||
.into_iter()
|
||||
@@ -947,9 +936,6 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
||||
Value::Date(v) => v1::Value {
|
||||
value_data: Some(ValueData::DateValue(v.val())),
|
||||
},
|
||||
Value::DateTime(v) => v1::Value {
|
||||
value_data: Some(ValueData::DatetimeValue(v.val())),
|
||||
},
|
||||
Value::Timestamp(v) => match v.unit() {
|
||||
TimeUnit::Second => v1::Value {
|
||||
value_data: Some(ValueData::TimestampSecondValue(v.value())),
|
||||
@@ -1066,7 +1052,6 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
Value::String(v) => Some(ValueData::StringValue(v.as_utf8().to_string())),
|
||||
Value::Binary(v) => Some(ValueData::BinaryValue(v.to_vec())),
|
||||
Value::Date(v) => Some(ValueData::DateValue(v.val())),
|
||||
Value::DateTime(v) => Some(ValueData::DatetimeValue(v.val())),
|
||||
Value::Timestamp(v) => Some(match v.unit() {
|
||||
TimeUnit::Second => ValueData::TimestampSecondValue(v.value()),
|
||||
TimeUnit::Millisecond => ValueData::TimestampMillisecondValue(v.value()),
|
||||
@@ -1248,7 +1233,7 @@ mod tests {
|
||||
ColumnDataTypeWrapper::date_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ColumnDataTypeWrapper::datetime_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
@@ -1339,10 +1324,6 @@ mod tests {
|
||||
ColumnDataTypeWrapper::date_datatype(),
|
||||
ConcreteDataType::date_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::datetime_datatype(),
|
||||
ConcreteDataType::datetime_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
@@ -1830,17 +1811,6 @@ mod tests {
|
||||
]
|
||||
);
|
||||
|
||||
test_convert_values!(
|
||||
datetime,
|
||||
vec![1.into(), 2.into(), 3.into()],
|
||||
datetime,
|
||||
vec![
|
||||
Value::DateTime(1.into()),
|
||||
Value::DateTime(2.into()),
|
||||
Value::DateTime(3.into())
|
||||
]
|
||||
);
|
||||
|
||||
#[test]
|
||||
fn test_vectors_to_rows_for_different_types() {
|
||||
let boolean_vec = BooleanVector::from_vec(vec![true, false, true]);
|
||||
|
||||
@@ -15,10 +15,13 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexType,
|
||||
COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
|
||||
SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
||||
SKIPPING_INDEX_KEY,
|
||||
};
|
||||
use greptime_proto::v1::{
|
||||
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
|
||||
};
|
||||
use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -103,6 +106,13 @@ pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
|
||||
.is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
|
||||
}
|
||||
|
||||
/// Checks if the `ColumnOptions` contains skipping index options.
|
||||
pub fn contains_skipping(options: &Option<ColumnOptions>) -> bool {
|
||||
options
|
||||
.as_ref()
|
||||
.is_some_and(|o| o.options.contains_key(SKIPPING_INDEX_GRPC_KEY))
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
|
||||
pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
|
||||
let mut options = ColumnOptions::default();
|
||||
@@ -113,14 +123,43 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
|
||||
Ok((!options.options.is_empty()).then_some(options))
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnOptions` from the given `SkippingIndexOptions`.
|
||||
pub fn options_from_skipping(skipping: &SkippingIndexOptions) -> Result<Option<ColumnOptions>> {
|
||||
let mut options = ColumnOptions::default();
|
||||
|
||||
let v = serde_json::to_string(skipping).context(error::SerializeJsonSnafu)?;
|
||||
options
|
||||
.options
|
||||
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), v);
|
||||
|
||||
Ok((!options.options.is_empty()).then_some(options))
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnOptions` for inverted index.
|
||||
pub fn options_from_inverted() -> ColumnOptions {
|
||||
let mut options = ColumnOptions::default();
|
||||
options
|
||||
.options
|
||||
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string());
|
||||
options
|
||||
}
|
||||
|
||||
/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
|
||||
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||
pub fn as_fulltext_option_analyzer(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||
match analyzer {
|
||||
Analyzer::English => FulltextAnalyzer::English,
|
||||
Analyzer::Chinese => FulltextAnalyzer::Chinese,
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to construct a `FulltextBackend` from the given backend.
|
||||
pub fn as_fulltext_option_backend(backend: PbFulltextBackend) -> FulltextBackend {
|
||||
match backend {
|
||||
PbFulltextBackend::Bloom => FulltextBackend::Bloom,
|
||||
PbFulltextBackend::Tantivy => FulltextBackend::Tantivy,
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to construct a `SkippingIndexType` from the given skipping index type.
|
||||
pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
|
||||
match skipping_index_type {
|
||||
@@ -132,7 +171,7 @@ pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> Skipp
|
||||
mod tests {
|
||||
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::FulltextAnalyzer;
|
||||
use datatypes::schema::{FulltextAnalyzer, FulltextBackend};
|
||||
|
||||
use super::*;
|
||||
use crate::v1::ColumnDataType;
|
||||
@@ -191,13 +230,14 @@ mod tests {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
})
|
||||
.unwrap();
|
||||
schema.set_inverted_index(true);
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
assert_eq!(
|
||||
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
|
||||
@@ -211,11 +251,12 @@ mod tests {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
};
|
||||
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -38,6 +38,7 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
@@ -286,6 +287,28 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<Vec<TableRef>> {
|
||||
let table_info_values = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.batch_get(table_ids)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?;
|
||||
|
||||
let tables = table_info_values
|
||||
.into_values()
|
||||
.filter(|t| t.table_info.catalog_name == catalog && t.table_info.schema_name == schema)
|
||||
.map(build_table)
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
Ok(tables)
|
||||
}
|
||||
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
|
||||
@@ -87,6 +87,14 @@ pub trait CatalogManager: Send + Sync {
|
||||
query_ctx: Option<&QueryContext>,
|
||||
) -> Result<Option<TableRef>>;
|
||||
|
||||
/// Returns the tables by table ids.
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<Vec<TableRef>>;
|
||||
|
||||
/// Returns all tables with a stream by catalog and schema.
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use async_stream::{stream, try_stream};
|
||||
@@ -28,6 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
@@ -143,6 +144,33 @@ impl CatalogManager for MemoryCatalogManager {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<Vec<TableRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
|
||||
let schemas = catalogs.get(catalog).context(CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?;
|
||||
|
||||
let tables = schemas
|
||||
.get(schema)
|
||||
.context(SchemaNotFoundSnafu { catalog, schema })?;
|
||||
|
||||
let filter_ids: HashSet<_> = table_ids.iter().collect();
|
||||
// It is very inefficient, but we do not need to optimize it since it will not be called in `MemoryCatalogManager`.
|
||||
let tables = tables
|
||||
.values()
|
||||
.filter(|t| filter_ids.contains(&t.table_info().table_id()))
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(tables)
|
||||
}
|
||||
|
||||
fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
|
||||
@@ -77,7 +77,7 @@ trait SystemSchemaProviderInner {
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef>;
|
||||
|
||||
fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
let table_meta = TableMetaBuilder::empty()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
|
||||
@@ -19,7 +19,7 @@ mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod partitions;
|
||||
mod procedure_info;
|
||||
mod region_peers;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
@@ -49,7 +49,6 @@ pub use table_names::*;
|
||||
use views::InformationSchemaViews;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
|
||||
@@ -63,7 +62,9 @@ use crate::system_schema::information_schema::table_constraints::InformationSche
|
||||
use crate::system_schema::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
pub(crate) use crate::system_schema::predicate::Predicates;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::system_schema::{
|
||||
SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
|
||||
};
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
|
||||
@@ -36,9 +36,8 @@ use datatypes::vectors::{
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::CLUSTER_INFO;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
|
||||
@@ -38,11 +38,11 @@ use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::{InformationTable, COLUMNS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::information_schema::{InformationTable, COLUMNS};
|
||||
use crate::CatalogManager;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -56,6 +56,8 @@ pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const COLUMN_NAME: &str = "column_name";
|
||||
pub const REGION_ID: &str = "region_id";
|
||||
pub const PEER_ID: &str = "peer_id";
|
||||
const ORDINAL_POSITION: &str = "ordinal_position";
|
||||
const CHARACTER_MAXIMUM_LENGTH: &str = "character_maximum_length";
|
||||
const CHARACTER_OCTET_LENGTH: &str = "character_octet_length";
|
||||
@@ -365,10 +367,6 @@ impl InformationSchemaColumnsBuilder {
|
||||
self.numeric_scales.push(None);
|
||||
|
||||
match &column_schema.data_type {
|
||||
ConcreteDataType::DateTime(datetime_type) => {
|
||||
self.datetime_precisions
|
||||
.push(Some(datetime_type.precision() as i64));
|
||||
}
|
||||
ConcreteDataType::Timestamp(ts_type) => {
|
||||
self.datetime_precisions
|
||||
.push(Some(ts_type.precision() as i64));
|
||||
|
||||
@@ -28,16 +28,19 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::timestamp::TimestampMillisecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
|
||||
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
|
||||
UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
|
||||
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
|
||||
Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{Predicates, FLOWS};
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
@@ -59,6 +62,10 @@ pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
|
||||
pub const SINK_TABLE_NAME: &str = "sink_table_name";
|
||||
pub const FLOWNODE_IDS: &str = "flownode_ids";
|
||||
pub const OPTIONS: &str = "options";
|
||||
pub const CREATED_TIME: &str = "created_time";
|
||||
pub const UPDATED_TIME: &str = "updated_time";
|
||||
pub const LAST_EXECUTION_TIME: &str = "last_execution_time";
|
||||
pub const SOURCE_TABLE_NAMES: &str = "source_table_names";
|
||||
|
||||
/// The `information_schema.flows` to provides information about flows in databases.
|
||||
#[derive(Debug)]
|
||||
@@ -99,6 +106,14 @@ impl InformationSchemaFlows {
|
||||
(SINK_TABLE_NAME, CDT::string_datatype(), false),
|
||||
(FLOWNODE_IDS, CDT::string_datatype(), true),
|
||||
(OPTIONS, CDT::string_datatype(), true),
|
||||
(CREATED_TIME, CDT::timestamp_millisecond_datatype(), false),
|
||||
(UPDATED_TIME, CDT::timestamp_millisecond_datatype(), false),
|
||||
(
|
||||
LAST_EXECUTION_TIME,
|
||||
CDT::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
),
|
||||
(SOURCE_TABLE_NAMES, CDT::string_datatype(), true),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
|
||||
@@ -170,6 +185,10 @@ struct InformationSchemaFlowsBuilder {
|
||||
sink_table_names: StringVectorBuilder,
|
||||
flownode_id_groups: StringVectorBuilder,
|
||||
option_groups: StringVectorBuilder,
|
||||
created_time: TimestampMillisecondVectorBuilder,
|
||||
updated_time: TimestampMillisecondVectorBuilder,
|
||||
last_execution_time: TimestampMillisecondVectorBuilder,
|
||||
source_table_names: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlowsBuilder {
|
||||
@@ -196,6 +215,10 @@ impl InformationSchemaFlowsBuilder {
|
||||
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
created_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
updated_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
last_execution_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
source_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,13 +258,14 @@ impl InformationSchemaFlowsBuilder {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
flow_name: flow_name.to_string(),
|
||||
})?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)
|
||||
.await?;
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_flow(
|
||||
async fn add_flow(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
flow_id: FlowId,
|
||||
@@ -290,6 +314,36 @@ impl InformationSchemaFlowsBuilder {
|
||||
input: format!("{:?}", flow_info.options()),
|
||||
},
|
||||
)?));
|
||||
self.created_time
|
||||
.push(Some(flow_info.created_time().timestamp_millis().into()));
|
||||
self.updated_time
|
||||
.push(Some(flow_info.updated_time().timestamp_millis().into()));
|
||||
self.last_execution_time
|
||||
.push(flow_stat.as_ref().and_then(|state| {
|
||||
state
|
||||
.last_exec_time_map
|
||||
.get(&flow_id)
|
||||
.map(|v| TimestampMillisecond::new(*v))
|
||||
}));
|
||||
|
||||
let mut source_table_names = vec![];
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
source_table_names.extend(
|
||||
catalog_manager
|
||||
.tables_by_ids(&catalog_name, &schema_name, flow_info.source_table_ids())
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|table| table.table_info().full_table_name()),
|
||||
);
|
||||
}
|
||||
|
||||
let source_table_names = source_table_names.join(",");
|
||||
self.source_table_names.push(Some(&source_table_names));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -307,6 +361,10 @@ impl InformationSchemaFlowsBuilder {
|
||||
Arc::new(self.sink_table_names.finish()),
|
||||
Arc::new(self.flownode_id_groups.finish()),
|
||||
Arc::new(self.option_groups.finish()),
|
||||
Arc::new(self.created_time.finish()),
|
||||
Arc::new(self.updated_time.finish()),
|
||||
Arc::new(self.last_execution_time.finish()),
|
||||
Arc::new(self.source_table_names.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
@@ -18,9 +18,9 @@ use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use super::table_names::*;
|
||||
use crate::system_schema::information_schema::table_names::*;
|
||||
use crate::system_schema::utils::tables::{
|
||||
bigint_column, datetime_column, string_column, string_columns,
|
||||
bigint_column, string_column, string_columns, timestamp_micro_column,
|
||||
};
|
||||
|
||||
const NO_VALUE: &str = "NO";
|
||||
@@ -163,17 +163,17 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
string_column("EVENT_BODY"),
|
||||
string_column("EVENT_DEFINITION"),
|
||||
string_column("EVENT_TYPE"),
|
||||
datetime_column("EXECUTE_AT"),
|
||||
timestamp_micro_column("EXECUTE_AT"),
|
||||
bigint_column("INTERVAL_VALUE"),
|
||||
string_column("INTERVAL_FIELD"),
|
||||
string_column("SQL_MODE"),
|
||||
datetime_column("STARTS"),
|
||||
datetime_column("ENDS"),
|
||||
timestamp_micro_column("STARTS"),
|
||||
timestamp_micro_column("ENDS"),
|
||||
string_column("STATUS"),
|
||||
string_column("ON_COMPLETION"),
|
||||
datetime_column("CREATED"),
|
||||
datetime_column("LAST_ALTERED"),
|
||||
datetime_column("LAST_EXECUTED"),
|
||||
timestamp_micro_column("CREATED"),
|
||||
timestamp_micro_column("LAST_ALTERED"),
|
||||
timestamp_micro_column("LAST_EXECUTED"),
|
||||
string_column("EVENT_COMMENT"),
|
||||
bigint_column("ORIGINATOR"),
|
||||
string_column("CHARACTER_SET_CLIENT"),
|
||||
@@ -204,10 +204,10 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
bigint_column("INITIAL_SIZE"),
|
||||
bigint_column("MAXIMUM_SIZE"),
|
||||
bigint_column("AUTOEXTEND_SIZE"),
|
||||
datetime_column("CREATION_TIME"),
|
||||
datetime_column("LAST_UPDATE_TIME"),
|
||||
datetime_column("LAST_ACCESS_TIME"),
|
||||
datetime_column("RECOVER_TIME"),
|
||||
timestamp_micro_column("CREATION_TIME"),
|
||||
timestamp_micro_column("LAST_UPDATE_TIME"),
|
||||
timestamp_micro_column("LAST_ACCESS_TIME"),
|
||||
timestamp_micro_column("RECOVER_TIME"),
|
||||
bigint_column("TRANSACTION_COUNTER"),
|
||||
string_column("VERSION"),
|
||||
string_column("ROW_FORMAT"),
|
||||
@@ -217,9 +217,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
bigint_column("MAX_DATA_LENGTH"),
|
||||
bigint_column("INDEX_LENGTH"),
|
||||
bigint_column("DATA_FREE"),
|
||||
datetime_column("CREATE_TIME"),
|
||||
datetime_column("UPDATE_TIME"),
|
||||
datetime_column("CHECK_TIME"),
|
||||
timestamp_micro_column("CREATE_TIME"),
|
||||
timestamp_micro_column("UPDATE_TIME"),
|
||||
timestamp_micro_column("CHECK_TIME"),
|
||||
string_column("CHECKSUM"),
|
||||
string_column("STATUS"),
|
||||
string_column("EXTRA"),
|
||||
@@ -330,8 +330,8 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
string_column("SQL_DATA_ACCESS"),
|
||||
string_column("SQL_PATH"),
|
||||
string_column("SECURITY_TYPE"),
|
||||
datetime_column("CREATED"),
|
||||
datetime_column("LAST_ALTERED"),
|
||||
timestamp_micro_column("CREATED"),
|
||||
timestamp_micro_column("LAST_ALTERED"),
|
||||
string_column("SQL_MODE"),
|
||||
string_column("ROUTINE_COMMENT"),
|
||||
string_column("DEFINER"),
|
||||
@@ -383,7 +383,7 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
string_column("ACTION_REFERENCE_NEW_TABLE"),
|
||||
string_column("ACTION_REFERENCE_OLD_ROW"),
|
||||
string_column("ACTION_REFERENCE_NEW_ROW"),
|
||||
datetime_column("CREATED"),
|
||||
timestamp_micro_column("CREATED"),
|
||||
string_column("SQL_MODE"),
|
||||
string_column("DEFINER"),
|
||||
string_column("CHARACTER_SET_CLIENT"),
|
||||
|
||||
@@ -24,18 +24,17 @@ use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatch
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::schema::{ColumnSchema, FulltextBackend, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
|
||||
use futures_util::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::KEY_COLUMN_USAGE;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, KEY_COLUMN_USAGE};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||
@@ -48,20 +47,38 @@ pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const COLUMN_NAME: &str = "column_name";
|
||||
pub const ORDINAL_POSITION: &str = "ordinal_position";
|
||||
/// The type of the index.
|
||||
pub const GREPTIME_INDEX_TYPE: &str = "greptime_index_type";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// Primary key constraint name
|
||||
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
|
||||
/// Time index constraint name
|
||||
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
|
||||
pub(crate) const CONSTRAINT_NAME_TIME_INDEX: &str = "TIME INDEX";
|
||||
|
||||
/// Primary key constraint name
|
||||
pub(crate) const CONSTRAINT_NAME_PRI: &str = "PRIMARY";
|
||||
/// Primary key index type
|
||||
pub(crate) const INDEX_TYPE_PRI: &str = "greptime-primary-key-v1";
|
||||
|
||||
/// Inverted index constraint name
|
||||
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
|
||||
pub(crate) const CONSTRAINT_NAME_INVERTED_INDEX: &str = "INVERTED INDEX";
|
||||
/// Inverted index type
|
||||
pub(crate) const INDEX_TYPE_INVERTED_INDEX: &str = "greptime-inverted-index-v1";
|
||||
|
||||
/// Fulltext index constraint name
|
||||
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
|
||||
pub(crate) const CONSTRAINT_NAME_FULLTEXT_INDEX: &str = "FULLTEXT INDEX";
|
||||
/// Fulltext index v1 type
|
||||
pub(crate) const INDEX_TYPE_FULLTEXT_TANTIVY: &str = "greptime-fulltext-index-v1";
|
||||
/// Fulltext index bloom type
|
||||
pub(crate) const INDEX_TYPE_FULLTEXT_BLOOM: &str = "greptime-fulltext-index-bloom";
|
||||
|
||||
/// Skipping index constraint name
|
||||
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
|
||||
pub(crate) const CONSTRAINT_NAME_SKIPPING_INDEX: &str = "SKIPPING INDEX";
|
||||
/// Skipping index type
|
||||
pub(crate) const INDEX_TYPE_SKIPPING_INDEX: &str = "greptime-bloom-filter-v1";
|
||||
|
||||
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
|
||||
///
|
||||
/// Provides an extra column `greptime_index_type` for the index type of the key column.
|
||||
#[derive(Debug)]
|
||||
pub(super) struct InformationSchemaKeyColumnUsage {
|
||||
schema: SchemaRef,
|
||||
@@ -121,6 +138,11 @@ impl InformationSchemaKeyColumnUsage {
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
GREPTIME_INDEX_TYPE,
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
]))
|
||||
}
|
||||
|
||||
@@ -185,6 +207,7 @@ struct InformationSchemaKeyColumnUsageBuilder {
|
||||
column_name: StringVectorBuilder,
|
||||
ordinal_position: UInt32VectorBuilder,
|
||||
position_in_unique_constraint: UInt32VectorBuilder,
|
||||
greptime_index_type: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaKeyColumnUsageBuilder {
|
||||
@@ -207,6 +230,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
ordinal_position: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
position_in_unique_constraint: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
greptime_index_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,34 +254,47 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let mut constraints = vec![];
|
||||
let mut greptime_index_type = vec![];
|
||||
if column.is_time_index() {
|
||||
self.add_key_column_usage(
|
||||
&predicates,
|
||||
&schema_name,
|
||||
TIME_INDEX_CONSTRAINT_NAME,
|
||||
CONSTRAINT_NAME_TIME_INDEX,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
table_name,
|
||||
&column.name,
|
||||
1, //always 1 for time index
|
||||
"",
|
||||
);
|
||||
}
|
||||
// TODO(dimbtp): foreign key constraint not supported yet
|
||||
if keys.contains(&idx) {
|
||||
constraints.push(PRI_CONSTRAINT_NAME);
|
||||
constraints.push(CONSTRAINT_NAME_PRI);
|
||||
greptime_index_type.push(INDEX_TYPE_PRI);
|
||||
}
|
||||
if column.is_inverted_indexed() {
|
||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||
constraints.push(CONSTRAINT_NAME_INVERTED_INDEX);
|
||||
greptime_index_type.push(INDEX_TYPE_INVERTED_INDEX);
|
||||
}
|
||||
if column.is_fulltext_indexed() {
|
||||
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
|
||||
if let Ok(Some(options)) = column.fulltext_options() {
|
||||
if options.enable {
|
||||
constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
|
||||
let index_type = match options.backend {
|
||||
FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
|
||||
FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
|
||||
};
|
||||
greptime_index_type.push(index_type);
|
||||
}
|
||||
}
|
||||
if column.is_skipping_indexed() {
|
||||
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
|
||||
constraints.push(CONSTRAINT_NAME_SKIPPING_INDEX);
|
||||
greptime_index_type.push(INDEX_TYPE_SKIPPING_INDEX);
|
||||
}
|
||||
|
||||
if !constraints.is_empty() {
|
||||
let aggregated_constraints = constraints.join(", ");
|
||||
let aggregated_index_types = greptime_index_type.join(", ");
|
||||
self.add_key_column_usage(
|
||||
&predicates,
|
||||
&schema_name,
|
||||
@@ -267,6 +304,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
table_name,
|
||||
&column.name,
|
||||
idx as u32 + 1,
|
||||
&aggregated_index_types,
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -289,6 +327,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
table_name: &str,
|
||||
column_name: &str,
|
||||
ordinal_position: u32,
|
||||
index_types: &str,
|
||||
) {
|
||||
let row = [
|
||||
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
|
||||
@@ -298,6 +337,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
(COLUMN_NAME, &Value::from(column_name)),
|
||||
(ORDINAL_POSITION, &Value::from(ordinal_position)),
|
||||
(GREPTIME_INDEX_TYPE, &Value::from(index_types)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
@@ -314,6 +354,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
self.column_name.push(Some(column_name));
|
||||
self.ordinal_position.push(Some(ordinal_position));
|
||||
self.position_in_unique_constraint.push(None);
|
||||
self.greptime_index_type.push(Some(index_types));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
@@ -337,6 +378,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
null_string_vector,
|
||||
Arc::new(self.greptime_index_type.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
@@ -20,17 +20,18 @@ use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::datetime::DateTime;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::timestamp::TimestampMicrosecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
|
||||
MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
|
||||
ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
|
||||
StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
|
||||
UInt64VectorBuilder,
|
||||
};
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use partition::manager::PartitionInfo;
|
||||
@@ -38,13 +39,12 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::{TableInfo, TableType};
|
||||
|
||||
use super::PARTITIONS;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
|
||||
Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, PARTITIONS};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
@@ -127,9 +127,21 @@ impl InformationSchemaPartitions {
|
||||
ColumnSchema::new("max_data_length", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new("index_length", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new("create_time", ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new("update_time", ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new("check_time", ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"create_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"update_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"check_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"partition_comment",
|
||||
@@ -200,7 +212,7 @@ struct InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder,
|
||||
partition_ordinal_positions: Int64VectorBuilder,
|
||||
partition_expressions: StringVectorBuilder,
|
||||
create_times: DateTimeVectorBuilder,
|
||||
create_times: TimestampMicrosecondVectorBuilder,
|
||||
partition_ids: UInt64VectorBuilder,
|
||||
}
|
||||
|
||||
@@ -220,7 +232,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
@@ -324,7 +336,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
};
|
||||
|
||||
self.partition_expressions.push(expressions.as_deref());
|
||||
self.create_times.push(Some(DateTime::from(
|
||||
self.create_times.push(Some(TimestampMicrosecond::from(
|
||||
table_info.meta.created_on.timestamp_millis(),
|
||||
)));
|
||||
self.partition_ids.push(Some(partition.id.as_u64()));
|
||||
@@ -342,8 +354,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
Arc::new(Int64Vector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let null_datetime_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(DateTimeVector::from(vec![None])),
|
||||
let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampMicrosecondVector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let partition_methods = Arc::new(ConstantVector::new(
|
||||
@@ -373,8 +385,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
null_i64_vector.clone(),
|
||||
Arc::new(self.create_times.finish()),
|
||||
// TODO(dennis): supports update_time
|
||||
null_datetime_vector.clone(),
|
||||
null_datetime_vector,
|
||||
null_timestampmicrosecond_vector.clone(),
|
||||
null_timestampmicrosecond_vector,
|
||||
null_i64_vector,
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
|
||||
@@ -33,9 +33,8 @@ use datatypes::vectors::{StringVectorBuilder, TimestampMillisecondVectorBuilder}
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::PROCEDURE_INFO;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, PROCEDURE_INFO};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::common::HashMap;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
@@ -34,25 +35,30 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{RegionId, ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::REGION_PEERS;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const REGION_ID: &str = "region_id";
const PEER_ID: &str = "peer_id";
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const REGION_ID: &str = "region_id";
pub const PEER_ID: &str = "peer_id";
const PEER_ADDR: &str = "peer_addr";
const IS_LEADER: &str = "is_leader";
pub const IS_LEADER: &str = "is_leader";
const STATUS: &str = "status";
const DOWN_SECONDS: &str = "down_seconds";
const INIT_CAPACITY: usize = 42;

/// The `REGION_PEERS` table provides information about region distribution and routes. It includes the following fields (an illustrative set of rows is sketched after the list):
///
/// - `table_catalog`: the table catalog name
/// - `table_schema`: the table schema name
/// - `table_name`: the table name
/// - `region_id`: the region id
/// - `peer_id`: the region storage datanode peer id
/// - `peer_addr`: the region storage datanode gRPC peer address
|
||||
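// A hedged illustration with invented placeholder values: for a single region with one
// leader and two followers, the builder changed below now emits one row per peer, e.g.
//
//   table_catalog | table_schema | table_name | region_id     | peer_id | is_leader | status
//   greptime      | public       | monitor    | 4398046511104 | 1       | Yes       | ALIVE
//   greptime      | public       | monitor    | 4398046511104 | 2       | No        | NULL
//   greptime      | public       | monitor    | 4398046511104 | 3       | No        | NULL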
@@ -77,6 +83,9 @@ impl InformationSchemaRegionPeers {
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
|
||||
ColumnSchema::new(PEER_ID, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
@@ -134,6 +143,9 @@ struct InformationSchemaRegionPeersBuilder {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
table_catalogs: StringVectorBuilder,
|
||||
table_schemas: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
region_ids: UInt64VectorBuilder,
|
||||
peer_ids: UInt64VectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
@@ -152,6 +164,9 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -177,24 +192,28 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let table_id_stream = catalog_manager
|
||||
let table_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name, None)
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(table_info.ident.table_id))
|
||||
Ok(Some((
|
||||
table_info.ident.table_id,
|
||||
table_info.name.to_string(),
|
||||
)))
|
||||
}
|
||||
});
|
||||
|
||||
const BATCH_SIZE: usize = 128;
|
||||
|
||||
// Split table ids into chunks
|
||||
let mut table_id_chunks = pin!(table_id_stream.ready_chunks(BATCH_SIZE));
|
||||
// Split tables into chunks
|
||||
let mut table_chunks = pin!(table_stream.ready_chunks(BATCH_SIZE));
|
||||
|
||||
while let Some(table_ids) = table_id_chunks.next().await {
|
||||
let table_ids = table_ids.into_iter().collect::<Result<Vec<_>>>()?;
|
||||
while let Some(tables) = table_chunks.next().await {
|
||||
let tables = tables.into_iter().collect::<Result<HashMap<_, _>>>()?;
|
||||
let table_ids = tables.keys().cloned().collect::<Vec<_>>();
|
||||
|
||||
let table_routes = if let Some(partition_manager) = &partition_manager {
|
||||
partition_manager
|
||||
@@ -206,7 +225,16 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
};
|
||||
|
||||
for (table_id, routes) in table_routes {
|
||||
self.add_region_peers(&predicates, table_id, &routes);
|
||||
// Safety: table_id is guaranteed to be in the map
|
||||
let table_name = tables.get(&table_id).unwrap();
|
||||
self.add_region_peers(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
table_name,
|
||||
&predicates,
|
||||
table_id,
|
||||
&routes,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -216,6 +244,9 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
|
||||
fn add_region_peers(
|
||||
&mut self,
|
||||
table_catalog: &str,
|
||||
table_schema: &str,
|
||||
table_name: &str,
|
||||
predicates: &Predicates,
|
||||
table_id: TableId,
|
||||
routes: &[RegionRoute],
|
||||
@@ -231,13 +262,20 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
Some("ALIVE".to_string())
|
||||
};
|
||||
|
||||
let row = [(REGION_ID, &Value::from(region_id))];
|
||||
let row = [
|
||||
(TABLE_CATALOG, &Value::from(table_catalog)),
|
||||
(TABLE_SCHEMA, &Value::from(table_schema)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
(REGION_ID, &Value::from(region_id)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO(dennis): adds followers.
|
||||
self.table_catalogs.push(Some(table_catalog));
|
||||
self.table_schemas.push(Some(table_schema));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.region_ids.push(Some(region_id));
|
||||
self.peer_ids.push(peer_id);
|
||||
self.peer_addrs.push(peer_addr.as_deref());
|
||||
@@ -245,11 +283,26 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
self.statuses.push(state.as_deref());
|
||||
self.down_seconds
|
||||
.push(route.leader_down_millis().map(|m| m / 1000));
|
||||
|
||||
for follower in &route.follower_peers {
|
||||
self.table_catalogs.push(Some(table_catalog));
|
||||
self.table_schemas.push(Some(table_schema));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.region_ids.push(Some(region_id));
|
||||
self.peer_ids.push(Some(follower.id));
|
||||
self.peer_addrs.push(Some(follower.addr.as_str()));
|
||||
self.is_leaders.push(Some("No"));
|
||||
self.statuses.push(None);
|
||||
self.down_seconds.push(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.table_catalogs.finish()),
|
||||
Arc::new(self.table_schemas.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.region_ids.finish()),
|
||||
Arc::new(self.peer_ids.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
|
||||
@@ -30,9 +30,9 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, UInt64VectorB
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::{InformationTable, REGION_STATISTICS};
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::information_schema::{InformationTable, REGION_STATISTICS};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
|
||||
@@ -35,8 +35,8 @@ use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::{InformationTable, RUNTIME_METRICS};
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct InformationSchemaMetrics {
|
||||
|
||||
@@ -31,12 +31,11 @@ use datatypes::vectors::StringVectorBuilder;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::SCHEMATA;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, SCHEMATA};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
|
||||
@@ -32,14 +32,14 @@ use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::{InformationTable, TABLE_CONSTRAINTS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::key_column_usage::{
|
||||
PRI_CONSTRAINT_NAME, TIME_INDEX_CONSTRAINT_NAME,
|
||||
CONSTRAINT_NAME_PRI, CONSTRAINT_NAME_TIME_INDEX,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::information_schema::{InformationTable, TABLE_CONSTRAINTS};
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
|
||||
@@ -188,7 +188,7 @@ impl InformationSchemaTableConstraintsBuilder {
|
||||
self.add_table_constraint(
|
||||
&predicates,
|
||||
&schema_name,
|
||||
TIME_INDEX_CONSTRAINT_NAME,
|
||||
CONSTRAINT_NAME_TIME_INDEX,
|
||||
&schema_name,
|
||||
&table.table_info().name,
|
||||
TIME_INDEX_CONSTRAINT_TYPE,
|
||||
@@ -199,7 +199,7 @@ impl InformationSchemaTableConstraintsBuilder {
|
||||
self.add_table_constraint(
|
||||
&predicates,
|
||||
&schema_name,
|
||||
PRI_CONSTRAINT_NAME,
|
||||
CONSTRAINT_NAME_PRI,
|
||||
&schema_name,
|
||||
&table.table_info().name,
|
||||
PRI_KEY_CONSTRAINT_TYPE,
|
||||
|
||||
@@ -30,18 +30,18 @@ use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
DateTimeVectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
|
||||
StringVectorBuilder, TimestampMicrosecondVectorBuilder, UInt32VectorBuilder,
|
||||
UInt64VectorBuilder,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{RegionId, ScanRequest, TableId};
|
||||
use table::metadata::{TableInfo, TableType};
|
||||
|
||||
use super::TABLES;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, TABLES};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -105,9 +105,21 @@ impl InformationSchemaTables {
|
||||
ColumnSchema::new(TABLE_ROWS, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(DATA_FREE, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(CREATE_TIME, ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new(UPDATE_TIME, ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new(CHECK_TIME, ConcreteDataType::datetime_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
CREATE_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
UPDATE_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
CHECK_TIME,
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(CHECKSUM, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(CREATE_OPTIONS, ConcreteDataType::string_datatype(), true),
|
||||
@@ -182,9 +194,9 @@ struct InformationSchemaTablesBuilder {
|
||||
max_index_length: UInt64VectorBuilder,
|
||||
data_free: UInt64VectorBuilder,
|
||||
auto_increment: UInt64VectorBuilder,
|
||||
create_time: DateTimeVectorBuilder,
|
||||
update_time: DateTimeVectorBuilder,
|
||||
check_time: DateTimeVectorBuilder,
|
||||
create_time: TimestampMicrosecondVectorBuilder,
|
||||
update_time: TimestampMicrosecondVectorBuilder,
|
||||
check_time: TimestampMicrosecondVectorBuilder,
|
||||
table_collation: StringVectorBuilder,
|
||||
checksum: UInt64VectorBuilder,
|
||||
create_options: StringVectorBuilder,
|
||||
@@ -219,9 +231,9 @@ impl InformationSchemaTablesBuilder {
|
||||
max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
update_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
update_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
|
||||
@@ -32,13 +32,12 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::VIEWS;
|
||||
use crate::error::{
|
||||
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, VIEWS};
|
||||
use crate::CatalogManager;
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
|
||||
@@ -29,8 +29,8 @@ use datatypes::vectors::VectorRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::SystemTable;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::SystemTable;
|
||||
|
||||
/// A memory table with specified schema and columns.
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -34,9 +34,9 @@ use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
|
||||
use super::memory_table::MemoryTable;
|
||||
use super::utils::tables::u32_column;
|
||||
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
use crate::system_schema::utils::tables::u32_column;
|
||||
use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::Arc;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use super::oid_column;
|
||||
use super::table_names::PG_TYPE;
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::pg_catalog::oid_column;
|
||||
use crate::system_schema::pg_catalog::table_names::PG_TYPE;
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
|
||||
@@ -32,12 +32,12 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -29,12 +29,12 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -35,11 +35,13 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::{
|
||||
query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE,
|
||||
};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -437,10 +437,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn column(name: &str) -> Expr {
|
||||
Expr::Column(Column {
|
||||
relation: None,
|
||||
name: name.to_string(),
|
||||
})
|
||||
Expr::Column(Column::from_name(name))
|
||||
}
|
||||
|
||||
fn string_literal(v: &str) -> Expr {
|
||||
|
||||
@@ -51,10 +51,10 @@ pub fn bigint_column(name: &str) -> ColumnSchema {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn datetime_column(name: &str) -> ColumnSchema {
pub fn timestamp_micro_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
false,
)
}
|
||||
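// For example, `timestamp_micro_column("CREATE_TIME")` now yields a non-nullable
// microsecond-timestamp column named "create_time" (the name is lowercased),
// replacing the previous DateTime-typed helper.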
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
pg_kvbackend = ["common-meta/pg_kvbackend"]
|
||||
mysql_kvbackend = ["common-meta/mysql_kvbackend"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -43,6 +44,10 @@ futures.workspace = true
|
||||
humantime.workspace = true
|
||||
meta-client.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
opendal = { version = "0.51.1", features = [
|
||||
"services-fs",
|
||||
"services-s3",
|
||||
] }
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
reqwest.workspace = true
|
||||
|
||||
@@ -23,6 +23,8 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use common_meta::kv_backend::rds::MySqlStore;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::rds::PgStore;
|
||||
use common_meta::peer::Peer;
|
||||
@@ -63,6 +65,9 @@ pub struct BenchTableMetadataCommand {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
count: u32,
|
||||
}
|
||||
@@ -86,6 +91,16 @@ impl BenchTableMetadataCommand {
|
||||
kv_backend
|
||||
};
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
let kv_backend = if let Some(mysql_addr) = &self.mysql_addr {
|
||||
info!("Using mysql as kv backend");
|
||||
MySqlStore::with_url(mysql_addr, "greptime_metakv", 128)
|
||||
.await
|
||||
.unwrap()
|
||||
} else {
|
||||
kv_backend
|
||||
};
|
||||
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
|
||||
|
||||
let tool = BenchTableMetadata {
|
||||
@@ -162,7 +177,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
|
||||
|
||||
fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
let mut region_routes = Vec::with_capacity(100);
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
|
||||
for region_id in regions.into_iter().map(u64::from) {
|
||||
region_routes.push(RegionRoute {
|
||||
@@ -173,7 +188,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
attrs: BTreeMap::new(),
|
||||
},
|
||||
leader_peer: Some(Peer {
|
||||
id: rng.gen_range(0..10),
|
||||
id: rng.random_range(0..10),
|
||||
addr: String::new(),
|
||||
}),
|
||||
follower_peers: vec![],
|
||||
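// The two replacements above assume the rand 0.9 renames: `thread_rng()` -> `rng()`
// and `Rng::gen_range()` -> `Rng::random_range()`. A minimal sketch of the new calls
// (the helper name below is illustrative, not from this diff):
use rand::Rng;

fn random_peer_id() -> u64 {
    let mut rng = rand::rng();
    rng.random_range(0..10)
}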
|
||||
@@ -17,7 +17,6 @@ use std::any::Any;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use rustyline::error::ReadlineError;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
@@ -105,52 +104,6 @@ pub enum Error {
|
||||
#[snafu(display("Invalid REPL command: {reason}"))]
|
||||
InvalidReplCommand { reason: String },
|
||||
|
||||
#[snafu(display("Cannot create REPL"))]
|
||||
ReplCreation {
|
||||
#[snafu(source)]
|
||||
error: ReadlineError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Error reading command"))]
|
||||
Readline {
|
||||
#[snafu(source)]
|
||||
error: ReadlineError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request database, sql: {sql}"))]
|
||||
RequestDatabase {
|
||||
sql: String,
|
||||
#[snafu(source)]
|
||||
source: client::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect RecordBatches"))]
|
||||
CollectRecordBatches {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to pretty print Recordbatches"))]
|
||||
PrettyPrintRecordBatches {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start Meta client"))]
|
||||
StartMetaClient {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse SQL: {}", sql))]
|
||||
ParseSql {
|
||||
sql: String,
|
||||
@@ -166,13 +119,6 @@ pub enum Error {
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode logical plan in substrait"))]
|
||||
SubstraitEncodeLogicalPlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: substrait::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to load layered config"))]
|
||||
LoadLayeredConfig {
|
||||
#[snafu(source(from(common_config::error::Error, Box::new)))]
|
||||
@@ -276,6 +222,24 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("OpenDAL operator failed"))]
|
||||
OpenDal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: opendal::Error,
|
||||
},
|
||||
#[snafu(display("S3 config need be set"))]
|
||||
S3ConfigNotSet {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
#[snafu(display("Output directory not set"))]
|
||||
OutputDirNotSet {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -300,17 +264,10 @@ impl ErrorExt for Error {
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source, .. }
|
||||
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
|
||||
Error::StartMetaClient { source, .. } => source.status_code(),
|
||||
Error::HttpQuerySql { .. } => StatusCode::Internal,
|
||||
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. }
|
||||
| Error::FileIo { .. }
|
||||
@@ -319,6 +276,9 @@ impl ErrorExt for Error {
|
||||
| Error::BuildClient { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
Error::OpenDal { .. } => StatusCode::Internal,
|
||||
Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
|
||||
Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||
|
||||
|
||||
@@ -21,15 +21,18 @@ use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use opendal::layers::LoggingLayer;
|
||||
use opendal::{services, Operator};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
|
||||
use crate::database::{parse_proxy_opts, DatabaseClient};
|
||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
|
||||
SchemaNotFoundSnafu,
|
||||
};
|
||||
use crate::{database, Tool};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
@@ -52,8 +55,9 @@ pub struct ExportCommand {
|
||||
addr: String,
|
||||
|
||||
/// Directory to put the exported data, e.g. /tmp/greptimedb-export.
/// Used for local export; required unless exporting to S3.
|
||||
#[clap(long)]
|
||||
output_dir: String,
|
||||
output_dir: Option<String>,
|
||||
|
||||
/// The name of the catalog to export.
|
||||
#[clap(long, default_value = "greptime-*")]
|
||||
@@ -101,10 +105,51 @@ pub struct ExportCommand {
|
||||
/// Disable proxy server, if set, will not use any proxy.
|
||||
#[clap(long)]
|
||||
no_proxy: bool,
|
||||
|
||||
/// Whether to export data to S3.
#[clap(long)]
s3: bool,

/// The S3 bucket name.
/// Required when `s3` is set.
#[clap(long)]
s3_bucket: Option<String>,

/// The S3 endpoint.
/// Required when `s3` is set.
#[clap(long)]
s3_endpoint: Option<String>,

/// The S3 access key.
/// Required when `s3` is set.
#[clap(long)]
s3_access_key: Option<String>,

/// The S3 secret key.
/// Required when `s3` is set.
#[clap(long)]
s3_secret_key: Option<String>,

/// The S3 region.
/// Required when `s3` is set.
#[clap(long)]
s3_region: Option<String>,
}
|
||||
|
||||
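// A hypothetical invocation sketch (clap derives kebab-case flags from the field names
// above; the bucket, credentials and endpoint are placeholders, not values from this diff):
//
//   export --addr 127.0.0.1:4000 --s3 --s3-bucket my-bucket --s3-region us-east-1 \
//          --s3-access-key <key> --s3-secret-key <secret> --s3-endpoint <endpoint>
//
// Without `--s3`, `--output-dir` must be provided instead (see the validation in `build()` below).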
impl ExportCommand {
|
||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||
if self.s3
|
||||
&& (self.s3_bucket.is_none()
|
||||
|| self.s3_endpoint.is_none()
|
||||
|| self.s3_access_key.is_none()
|
||||
|| self.s3_secret_key.is_none()
|
||||
|| self.s3_region.is_none())
|
||||
{
|
||||
return Err(BoxedError::new(S3ConfigNotSetSnafu {}.build()));
|
||||
}
|
||||
if !self.s3 && self.output_dir.is_none() {
|
||||
return Err(BoxedError::new(OutputDirNotSetSnafu {}.build()));
|
||||
}
|
||||
let (catalog, schema) =
|
||||
database::split_database(&self.database).map_err(BoxedError::new)?;
|
||||
let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
|
||||
@@ -126,24 +171,43 @@ impl ExportCommand {
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
s3: self.s3,
|
||||
s3_bucket: self.s3_bucket.clone(),
|
||||
s3_endpoint: self.s3_endpoint.clone(),
|
||||
s3_access_key: self.s3_access_key.clone(),
|
||||
s3_secret_key: self.s3_secret_key.clone(),
|
||||
s3_region: self.s3_region.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Export {
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
output_dir: String,
|
||||
output_dir: Option<String>,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
s3: bool,
|
||||
s3_bucket: Option<String>,
|
||||
s3_endpoint: Option<String>,
|
||||
s3_access_key: Option<String>,
|
||||
s3_secret_key: Option<String>,
|
||||
s3_region: Option<String>,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.output_dir).join(&self.catalog)
|
||||
if self.s3 {
|
||||
PathBuf::from(&self.catalog)
|
||||
} else if let Some(dir) = &self.output_dir {
|
||||
PathBuf::from(dir).join(&self.catalog)
|
||||
} else {
|
||||
unreachable!("catalog_path: output_dir must be set when not using s3")
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
@@ -300,19 +364,23 @@ impl Export {
|
||||
let timer = Instant::now();
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let operator = self.build_operator().await?;
|
||||
|
||||
for schema in db_names {
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let file = db_dir.join("create_database.sql");
|
||||
let mut file = File::create(file).await.context(FileIoSnafu)?;
|
||||
let create_database = self
|
||||
.show_create("DATABASE", &self.catalog, &schema, None)
|
||||
.await?;
|
||||
file.write_all(create_database.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
|
||||
let file_path = self.get_file_path(&schema, "create_database.sql");
|
||||
self.write_to_storage(&operator, &file_path, create_database.into_bytes())
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
"Exported {}.{} database creation SQL to {}",
|
||||
self.catalog,
|
||||
schema,
|
||||
self.format_output_path(&file_path)
|
||||
);
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
@@ -326,149 +394,267 @@ impl Export {
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let operator = Arc::new(self.build_operator().await?);
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
let export_self = self.clone();
|
||||
let operator = operator.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let (metric_physical_tables, remaining_tables, views) =
|
||||
self.get_table_list(&self.catalog, &schema).await?;
|
||||
let table_count =
|
||||
metric_physical_tables.len() + remaining_tables.len() + views.len();
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let file = db_dir.join("create_tables.sql");
|
||||
let mut file = File::create(file).await.context(FileIoSnafu)?;
|
||||
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
|
||||
let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
|
||||
file.write_all(create_table.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
for (c, s, v) in views {
|
||||
let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
|
||||
file.write_all(create_view.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let (metric_physical_tables, remaining_tables, views) = export_self
|
||||
.get_table_list(&export_self.catalog, &schema)
|
||||
.await?;
|
||||
|
||||
// Create directory if needed for file system storage
|
||||
if !export_self.s3 {
|
||||
let db_dir = format!("{}/{}/", export_self.catalog, schema);
|
||||
operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
|
||||
}
|
||||
|
||||
let file_path = export_self.get_file_path(&schema, "create_tables.sql");
|
||||
let mut content = Vec::new();
|
||||
|
||||
// Add table creation SQL
|
||||
for (c, s, t) in metric_physical_tables.iter().chain(&remaining_tables) {
|
||||
let create_table = export_self.show_create("TABLE", c, s, Some(t)).await?;
|
||||
content.extend_from_slice(create_table.as_bytes());
|
||||
}
|
||||
|
||||
// Add view creation SQL
|
||||
for (c, s, v) in &views {
|
||||
let create_view = export_self.show_create("VIEW", c, s, Some(v)).await?;
|
||||
content.extend_from_slice(create_view.as_bytes());
|
||||
}
|
||||
|
||||
// Write to storage
|
||||
export_self
|
||||
.write_to_storage(&operator, &file_path, content)
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
|
||||
self.catalog,
|
||||
db_dir.to_string_lossy()
|
||||
"Finished exporting {}.{schema} with {} table schemas to path: {}",
|
||||
export_self.catalog,
|
||||
metric_physical_tables.len() + remaining_tables.len() + views.len(),
|
||||
export_self.format_output_path(&file_path)
|
||||
);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export schema job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
|
||||
let success = self.execute_tasks(tasks).await;
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn build_operator(&self) -> Result<Operator> {
|
||||
if self.s3 {
|
||||
self.build_s3_operator().await
|
||||
} else {
|
||||
self.build_fs_operator().await
|
||||
}
|
||||
}
|
||||
|
||||
async fn build_s3_operator(&self) -> Result<Operator> {
|
||||
let mut builder = services::S3::default().root("").bucket(
|
||||
self.s3_bucket
|
||||
.as_ref()
|
||||
.expect("s3_bucket must be provided when s3 is enabled"),
|
||||
);
|
||||
|
||||
if let Some(endpoint) = self.s3_endpoint.as_ref() {
|
||||
builder = builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = self.s3_region.as_ref() {
|
||||
builder = builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = self.s3_access_key.as_ref() {
|
||||
builder = builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(secret_key) = self.s3_secret_key.as_ref() {
|
||||
builder = builder.secret_access_key(secret_key);
|
||||
}
|
||||
|
||||
let op = Operator::new(builder)
|
||||
.context(OpenDalSnafu)?
|
||||
.layer(LoggingLayer::default())
|
||||
.finish();
|
||||
Ok(op)
|
||||
}
|
||||
|
||||
async fn build_fs_operator(&self) -> Result<Operator> {
|
||||
let root = self
|
||||
.output_dir
|
||||
.as_ref()
|
||||
.context(OutputDirNotSetSnafu)?
|
||||
.clone();
|
||||
let op = Operator::new(services::Fs::default().root(&root))
|
||||
.context(OpenDalSnafu)?
|
||||
.layer(LoggingLayer::default())
|
||||
.finish();
|
||||
Ok(op)
|
||||
}
|
||||
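// A minimal usage sketch, assuming the opendal 0.51 `Operator` API used above: the same
// operator type serves both the S3 and the local-fs backend, so the write path below is
// backend-agnostic (directory creation is only needed for the fs backend, as the export
// code notes). Paths and content here are placeholders.
async fn write_example(op: &opendal::Operator) -> Result<(), opendal::Error> {
    op.create_dir("greptime/public/").await?;
    op.write("greptime/public/create_tables.sql", b"-- DDL goes here\n".to_vec())
        .await?;
    Ok(())
}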
|
||||
async fn export_database_data(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_count);
|
||||
let operator = Arc::new(self.build_operator().await?);
|
||||
let with_options = build_with_options(&self.start_time, &self.end_time);
|
||||
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
let export_self = self.clone();
|
||||
let with_options_clone = with_options.clone();
|
||||
let operator = operator.clone();
|
||||
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
|
||||
let with_options = match (&self.start_time, &self.end_time) {
|
||||
(Some(start_time), Some(end_time)) => {
|
||||
format!(
|
||||
"WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
|
||||
start_time, end_time
|
||||
)
|
||||
}
|
||||
(Some(start_time), None) => {
|
||||
format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
|
||||
}
|
||||
(None, Some(end_time)) => {
|
||||
format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
|
||||
}
|
||||
(None, None) => "WITH (FORMAT='parquet')".to_string(),
|
||||
};
|
||||
// Create directory if not using S3
|
||||
if !export_self.s3 {
|
||||
let db_dir = format!("{}/{}/", export_self.catalog, schema);
|
||||
operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
|
||||
}
|
||||
|
||||
let (path, connection_part) = export_self.get_storage_params(&schema);
|
||||
|
||||
// Execute COPY DATABASE TO command
|
||||
let sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
|
||||
self.catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap(),
|
||||
with_options
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
|
||||
export_self.catalog, schema, path, with_options_clone, connection_part
|
||||
);
|
||||
info!("Executing sql: {sql}");
|
||||
export_self.database_client.sql_in_public(&sql).await?;
|
||||
info!(
|
||||
"Finished exporting {}.{} data to {}",
|
||||
export_self.catalog, schema, path
|
||||
);
|
||||
|
||||
info!("Executing sql: {sql}");
|
||||
// Create copy_from.sql file
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
|
||||
export_self.catalog, schema, path, with_options_clone, connection_part
|
||||
);
|
||||
|
||||
self.database_client.sql_in_public(&sql).await?;
|
||||
let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
|
||||
export_self
|
||||
.write_to_storage(
|
||||
&operator,
|
||||
©_from_path,
|
||||
copy_database_from_sql.into_bytes(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {}.{schema} data into path: {}",
|
||||
self.catalog,
|
||||
db_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
// The export copy from sql
|
||||
let copy_from_file = db_dir.join("copy_from.sql");
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
|
||||
self.catalog,
|
||||
"Finished exporting {}.{} copy_from.sql to {}",
|
||||
export_self.catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap()
|
||||
export_self.format_output_path(©_from_path)
|
||||
);
|
||||
writer
|
||||
.write(copy_database_from_sql.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
let success = self.execute_tasks(tasks).await;
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_file_path(&self, schema: &str, file_name: &str) -> String {
|
||||
format!("{}/{}/{}", self.catalog, schema, file_name)
|
||||
}
|
||||
|
||||
fn format_output_path(&self, file_path: &str) -> String {
|
||||
if self.s3 {
|
||||
format!(
|
||||
"s3://{}/{}",
|
||||
self.s3_bucket.as_ref().unwrap_or(&String::new()),
|
||||
file_path
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"{}/{}",
|
||||
self.output_dir.as_ref().unwrap_or(&String::new()),
|
||||
file_path
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async fn write_to_storage(
|
||||
&self,
|
||||
op: &Operator,
|
||||
file_path: &str,
|
||||
content: Vec<u8>,
|
||||
) -> Result<()> {
|
||||
op.write(file_path, content).await.context(OpenDalSnafu)
|
||||
}
|
||||
|
||||
fn get_storage_params(&self, schema: &str) -> (String, String) {
|
||||
if self.s3 {
|
||||
let s3_path = format!(
|
||||
"s3://{}/{}/{}/",
|
||||
// Safety: s3_bucket is required when s3 is enabled
|
||||
self.s3_bucket.as_ref().unwrap(),
|
||||
self.catalog,
|
||||
schema
|
||||
);
|
||||
|
||||
// endpoint is optional
|
||||
let endpoint_option = if let Some(endpoint) = self.s3_endpoint.as_ref() {
|
||||
format!(", ENDPOINT='{}'", endpoint)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
// Safety: All s3 options are required
|
||||
let connection_options = format!(
|
||||
"ACCESS_KEY_ID='{}', SECRET_ACCESS_KEY='{}', REGION='{}'{}",
|
||||
self.s3_access_key.as_ref().unwrap(),
|
||||
self.s3_secret_key.as_ref().unwrap(),
|
||||
self.s3_region.as_ref().unwrap(),
|
||||
endpoint_option
|
||||
);
|
||||
|
||||
(s3_path, format!(" CONNECTION ({})", connection_options))
|
||||
} else {
|
||||
(
|
||||
self.catalog_path()
|
||||
.join(format!("{schema}/"))
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
String::new(),
|
||||
)
|
||||
}
|
||||
}
|
||||
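// A hedged example of what `get_storage_params` combined with the WITH options helper
// produces for an S3 export; bucket, credentials and region are placeholders:
//
//   COPY DATABASE "greptime"."public" TO 's3://my-bucket/greptime/public/'
//     WITH (format = 'parquet')
//     CONNECTION (ACCESS_KEY_ID='<key>', SECRET_ACCESS_KEY='<secret>', REGION='us-east-1');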
|
||||
async fn execute_tasks(
|
||||
&self,
|
||||
tasks: Vec<impl std::future::Future<Output = Result<()>>>,
|
||||
) -> usize {
|
||||
futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export database job failed");
|
||||
error!(e; "export job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
|
||||
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
.count()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -493,3 +679,15 @@ impl Tool for Export {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
let mut options = vec!["format = 'parquet'".to_string()];
if let Some(start) = start_time {
options.push(format!("start_time = '{}'", start));
}
if let Some(end) = end_time {
options.push(format!("end_time = '{}'", end));
}
options.join(", ")
}
|
||||
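// A small usage sketch of the helper above (the timestamp is an arbitrary example value):
#[cfg(test)]
mod with_options_example {
    use super::*;

    #[test]
    fn start_time_only() {
        let opts = build_with_options(&Some("2022-01-01 00:00:00".to_string()), &None);
        assert_eq!(opts, "format = 'parquet', start_time = '2022-01-01 00:00:00'");
    }
}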
|
||||
@@ -23,15 +23,12 @@ mod helper;
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
mod database;
|
||||
mod import;
|
||||
#[allow(unused)]
|
||||
mod repl;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_error::ext::BoxedError;
|
||||
pub use database::DatabaseClient;
|
||||
use error::Result;
|
||||
pub use repl::Repl;
|
||||
|
||||
pub use crate::bench::BenchTableMetadataCommand;
|
||||
pub use crate::export::ExportCommand;
|
||||
|
||||
@@ -1,299 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use cache::{
|
||||
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
||||
TABLE_ROUTE_CACHE_NAME,
|
||||
};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{
|
||||
CachedKvBackend, CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
|
||||
};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_base::Plugins;
|
||||
use common_config::Mode;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::debug;
|
||||
use either::Either;
|
||||
use meta_client::client::{ClusterKvBackend, MetaClientBuilder};
|
||||
use query::datafusion::DatafusionQueryEngine;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::query_engine::{DefaultSerializer, QueryEngineState};
|
||||
use query::QueryEngine;
|
||||
use rustyline::error::ReadlineError;
|
||||
use rustyline::Editor;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
|
||||
use crate::cmd::ReplCommand;
|
||||
use crate::error::{
|
||||
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
|
||||
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
|
||||
SubstraitEncodeLogicalPlanSnafu,
|
||||
};
|
||||
use crate::helper::RustylineHelper;
|
||||
use crate::{error, AttachCommand};
|
||||
|
||||
/// Captures the state of the repl, gathers commands and executes them one by one
|
||||
pub struct Repl {
|
||||
/// Rustyline editor for interacting with user on command line
|
||||
rl: Editor<RustylineHelper>,
|
||||
|
||||
/// Current prompt
|
||||
prompt: String,
|
||||
|
||||
/// Client for interacting with GreptimeDB
|
||||
database: Database,
|
||||
|
||||
query_engine: Option<DatafusionQueryEngine>,
|
||||
}
|
||||
|
||||
#[allow(clippy::print_stdout)]
|
||||
impl Repl {
|
||||
fn print_help(&self) {
|
||||
println!("{}", ReplCommand::help())
|
||||
}
|
||||
|
||||
pub(crate) async fn try_new(cmd: &AttachCommand) -> Result<Self> {
|
||||
let mut rl = Editor::new().context(ReplCreationSnafu)?;
|
||||
|
||||
if !cmd.disable_helper {
|
||||
rl.set_helper(Some(RustylineHelper::default()));
|
||||
|
||||
let history_file = history_file();
|
||||
if let Err(e) = rl.load_history(&history_file) {
|
||||
debug!(
|
||||
"failed to load history file on {}, error: {e}",
|
||||
history_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let client = Client::with_urls([&cmd.grpc_addr]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
|
||||
let query_engine = if let Some(meta_addr) = &cmd.meta_addr {
|
||||
create_query_engine(meta_addr).await.map(Some)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
rl,
|
||||
prompt: "> ".to_string(),
|
||||
database,
|
||||
query_engine,
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse the next command
|
||||
fn next_command(&mut self) -> Result<ReplCommand> {
|
||||
match self.rl.readline(&self.prompt) {
|
||||
Ok(ref line) => {
|
||||
let request = line.trim();
|
||||
|
||||
let _ = self.rl.add_history_entry(request.to_string());
|
||||
|
||||
request.try_into()
|
||||
}
|
||||
Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
|
||||
// Some sort of real underlying error
|
||||
Err(e) => Err(e).context(ReadlineSnafu),
|
||||
}
|
||||
}
|
||||
|
||||
/// Read Evaluate Print Loop (interactive command line) for GreptimeDB
|
||||
///
|
||||
/// Inspired / based on repl.rs from InfluxDB IOX
|
||||
pub(crate) async fn run(&mut self) -> Result<()> {
|
||||
println!("Ready for commands. (Hint: try 'help')");
|
||||
|
||||
loop {
|
||||
match self.next_command()? {
|
||||
ReplCommand::Help => {
|
||||
self.print_help();
|
||||
}
|
||||
ReplCommand::UseDatabase { db_name } => {
|
||||
if self.execute_sql(format!("USE {db_name}")).await {
|
||||
println!("Using {db_name}");
|
||||
self.database.set_schema(&db_name);
|
||||
self.prompt = format!("[{db_name}] > ");
|
||||
}
|
||||
}
|
||||
ReplCommand::Sql { sql } => {
|
||||
let _ = self.execute_sql(sql).await;
|
||||
}
|
||||
ReplCommand::Exit => {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_sql(&self, sql: String) -> bool {
|
||||
self.do_execute_sql(sql)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let status_code = e.status_code();
|
||||
let root_cause = e.output_msg();
|
||||
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
|
||||
})
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
async fn do_execute_sql(&self, sql: String) -> Result<()> {
|
||||
let start = Instant::now();
|
||||
|
||||
let output = if let Some(query_engine) = &self.query_engine {
|
||||
let query_ctx = Arc::new(QueryContext::with(
|
||||
self.database.catalog(),
|
||||
self.database.schema(),
|
||||
));
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(&sql, &query_ctx)
|
||||
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
|
||||
|
||||
let plan = query_engine
|
||||
.planner()
|
||||
.plan(&stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let plan = query_engine
|
||||
.optimize(&query_engine.engine_context(query_ctx), &plan)
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let plan = DFLogicalSubstraitConvertor {}
|
||||
.encode(&plan, DefaultSerializer)
|
||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||
|
||||
self.database.logical_plan(plan.to_vec()).await
|
||||
} else {
|
||||
self.database.sql(&sql).await
|
||||
}
|
||||
.context(RequestDatabaseSnafu { sql: &sql })?;
|
||||
|
||||
let either = match output.data {
|
||||
OutputData::Stream(s) => {
|
||||
let x = RecordBatches::try_collect(s)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?;
|
||||
Either::Left(x)
|
||||
}
|
||||
OutputData::RecordBatches(x) => Either::Left(x),
|
||||
OutputData::AffectedRows(rows) => Either::Right(rows),
|
||||
};
|
||||
|
||||
let end = Instant::now();
|
||||
|
||||
match either {
|
||||
Either::Left(recordbatches) => {
|
||||
let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
|
||||
if total_rows > 0 {
|
||||
println!(
|
||||
"{}",
|
||||
recordbatches
|
||||
.pretty_print()
|
||||
.context(PrettyPrintRecordBatchesSnafu)?
|
||||
);
|
||||
}
|
||||
println!("Total Rows: {total_rows}")
|
||||
}
|
||||
Either::Right(rows) => println!("Affected Rows: {rows}"),
|
||||
};
|
||||
|
||||
println!("Cost {} ms", (end - start).as_millis());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Repl {
|
||||
fn drop(&mut self) {
|
||||
if self.rl.helper().is_some() {
|
||||
let history_file = history_file();
|
||||
if let Err(e) = self.rl.save_history(&history_file) {
|
||||
debug!(
|
||||
"failed to save history file on {}, error: {e}",
|
||||
history_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
|
||||
fn history_file() -> PathBuf {
|
||||
let mut buf = match std::env::var("HOME") {
|
||||
Ok(home) => PathBuf::from(home),
|
||||
Err(_) => PathBuf::new(),
|
||||
};
|
||||
buf.push(".greptimedb_cli_history");
|
||||
buf
|
||||
}
|
||||
|
||||
async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
let mut meta_client = MetaClientBuilder::default().enable_store().build();
|
||||
meta_client
|
||||
.start([meta_addr])
|
||||
.await
|
||||
.context(StartMetaClientSnafu)?;
|
||||
let meta_client = Arc::new(meta_client);
|
||||
|
||||
let cached_meta_backend = Arc::new(
|
||||
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
|
||||
);
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(cached_meta_backend.clone())
|
||||
.build(),
|
||||
);
|
||||
let fundamental_cache_registry =
|
||||
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
|
||||
let layered_cache_registry = Arc::new(
|
||||
with_default_composite_cache_registry(
|
||||
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||
)
|
||||
.context(error::BuildCacheRegistrySnafu)?
|
||||
.build(),
|
||||
);
|
||||
|
||||
let information_extension = Arc::new(DistributedInformationExtension::new(meta_client.clone()));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
information_extension,
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry,
|
||||
None,
|
||||
);
|
||||
let plugins: Plugins = Default::default();
|
||||
let state = Arc::new(QueryEngineState::new(
|
||||
catalog_manager,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
plugins.clone(),
|
||||
));
|
||||
|
||||
Ok(DatafusionQueryEngine::new(state, plugins))
|
||||
}
|
||||
@@ -16,6 +16,7 @@ arc-swap = "1.6"
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
@@ -25,6 +26,7 @@ common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
|
||||
@@ -12,36 +12,49 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::pin::Pin;
use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterTableExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
QueryRequest, RequestHeader,
AlterTableExpr, AuthHeader, Basic, CreateTableExpr, DdlRequest, GreptimeRequest,
InsertRequests, QueryRequest, RequestHeader,
};
use arrow_flight::Ticket;
use arrow_flight::{FlightData, Ticket};
use async_stream::stream;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::metadata::AsciiMetadataKey;
use tonic::metadata::{AsciiMetadataKey, MetadataValue};
use tonic::transport::Channel;

use crate::error::{
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
ServerSnafu,
InvalidTonicMetadataValueSnafu, ServerSnafu,
};
use crate::{from_grpc_response, Client, Result};

type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;

type DoPutResponseStream = Pin<Box<dyn Stream<Item = Result<DoPutResponse>>>>;

#[derive(Clone, Debug, Default)]
pub struct Database {
// The "catalog" and "schema" to be used in processing the requests at the server side.
@@ -108,16 +121,24 @@ impl Database {
self.catalog = catalog.into();
}

pub fn catalog(&self) -> &String {
&self.catalog
fn catalog_or_default(&self) -> &str {
if self.catalog.is_empty() {
DEFAULT_CATALOG_NAME
} else {
&self.catalog
}
}

pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}

pub fn schema(&self) -> &String {
&self.schema
fn schema_or_default(&self) -> &str {
if self.schema.is_empty() {
DEFAULT_SCHEMA_NAME
} else {
&self.schema
}
}

pub fn set_timezone(&mut self, timezone: impl Into<String>) {
@@ -164,7 +185,7 @@ impl Database {
from_grpc_response(response)
}

async fn handle(&self, request: Request) -> Result<u32> {
pub async fn handle(&self, request: Request) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let request = self.to_rpc_request(request);
let response = client.handle(request).await?.into_inner();
@@ -310,6 +331,41 @@ impl Database {
}
}
}

/// Ingest a stream of [RecordBatch]es that belong to a table, using Arrow Flight's "`DoPut`"
/// method. The return value is also a stream, produces [DoPutResponse]s.
pub async fn do_put(&self, stream: FlightDataStream) -> Result<DoPutResponseStream> {
let mut request = tonic::Request::new(stream);

if let Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(Basic { username, password })),
}) = &self.ctx.auth_header
{
let encoded = BASE64_STANDARD.encode(format!("{username}:{password}"));
let value =
MetadataValue::from_str(&encoded).context(InvalidTonicMetadataValueSnafu)?;
request.metadata_mut().insert("x-greptime-auth", value);
}

let db_to_put = if !self.dbname.is_empty() {
&self.dbname
} else {
&build_db_string(self.catalog_or_default(), self.schema_or_default())
};
request.metadata_mut().insert(
"x-greptime-db-name",
MetadataValue::from_str(db_to_put).context(InvalidTonicMetadataValueSnafu)?,
);

let mut client = self.client.make_flight_client()?;
let response = client.mut_inner().do_put(request).await?;
let response = response
.into_inner()
.map_err(Into::into)
.and_then(|x| future::ready(DoPutResponse::try_from(x).context(ConvertFlightDataSnafu)))
.boxed();
Ok(response)
}
}

#[derive(Default, Debug, Clone)]

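A minimal usage sketch of the `do_put` method added above (not part of the diff). How the `Database` handle is obtained, the `client::database` module path, and the source of the `FlightData` frames are assumptions; only the `do_put` signature and the crate's `Result` re-export come from the diff itself.

use std::pin::Pin;

use arrow_flight::FlightData;
use client::database::Database;
use futures_util::{stream, Stream, StreamExt};

async fn ingest(db: &Database, frames: Vec<FlightData>) -> client::Result<()> {
    // Box the encoded frames into the stream shape `do_put` expects.
    let flight_stream: Pin<Box<dyn Stream<Item = FlightData> + Send>> =
        Box::pin(stream::iter(frames));
    // `do_put` returns a stream of `DoPutResponse`s, one per acknowledged message.
    let mut responses = db.do_put(flight_stream).await?;
    while let Some(resp) = responses.next().await {
        let _ack = resp?;
    }
    Ok(())
}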
@@ -15,10 +15,11 @@
use std::any::Any;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::status_code::{convert_tonic_code_to_status_code, StatusCode};
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{location, Location, Snafu};
use tonic::metadata::errors::InvalidMetadataValue;
use tonic::{Code, Status};

#[derive(Snafu)]
@@ -115,6 +116,14 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Invalid Tonic metadata value"))]
InvalidTonicMetadataValue {
#[snafu(source)]
error: InvalidMetadataValue,
#[snafu(implicit)]
location: Location,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -135,7 +144,9 @@ impl ErrorExt for Error {
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
Error::InvalidAscii { .. } | Error::InvalidTonicMetadataValue { .. } => {
StatusCode::InvalidArguments
}
}
}

@@ -152,15 +163,15 @@ impl From<Status> for Error {
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|s| {
if let Ok(code) = s.parse::<u32>() {
StatusCode::from_u32(code)
} else {
None
}
})
.unwrap_or(StatusCode::Unknown);
let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE).and_then(|s| {
if let Ok(code) = s.parse::<u32>() {
StatusCode::from_u32(code)
} else {
None
}
});
let tonic_code = e.code();
let code = code.unwrap_or_else(|| convert_tonic_code_to_status_code(tonic_code));

let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
.unwrap_or_else(|| e.message().to_string());
@@ -187,9 +198,6 @@ impl Error {
} | Self::RegionServer {
code: Code::Unavailable,
..
} | Self::RegionServer {
code: Code::Unknown,
..
}
)
}

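The hunk above changes the status-code mapping so that a missing GreptimeDB header code falls back to the raw tonic code instead of collapsing to `StatusCode::Unknown`. A condensed sketch of that fallback; the names are taken from the diff, but this standalone function signature is an assumption, not the crate's API.

use common_error::status_code::{convert_tonic_code_to_status_code, StatusCode};

// Prefer the code carried in the GreptimeDB response header; otherwise derive
// one from the tonic code instead of returning `StatusCode::Unknown`.
fn resolve_status_code(header_code: Option<u32>, tonic_code: tonic::Code) -> StatusCode {
    header_code
        .and_then(StatusCode::from_u32)
        .unwrap_or_else(|| convert_tonic_code_to_status_code(tonic_code))
}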
@@ -16,8 +16,7 @@

mod client;
pub mod client_manager;
#[cfg(feature = "testing")]
mod database;
pub mod database;
pub mod error;
pub mod flow;
pub mod load_balance;
@@ -34,7 +33,6 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;

pub use self::client::Client;
#[cfg(feature = "testing")]
pub use self::database::Database;
pub use self::error::{Error, Result};
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};

@@ -13,7 +13,7 @@
// limitations under the License.

use enum_dispatch::enum_dispatch;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;

#[enum_dispatch]
pub trait LoadBalance {
@@ -37,7 +37,7 @@ pub struct Random;

impl LoadBalance for Random {
fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String> {
peers.choose(&mut rand::thread_rng())
peers.choose(&mut rand::rng())
}
}

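The load-balancer change above appears to track the rand 0.9 API rename (`SliceRandom` to `IndexedRandom` for slice sampling, `thread_rng()` to `rng()`). A self-contained sketch of the new calls, independent of the crate's `LoadBalance` trait:

use rand::seq::IndexedRandom;

// Pick a random peer with the rand 0.9-style API used in the diff.
fn pick_peer<'a>(peers: &'a [String]) -> Option<&'a String> {
    peers.choose(&mut rand::rng())
}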
@@ -201,12 +201,11 @@ impl RegionRequester {
.await
.map_err(|e| {
let code = e.code();
let err: error::Error = e.into();
// Uses `Error::RegionServer` instead of `Error::Server`
error::Error::RegionServer {
addr,
code,
source: BoxedError::new(err),
source: BoxedError::new(error::Error::from(e)),
location: location!(),
}
})?

@@ -68,7 +68,6 @@ query.workspace = true
rand.workspace = true
regex.workspace = true
reqwest.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
servers.workspace = true

@@ -30,7 +30,7 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::service::DatanodeServiceBuilder;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use snafu::{ensure, OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
@@ -223,15 +223,14 @@ impl StartCommand {
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}

if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu {
msg: "Missing node id option",
ensure!(
opts.node_id.is_some(),
MissingConfigSnafu {
msg: "Missing node id option"
}
.fail();
}
);

if let Some(data_home) = &self.data_home {
opts.storage.data_home.clone_from(data_home);
@@ -287,7 +286,6 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;

let cluster_id = 0; // TODO(hl): read from config
let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -297,9 +295,9 @@ impl StartCommand {
})?;

let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Datanode { member_id },
meta_config,
None,
)
.await
.context(MetaClientInitSnafu)?;
@@ -315,7 +313,7 @@ impl StartCommand {
.build(),
);

let mut datanode = DatanodeBuilder::new(opts.clone(), plugins)
let mut datanode = DatanodeBuilder::new(opts.clone(), plugins, Mode::Distributed)
.with_meta_client(meta_client)
.with_kv_backend(meta_backend)
.with_cache_registry(layered_cache_registry)
@@ -337,6 +335,7 @@ impl StartCommand {

#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::Write;
use std::time::Duration;

@@ -344,7 +343,6 @@ mod tests {
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;

use super::*;
use crate::options::GlobalOptions;
@@ -410,7 +408,7 @@ mod tests {
sync_write = false

[storage]
data_home = "/tmp/greptimedb/"
data_home = "./greptimedb_data/"
type = "File"

[[storage.providers]]
@@ -424,7 +422,7 @@ mod tests {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

@@ -471,7 +469,7 @@ mod tests {
assert_eq!(10000, ddl_timeout.as_millis());
assert_eq!(3000, timeout.as_millis());
assert!(tcp_nodelay);
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert_eq!("./greptimedb_data/", options.storage.data_home);
assert!(matches!(
&options.storage.store,
ObjectStoreConfig::File(FileConfig { .. })
@@ -487,27 +485,14 @@ mod tests {
));

assert_eq!("debug", options.logging.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
assert_eq!(
"./greptimedb_data/test/logs".to_string(),
options.logging.dir
);
}

#[test]
fn test_try_from_cmd() {
let opt = StartCommand::default()
.load_options(&GlobalOptions::default())
.unwrap()
.component;
assert_eq!(Mode::Standalone, opt.mode);

let opt = (StartCommand {
node_id: Some(42),
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(&GlobalOptions::default())
.unwrap()
.component;
assert_eq!(Mode::Distributed, opt.mode);

assert!((StartCommand {
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
@@ -526,11 +511,23 @@ mod tests {

#[test]
fn test_load_log_options_from_cli() {
let cmd = StartCommand::default();
let mut cmd = StartCommand::default();

let result = cmd.load_options(&GlobalOptions {
log_dir: Some("./greptimedb_data/test/logs".to_string()),
log_level: Some("debug".to_string()),

#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
});
// Missing node_id.
assert_matches!(result, Err(crate::error::Error::MissingConfig { .. }));

cmd.node_id = Some(42);

let options = cmd
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_dir: Some("./greptimedb_data/test/logs".to_string()),
log_level: Some("debug".to_string()),

#[cfg(feature = "tokio-console")]
@@ -540,7 +537,7 @@ mod tests {
.component;

let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}

@@ -569,11 +566,11 @@ mod tests {

[storage]
type = "File"
data_home = "/tmp/greptimedb/"
data_home = "./greptimedb_data/"

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

@@ -17,7 +17,6 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};

#[derive(Snafu)]
@@ -100,6 +99,13 @@ pub enum Error {
source: flow::Error,
},

#[snafu(display("Servers error"))]
Servers {
#[snafu(implicit)]
location: Location,
source: servers::error::Error,
},

#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -174,52 +180,6 @@ pub enum Error {
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },

#[snafu(display("Cannot create REPL"))]
ReplCreation {
#[snafu(source)]
error: ReadlineError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Error reading command"))]
Readline {
#[snafu(source)]
error: ReadlineError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
#[snafu(source)]
source: client::Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to pretty print Recordbatches"))]
PrettyPrintRecordBatches {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to start Meta client"))]
StartMetaClient {
#[snafu(implicit)]
location: Location,
source: meta_client::error::Error,
},

#[snafu(display("Failed to parse SQL: {}", sql))]
ParseSql {
sql: String,
@@ -235,13 +195,6 @@ pub enum Error {
source: query::error::Error,
},

#[snafu(display("Failed to encode logical plan in substrait"))]
SubstraitEncodeLogicalPlan {
#[snafu(implicit)]
location: Location,
source: substrait::error::Error,
},

#[snafu(display("Failed to load layered config"))]
LoadLayeredConfig {
#[snafu(source(from(common_config::error::Error, Box::new)))]
@@ -365,6 +318,7 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),
@@ -387,17 +341,10 @@ impl ErrorExt for Error {
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::BuildWalOptionsAllocator { source, .. }
| Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal
}
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
Error::HttpQuerySql { .. } => StatusCode::Internal,
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
source.status_code()
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),

Error::SerdeJson { .. }
| Error::FileIo { .. }

@@ -32,10 +32,11 @@ use common_meta::key::TableMetadataManager;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
use flow::{
FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder, FrontendClient, FrontendInvoker,
};
use meta_client::{MetaClientOptions, MetaClientType};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use snafu::{ensure, OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
@@ -203,7 +204,6 @@ impl StartCommand {
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}

if let Some(http_addr) = &self.http_addr {
@@ -214,12 +214,12 @@ impl StartCommand {
opts.http.timeout = Duration::from_secs(http_timeout);
}

if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu {
msg: "Missing node id option",
ensure!(
opts.node_id.is_some(),
MissingConfigSnafu {
msg: "Missing node id option"
}
.fail();
}
);

Ok(())
}
@@ -241,9 +241,6 @@ impl StartCommand {
let mut opts = opts.component;
opts.grpc.detect_server_addr();

// TODO(discord9): make it not optionale after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0);

let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -253,9 +250,9 @@ impl StartCommand {
})?;

let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Flownode { member_id },
meta_config,
None,
)
.await
.context(MetaClientInitSnafu)?;
@@ -318,16 +315,26 @@ impl StartCommand {
);

let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
let frontend_client = FrontendClient::from_meta_client(meta_client.clone());
let flownode_builder = FlownodeBuilder::new(
opts,
opts.clone(),
Plugins::new(),
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
Arc::new(frontend_client),
)
.with_heartbeat_task(heartbeat_task);

let flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
let services = FlownodeServiceBuilder::new(&opts)
.with_grpc_server(flownode.flownode_server().clone())
.enable_http_service()
.build()
.await
.context(StartFlownodeSnafu)?;
flownode.setup_services(services);
let flownode = flownode;

// flownode's frontend to datanode need not timeout.
// Some queries are expected to take long time.

@@ -32,28 +32,25 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use frontend::frontend::Frontend;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use query::stats::StatementStatistics;
use servers::export_metrics::ExportMetricsTask;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu, MissingConfigSnafu,
Result, StartFrontendSnafu,
};
use crate::error::{self, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};

type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;

pub struct Instance {
frontend: FeInstance,

frontend: Frontend,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
@@ -61,20 +58,17 @@ pub struct Instance {
pub const APP_NAME: &str = "greptime-frontend";

impl Instance {
pub fn new(frontend: FeInstance, guard: Vec<WorkerGuard>) -> Self {
Self {
frontend,
_guard: guard,
}
pub fn new(frontend: Frontend, _guard: Vec<WorkerGuard>) -> Self {
Self { frontend, _guard }
}

pub fn mut_inner(&mut self) -> &mut FeInstance {
&mut self.frontend
}

pub fn inner(&self) -> &FeInstance {
pub fn inner(&self) -> &Frontend {
&self.frontend
}

pub fn mut_inner(&mut self) -> &mut Frontend {
&mut self.frontend
}
}

#[async_trait]
@@ -84,11 +78,15 @@ impl App for Instance {
}

async fn start(&mut self) -> Result<()> {
plugins::start_frontend_plugins(self.frontend.plugins().clone())
let plugins = self.frontend.instance.plugins().clone();
plugins::start_frontend_plugins(plugins)
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

self.frontend.start().await.context(StartFrontendSnafu)
self.frontend
.start()
.await
.context(error::StartFrontendSnafu)
}

async fn stop(&self) -> Result<()> {
@@ -178,7 +176,7 @@ impl StartCommand {
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
.context(error::LoadLayeredConfigSnafu)?;

self.merge_with_cli_options(global_options, &mut opts)?;

@@ -283,26 +281,28 @@ impl StartCommand {
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
set_default_timezone(opts.default_timezone.as_deref()).context(error::InitTimezoneSnafu)?;

let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
msg: "'meta_client'",
})?;
let meta_client_options = opts
.meta_client
.as_ref()
.context(error::MissingConfigSnafu {
msg: "'meta_client'",
})?;

let cache_max_capacity = meta_client_options.metadata_cache_max_capacity;
let cache_ttl = meta_client_options.metadata_cache_ttl;
let cache_tti = meta_client_options.metadata_cache_tti;

let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled.
let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Frontend,
meta_client_options,
Some(&plugins),
)
.await
.context(MetaClientInitSnafu)?;
.context(error::MetaClientInitSnafu)?;

// TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend =
@@ -349,6 +349,7 @@ impl StartCommand {
opts.heartbeat.clone(),
Arc::new(executor),
);
let heartbeat_task = Some(heartbeat_task);

// frontend to datanode need not timeout.
// Some queries are expected to take long time.
@@ -360,7 +361,7 @@ impl StartCommand {
};
let client = NodeClients::new(channel_config);

let mut instance = FrontendBuilder::new(
let instance = FrontendBuilder::new(
opts.clone(),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
@@ -371,20 +372,27 @@ impl StartCommand {
)
.with_plugin(plugins.clone())
.with_local_cache_invalidator(layered_cache_registry)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);

let servers = Services::new(opts, Arc::new(instance.clone()), plugins)
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;

let servers = Services::new(opts, instance.clone(), plugins)
.build()
.await
.context(StartFrontendSnafu)?;
instance
.build_servers(servers)
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

Ok(Instance::new(instance, guard))
let frontend = Frontend {
instance,
servers,
heartbeat_task,
export_metrics_task,
};

Ok(Instance::new(frontend, guard))
}
}

@@ -444,7 +452,7 @@ mod tests {

[http]
addr = "127.0.0.1:4000"
timeout = "30s"
timeout = "0s"
body_limit = "2GB"

[opentsdb]
@@ -452,7 +460,7 @@ mod tests {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

@@ -465,12 +473,15 @@ mod tests {
let fe_opts = command.load_options(&Default::default()).unwrap().component;

assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
assert_eq!(Duration::from_secs(0), fe_opts.http.timeout);

assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);

assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
assert_eq!(
"./greptimedb_data/test/logs".to_string(),
fe_opts.logging.dir
);
assert!(!fe_opts.opentsdb.enable);
}

@@ -509,7 +520,7 @@ mod tests {

let options = cmd
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_dir: Some("./greptimedb_data/test/logs".to_string()),
log_level: Some("debug".to_string()),

#[cfg(feature = "tokio-console")]
@@ -519,7 +530,7 @@ mod tests {
.component;

let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}

@@ -337,7 +337,7 @@ mod tests {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"

[failure_detector]
threshold = 8.0
@@ -358,7 +358,10 @@ mod tests {
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
assert_eq!(SelectorType::LeaseBased, options.selector);
assert_eq!("debug", options.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
assert_eq!(
"./greptimedb_data/test/logs".to_string(),
options.logging.dir
);
assert_eq!(8.0, options.failure_detector.threshold);
assert_eq!(
100.0,
@@ -396,7 +399,7 @@ mod tests {

let options = cmd
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_dir: Some("./greptimedb_data/test/logs".to_string()),
log_level: Some("debug".to_string()),

#[cfg(feature = "tokio-console")]
@@ -406,7 +409,7 @@ mod tests {
.component;

let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
}

@@ -424,7 +427,7 @@ mod tests {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();

@@ -42,6 +42,7 @@ use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::region_registry::LeaderRegionRegistry;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
@@ -54,10 +55,13 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
use frontend::frontend::FrontendOptions;
use flow::{
FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeInstance, FlownodeOptions,
FrontendClient, FrontendInvoker,
};
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::instance::{Instance as FeInstance, StandaloneDatanodeManager};
use frontend::server::Services;
use frontend::service_config::{
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, PostgresOptions,
@@ -67,24 +71,18 @@ use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use query::stats::StatementStatistics;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use tokio::sync::{broadcast, RwLock};
use tokio::sync::RwLock;
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
BuildCacheRegistrySnafu, BuildWalOptionsAllocatorSnafu, CreateDirSnafu, IllegalConfigSnafu,
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu,
Result, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::error::{Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
use crate::{error, log_versions, App};

pub const APP_NAME: &str = "greptime-standalone";

@@ -132,7 +130,6 @@ impl SubCommand {
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_telemetry: bool,
pub default_timezone: Option<String>,
pub http: HttpOptions,
@@ -162,7 +159,6 @@ pub struct StandaloneOptions {
impl Default for StandaloneOptions {
fn default() -> Self {
Self {
mode: Mode::Standalone,
enable_telemetry: true,
default_timezone: None,
http: HttpOptions::default(),
@@ -243,7 +239,6 @@ impl StandaloneOptions {
grpc: cloned_opts.grpc,
init_regions_in_background: cloned_opts.init_regions_in_background,
init_regions_parallelism: cloned_opts.init_regions_parallelism,
mode: Mode::Standalone,
..Default::default()
}
}
@@ -251,13 +246,10 @@ impl StandaloneOptions {

pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
// TODO(discord9): wrapped it in flownode instance instead
flow_worker_manager: Arc<FlowWorkerManager>,
flow_shutdown: broadcast::Sender<()>,
frontend: Frontend,
flownode: FlownodeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,

// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
@@ -281,21 +273,24 @@ impl App for Instance {
self.procedure_manager
.start()
.await
.context(StartProcedureManagerSnafu)?;
.context(error::StartProcedureManagerSnafu)?;

self.wal_options_allocator
.start()
.await
.context(StartWalOptionsAllocatorSnafu)?;
.context(error::StartWalOptionsAllocatorSnafu)?;

plugins::start_frontend_plugins(self.frontend.plugins().clone())
plugins::start_frontend_plugins(self.frontend.instance.plugins().clone())
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

self.frontend
.start()
.await
.context(error::StartFrontendSnafu)?;

self.flownode.start().await.context(StartFlownodeSnafu)?;

self.frontend.start().await.context(StartFrontendSnafu)?;
self.flow_worker_manager
.clone()
.run_background(Some(self.flow_shutdown.subscribe()));
Ok(())
}

@@ -303,26 +298,23 @@ impl App for Instance {
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;
.context(error::ShutdownFrontendSnafu)?;

self.procedure_manager
.stop()
.await
.context(StopProcedureManagerSnafu)?;
.context(error::StopProcedureManagerSnafu)?;

self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)?;
self.flow_shutdown
.send(())
.map_err(|_e| {
flow::error::InternalSnafu {
reason: "Failed to send shutdown signal to flow worker manager, all receiver end already closed".to_string(),
}
.build()
})
.context(ShutdownFlownodeSnafu)?;
.context(error::ShutdownDatanodeSnafu)?;

self.flownode
.shutdown()
.await
.context(error::ShutdownFlownodeSnafu)?;

info!("Datanode instance stopped.");

Ok(())
@@ -368,7 +360,7 @@ impl StartCommand {
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?;
.context(error::LoadLayeredConfigSnafu)?;

self.merge_with_cli_options(global_options, &mut opts.component)?;

@@ -381,9 +373,6 @@ impl StartCommand {
global_options: &GlobalOptions,
opts: &mut StandaloneOptions,
) -> Result<()> {
// Should always be standalone mode.
opts.mode = Mode::Standalone;

if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -415,7 +404,7 @@ impl StartCommand {
// frontend grpc addr conflict with datanode default grpc addr
let datanode_grpc_addr = DatanodeOptions::default().grpc.bind_addr;
if addr.eq(&datanode_grpc_addr) {
return IllegalConfigSnafu {
return error::IllegalConfigSnafu {
msg: format!(
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
),
@@ -474,18 +463,19 @@ impl StartCommand {

plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &fe_opts)
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &dn_opts)
.await
.context(StartDatanodeSnafu)?;
.context(error::StartDatanodeSnafu)?;

set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
set_default_timezone(fe_opts.default_timezone.as_deref())
.context(error::InitTimezoneSnafu)?;

let data_home = &dn_opts.storage.data_home;
// Ensure the data_home directory exists.
fs::create_dir_all(path::Path::new(data_home))
.context(CreateDirSnafu { dir: data_home })?;
.context(error::CreateDirSnafu { dir: data_home })?;

let metadata_dir = metadata_store_dir(data_home);
let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
@@ -494,7 +484,7 @@ impl StartCommand {
opts.procedure,
)
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

// Builds cache registry
let layered_cache_builder = LayeredCacheRegistryBuilder::default();
@@ -503,16 +493,16 @@ impl StartCommand {
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.context(BuildCacheRegistrySnafu)?
.context(error::BuildCacheRegistrySnafu)?
.build(),
);

let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone(), Mode::Standalone)
.with_kv_backend(kv_backend.clone())
.with_cache_registry(layered_cache_registry.clone())
.build()
.await
.context(StartDatanodeSnafu)?;
.context(error::StartDatanodeSnafu)?;

let information_extension = Arc::new(StandaloneInformationExtension::new(
datanode.region_server(),
@@ -533,20 +523,24 @@ impl StartCommand {
flow: opts.flow.clone(),
..Default::default()
};

// TODO(discord9): for standalone not use grpc, but just somehow get a handler to frontend grpc client without
// actually make a connection
let fe_server_addr = fe_opts.grpc.bind_addr.clone();
let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
flow_metadata_manager.clone(),
Arc::new(frontend_client),
);
let flownode = Arc::new(
flow_builder
.build()
.await
.map_err(BoxedError::new)
.context(OtherSnafu)?,
);
let flownode = flow_builder
.build()
.await
.map_err(BoxedError::new)
.context(error::OtherSnafu)?;

// set the ref to query for the local flow state
{
@@ -576,7 +570,7 @@ impl StartCommand {
let kafka_options = opts.wal.clone().into();
let wal_options_allocator = build_wal_options_allocator(&kafka_options, kv_backend.clone())
.await
.context(BuildWalOptionsAllocatorSnafu)?;
.context(error::BuildWalOptionsAllocatorSnafu)?;
let wal_options_allocator = Arc::new(wal_options_allocator);
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
@@ -597,8 +591,8 @@ impl StartCommand {
)
.await?;

let mut frontend = FrontendBuilder::new(
fe_opts,
let fe_instance = FrontendBuilder::new(
fe_opts.clone(),
kv_backend.clone(),
layered_cache_registry.clone(),
catalog_manager.clone(),
@@ -609,7 +603,8 @@ impl StartCommand {
.with_plugin(plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;
let fe_instance = Arc::new(fe_instance);

let flow_worker_manager = flownode.flow_worker_manager();
// flow server need to be able to use frontend to write insert requests back
@@ -622,24 +617,28 @@ impl StartCommand {
node_manager,
)
.await
.context(StartFlownodeSnafu)?;
.context(error::StartFlownodeSnafu)?;
flow_worker_manager.set_frontend_invoker(invoker).await;

let (tx, _rx) = broadcast::channel(1);
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::ServersSnafu)?;

let servers = Services::new(opts, Arc::new(frontend.clone()), plugins)
let servers = Services::new(opts, fe_instance.clone(), plugins)
.build()
.await
.context(StartFrontendSnafu)?;
frontend
.build_servers(servers)
.context(StartFrontendSnafu)?;
.context(error::StartFrontendSnafu)?;

let frontend = Frontend {
instance: fe_instance,
servers,
heartbeat_task: None,
export_metrics_task,
};

Ok(Instance {
datanode,
frontend,
flow_worker_manager,
flow_shutdown: tx,
flownode,
procedure_manager,
wal_options_allocator,
_guard: guard,
@@ -661,6 +660,7 @@ impl StartCommand {
node_manager,
cache_invalidator,
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
table_metadata_manager,
table_metadata_allocator,
flow_metadata_manager,
@@ -670,7 +670,7 @@ impl StartCommand {
procedure_manager,
true,
)
.context(InitDdlManagerSnafu)?,
.context(error::InitDdlManagerSnafu)?,
);

Ok(procedure_executor)
@@ -684,7 +684,7 @@ impl StartCommand {
table_metadata_manager
.init()
.await
.context(InitMetadataSnafu)?;
.context(error::InitMetadataSnafu)?;

Ok(table_metadata_manager)
}
@@ -778,6 +778,9 @@ impl InformationExtension for StandaloneInformationExtension {
manifest_size: region_stat.manifest_size,
sst_size: region_stat.sst_size,
index_size: region_stat.index_size,
region_manifest: region_stat.manifest.into(),
data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
metadata_topic_latest_entry_id: region_stat.metadata_topic_latest_entry_id,
}
})
.collect::<Vec<_>>();
@@ -852,7 +855,7 @@ mod tests {

[wal]
provider = "raft_engine"
dir = "/tmp/greptimedb/test/wal"
dir = "./greptimedb_data/test/wal"
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
@@ -860,7 +863,7 @@ mod tests {
sync_write = false

[storage]
data_home = "/tmp/greptimedb/"
data_home = "./greptimedb_data/"
type = "File"

[[storage.providers]]
@@ -892,7 +895,7 @@ mod tests {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
@@ -922,7 +925,10 @@ mod tests {
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
unreachable!()
};
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
assert_eq!(
"./greptimedb_data/test/wal",
raft_engine_config.dir.unwrap()
);

assert!(matches!(
&dn_opts.storage.store,
@@ -946,7 +952,7 @@ mod tests {
}

assert_eq!("debug", logging_opts.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
assert_eq!("./greptimedb_data/test/logs".to_string(), logging_opts.dir);
}

#[test]
@@ -958,7 +964,7 @@ mod tests {

let opts = cmd
.load_options(&GlobalOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_dir: Some("./greptimedb_data/test/logs".to_string()),
log_level: Some("debug".to_string()),

#[cfg(feature = "tokio-console")]
@@ -967,7 +973,7 @@ mod tests {
.unwrap()
.component;

assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
}

@@ -1051,7 +1057,6 @@ mod tests {
let options =
StandaloneOptions::load_layered_options(None, "GREPTIMEDB_STANDALONE").unwrap();
let default_options = StandaloneOptions::default();
assert_eq!(options.mode, default_options.mode);
assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
assert_eq!(options.http, default_options.http);
assert_eq!(options.grpc, default_options.grpc);