Compare commits

..

2 Commits

Author SHA1 Message Date
Lei, HUANG
2ed98ff558 fix: some cr comments 2024-02-20 14:10:57 +08:00
Lei, HUANG
b46386d52a feat: data buffer and related structs 2024-02-19 22:57:25 +08:00
927 changed files with 15675 additions and 60665 deletions

View File

@@ -3,3 +3,13 @@ linker = "aarch64-linux-gnu-gcc"
[alias] [alias]
sqlness = "run --bin sqlness-runner --" sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]

View File

@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[{Makefile,**.mk}]
indent_style = tab

View File

@@ -21,6 +21,3 @@ GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test # Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092 GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

27
.github/CODEOWNERS vendored
View File

@@ -1,27 +0,0 @@
# GreptimeDB CODEOWNERS
# These owners will be the default owners for everything in the repo.
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia
## [Module] Flow
/src/flow @zhongzc @waynexia

View File

@@ -39,7 +39,7 @@ body:
- Query Engine - Query Engine
- Table Engine - Table Engine
- Write Protocols - Write Protocols
- Metasrv - MetaSrv
- Frontend - Frontend
- Datanode - Datanode
- Other - Other

View File

@@ -34,7 +34,7 @@ runs:
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed. if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: /tmp/greptime-*.log path: /tmp/greptime-*.log

View File

@@ -67,7 +67,7 @@ runs:
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed. if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: /tmp/greptime-*.log path: /tmp/greptime-*.log

View File

@@ -26,8 +26,6 @@ runs:
using: composite using: composite
steps: steps:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install rust toolchain - name: Install rust toolchain
uses: dtolnay/rust-toolchain@master uses: dtolnay/rust-toolchain@master
@@ -64,15 +62,15 @@ runs:
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed. if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: /tmp/greptime-*.log path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
- name: Build greptime binary - name: Build greptime binary
shell: pwsh shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
- name: Upload artifacts - name: Upload artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts

View File

@@ -1,13 +0,0 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002

View File

@@ -1,10 +1,8 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md). I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
## Refer to a related PR or issue link (optional)
## What's changed and what's your intention? ## What's changed and what's your intention?
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__ _PLEASE DO NOT LEAVE THIS EMPTY !!!_
Please explain IN DETAIL what the changes are in this PR and why they are needed: Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -18,3 +16,5 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- [ ] I have written the necessary rustdoc comments. - [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests. - [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates. - [x] This PR does not require documentation updates.
## Refer to a related PR or issue link (optional)

View File

@@ -13,7 +13,7 @@ on:
name: Build API docs name: Build API docs
env: env:
RUST_TOOLCHAIN: nightly-2024-04-18 RUST_TOOLCHAIN: nightly-2023-12-19
jobs: jobs:
apidoc: apidoc:
@@ -40,4 +40,3 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4 uses: JamesIves/github-pages-deploy-action@v4
with: with:
folder: target/doc folder: target/doc
single-commit: true

View File

@@ -1,7 +1,7 @@
on: on:
merge_group: merge_group:
pull_request: pull_request:
types: [ opened, synchronize, reopened, ready_for_review ] types: [opened, synchronize, reopened, ready_for_review]
paths-ignore: paths-ignore:
- 'docs/**' - 'docs/**'
- 'config/**' - 'config/**'
@@ -30,20 +30,15 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
env: env:
RUST_TOOLCHAIN: nightly-2024-04-18 RUST_TOOLCHAIN: nightly-2023-12-19
jobs: jobs:
check-typos-and-docs: typos:
name: Check typos and docs name: Spell Check with Typos
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10 - uses: crate-ci/typos@v1.13.10
- name: Check the config docs
run: |
make config-docs && \
git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
check: check:
name: Check name: Check
@@ -98,8 +93,6 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }} toolchain: ${{ env.RUST_TOOLCHAIN }}
@@ -109,7 +102,7 @@ jobs:
shared-key: "build-binaries" shared-key: "build-binaries"
- name: Build greptime binaries - name: Build greptime binaries
shell: bash shell: bash
run: cargo build --bin greptime --bin sqlness-runner run: cargo build
- name: Pack greptime binaries - name: Pack greptime binaries
shell: bash shell: bash
run: | run: |
@@ -124,48 +117,6 @@ jobs:
artifacts-dir: bins artifacts-dir: bins
version: current version: current
fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
with:
target: ${{ matrix.target }}
sqlness: sqlness:
name: Sqlness Test name: Sqlness Test
needs: build needs: build
@@ -185,12 +136,13 @@ jobs:
run: tar -xvf ./bins.tar.gz run: tar -xvf ./bins.tar.gz
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
# FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs - name: Upload sqlness logs
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: /tmp/greptime-*.log path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
sqlness-kafka-wal: sqlness-kafka-wal:
@@ -215,12 +167,13 @@ jobs:
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
# FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs - name: Upload sqlness logs
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs-with-kafka-wal name: sqlness-logs
path: /tmp/greptime-*.log path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
fmt: fmt:
@@ -288,10 +241,6 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "coverage-test" shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release - name: Install latest nextest release
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov - name: Install cargo-llvm-cov
@@ -314,36 +263,18 @@ jobs:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld" CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload - name: Codecov upload
uses: codecov/codecov-action@v4 uses: codecov/codecov-action@v2
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info files: ./lcov.info
flags: rust flags: rust
fail_ci_if_error: false fail_ci_if_error: false
verbose: true verbose: true
compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0

View File

@@ -61,18 +61,6 @@ jobs:
sqlness: sqlness:
name: Sqlness Test name: Sqlness Test
runs-on: ${{ matrix.os }} runs-on: ubuntu-20.04
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'

View File

@@ -13,4 +13,4 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Check License Header - name: Check License Header
uses: korandoru/hawkeye@v5 uses: korandoru/hawkeye@v4

View File

@@ -12,7 +12,7 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
env: env:
RUST_TOOLCHAIN: nightly-2024-04-18 RUST_TOOLCHAIN: nightly-2023-12-19
jobs: jobs:
sqlness: sqlness:
@@ -45,10 +45,10 @@ jobs:
{"text": "Nightly CI failed for sqlness tests"} {"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs - name: Upload sqlness logs
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: /tmp/greptime-*.log path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
test-on-windows: test-on-windows:
@@ -85,10 +85,10 @@ jobs:
env: env:
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed - name: Notify slack if failed
if: failure() if: failure()

View File

@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process. # Use env variables to control all the release process.
env: env:
# The arguments of building greptime. # The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-18 RUST_TOOLCHAIN: nightly-2023-12-19
CARGO_PROFILE: nightly CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness. # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release. # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.8.0 NEXT_RELEASE_VERSION: v0.7.0
jobs: jobs:
allocate-runners: allocate-runners:
@@ -221,8 +221,6 @@ jobs:
arch: x86_64-apple-darwin arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3 artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
needs: [ needs: [
allocate-runners, allocate-runners,
] ]
@@ -262,8 +260,6 @@ jobs:
features: pyo3_backend,servers/dashboard features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3 artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
needs: [ needs: [
allocate-runners, allocate-runners,
] ]
@@ -288,7 +284,7 @@ jobs:
- name: Set build windows result - name: Set build windows result
id: set-build-windows-result id: set-build-windows-result
run: | run: |
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT echo "build-windows-result=success" >> $GITHUB_OUTPUT
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
@@ -299,8 +295,6 @@ jobs:
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-2004-16-cores runs-on: ubuntu-2004-16-cores
outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
@@ -316,7 +310,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
- name: Set build image result - name: Set build image result
id: set-build-image-result id: set-image-build-result
run: | run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT echo "build-image-result=success" >> $GITHUB_OUTPUT

View File

@@ -1,21 +0,0 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}

4
.gitignore vendored
View File

@@ -46,7 +46,3 @@ benchmarks/data
*.code-workspace *.code-workspace
venv/ venv/
# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/

132
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

View File

@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process. - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root). - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md). - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`). - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`). - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

3260
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -18,7 +18,6 @@ members = [
"src/common/grpc-expr", "src/common/grpc-expr",
"src/common/mem-prof", "src/common/mem-prof",
"src/common/meta", "src/common/meta",
"src/common/plugins",
"src/common/procedure", "src/common/procedure",
"src/common/procedure-test", "src/common/procedure-test",
"src/common/query", "src/common/query",
@@ -62,32 +61,18 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.7.2" version = "0.6.0"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
[workspace.dependencies] [workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
#
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] } ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3" aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] } arrow = { version = "47.0" }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] } arrow-array = "47.0"
arrow-flight = "51.0" arrow-flight = "47.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] } arrow-ipc = "47.0"
arrow-schema = { version = "51.0", features = ["serde"] } arrow-schema = { version = "47.0", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] } axum = { version = "0.6", features = ["headers"] }
@@ -99,38 +84,33 @@ bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
dashmap = "5.4" dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" } datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" etcd-client = "0.12"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96f1f0404f421ee560a4310c73c5071e49168168" }
humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
itertools = "0.10" itertools = "0.10"
lazy_static = "1.4" lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" } meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
mockall = "0.11.4" mockall = "0.11.4"
moka = "0.12" moka = "0.12"
notify = "6.1"
num_cpus = "1.16" num_cpus = "1.16"
once_cell = "1.18" once_cell = "1.18"
opentelemetry-proto = { version = "0.5", features = [ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
] } ] }
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] } parquet = "47.0"
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
@@ -138,35 +118,32 @@ prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8" rand = "0.8"
regex = "1.8" regex = "1.8"
regex-automata = { version = "0.4" } regex-automata = { version = "0.2", features = ["transducer"] }
reqwest = { version = "0.11", default-features = false, features = [ reqwest = { version = "0.11", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
"stream", "stream",
"multipart",
] } ] }
rskafka = "0.5" rskafka = "0.5"
rust_decimal = "1.33" rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = "1.0"
serde_with = "3" serde_with = "3"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.7" snafu = "0.7"
sysinfo = "0.30" sysinfo = "0.30"
# on branch v0.44.x # on branch v0.38.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
"visitor", "visitor",
] } ] }
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.36", features = ["full"] } tokio = { version = "1.28", features = ["full"] }
tokio-stream = { version = "0.1" } tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.11", features = ["tls"] } tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
@@ -187,7 +164,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" } common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" } common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" } common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" }
common-procedure = { path = "src/common/procedure" } common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" } common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" } common-query = { path = "src/common/query" }
@@ -225,7 +201,7 @@ table = { path = "src/table" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d" rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
[profile.release] [profile.release]
debug = 1 debug = 1

View File

@@ -3,7 +3,6 @@ CARGO_PROFILE ?=
FEATURES ?= FEATURES ?=
TARGET_DIR ?= TARGET_DIR ?=
TARGET ?= TARGET ?=
BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
@@ -46,10 +45,6 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET} CARGO_BUILD_OPTS += --target ${TARGET}
endif endif
ifneq ($(strip $(BUILD_BIN)),)
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
endif
ifneq ($(strip $(RELEASE)),) ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release CARGO_BUILD_OPTS += --release
endif endif
@@ -169,10 +164,6 @@ check: ## Cargo check all the targets.
clippy: ## Check clippy rules. clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets --all-features -- -D warnings cargo clippy --workspace --all-targets --all-features -- -D warnings
.PHONY: fix-clippy
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix
.PHONY: fmt-check .PHONY: fmt-check
fmt-check: ## Check code format. fmt-check: ## Check code format.
cargo fmt --all -- --check cargo fmt --all -- --check
@@ -192,16 +183,6 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS} make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:latest \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
##@ General ##@ General
# The help target prints out all targets with their descriptions organized # The help target prints out all targets with their descriptions organized

219
README.md
View File

@@ -6,154 +6,145 @@
</picture> </picture>
</p> </p>
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<div align="center">
<h3 align="center"> <h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> | The next-generation hybrid time-series/analytics processing database in the cloud
<a href="https://docs.greptime.com/">User guide</a> | </h3>
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <p align="center">
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/> <a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
</a> &nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/> &nbsp;
</a> <a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
<a href="https://hub.docker.com/r/greptime/greptimedb/"> </p>
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
</a>
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
</a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
</a>
<br/> <p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
<a href="https://greptime.com/slack"> ## What is GreptimeDB
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
</a>
<a href="https://twitter.com/greptime">
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
</a>
<a href="https://www.linkedin.com/company/greptime/">
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
</a>
</div>
## Introduction GreptimeDB is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage.
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities. Our core developers have been building time-series data platforms for years. Based on their best-practices, GreptimeDB is born to give you:
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
## Why GreptimeDB - Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
- Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
- Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
- Native SQL and PromQL for queries, and Python scripting to facilitate complex analytical tasks.
- Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down.
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc.
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you: ## Quick Start
* **Easy horizontal scaling** ### [GreptimePlay](https://greptime.com/playground)
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
* **Analyzing time-series data**
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
* **Cloud-native distributed database**
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
* **Performance and Cost-effective**
Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser. Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/) ### Build
Start instantly with a free cluster. #### Build from Source
### 3. Docker Image To compile GreptimeDB from source, you'll need:
To install GreptimeDB locally, the recommended way is via Docker: - C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on ubuntu and similar name on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from major package manager on macos and linux distributions. You can
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
in CPython, and also need to enable the `pyo3_backend` feature when compiling (by `cargo run -F pyo3_backend` or by adding `pyo3_backend` to src/script/Cargo.toml's `features.default`, like `default = ["python", "pyo3_backend"]`)): this installs a Python shared library required for running Python
scripting engine(In CPython Mode). This is available as `python3-dev` on
ubuntu, you can install it with `sudo apt install python3-dev`, or
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
```shell #### Build with Docker
docker pull greptime/greptimedb
A docker image with necessary dependencies is provided:
```
docker build --network host -f docker/Dockerfile -t greptimedb .
``` ```
Start a GreptimeDB container with: ### Run
Start GreptimeDB from source code, in standalone mode:
```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
``` ```
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
## Build
Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
```shell
make
```
Run a standalone server:
```shell
cargo run -- standalone start cargo run -- standalone start
``` ```
## Extension Or if you built from docker:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Get started
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
## Resources
### Installation
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard ### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard) - [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK ### SDK
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go) - [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java) - [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp) - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl) - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust) - [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
### Grafana Dashboard ### Grafana Dashboard
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory. Our official Grafana dashboard is available at [grafana](./grafana/README.md) directory.
## Project Status ## Project Status
The current version has not yet reached General Availability version standards. This project is in its early stage and under heavy development. We move fast and
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level break things. Benchmark on development branch may not represent its potential
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412) performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community ## Community
@@ -163,12 +154,12 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out: community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack) - GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions) - GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com) - Greptime official [Website](https://greptime.com)
In addition, you may: In addition, you may:
- View our official [Blog](https://greptime.com/blogs/) - View our official [Blog](https://greptime.com/blogs/index)
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/) - Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime) - Follow us on [Twitter](https://twitter.com/greptime)
@@ -179,7 +170,7 @@ open contributions and allowing you to use the software however you want.
## Contributing ## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information. Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement ## Acknowledgement

View File

@@ -4,35 +4,13 @@ version.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
[lints]
workspace = true
[dependencies] [dependencies]
api.workspace = true
arrow.workspace = true arrow.workspace = true
chrono.workspace = true chrono.workspace = true
clap.workspace = true clap.workspace = true
client.workspace = true client.workspace = true
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1" indicatif = "0.17.1"
itertools.workspace = true itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
tokio.workspace = true tokio.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,11 +0,0 @@
Benchmarkers for GreptimeDB
--------------------------------
## Wal Benchmarker
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.
### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.

View File

@@ -1,21 +0,0 @@
# Refers to the documents of `Args` in benchmarks/src/wal.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false

View File

@@ -29,7 +29,7 @@ use client::api::v1::column::Values;
use client::api::v1::{ use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType, Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
}; };
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt; use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder; use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -215,7 +215,37 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
ColumnDataType::String, ColumnDataType::String,
) )
} }
_ => unimplemented!(), DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
} }
} }
@@ -472,9 +502,9 @@ async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for i in 0..num_iter { for i in 0..num_iter {
let now = Instant::now(); let now = Instant::now();
let res = db.sql(&query).await.unwrap(); let res = db.sql(&query).await.unwrap();
match res.data { match res {
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (), Output::AffectedRows(_) | Output::RecordBatches(_) => (),
OutputData::Stream(stream) => { Output::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap(); stream.try_collect::<Vec<_>>().await.unwrap();
} }
} }

View File

@@ -1,326 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(int_roundings)]
use std::fs;
use std::sync::Arc;
use std::time::Instant;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use benchmarks::metrics;
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
use clap::Parser;
use common_telemetry::info;
use common_wal::config::kafka::common::BackoffConfig;
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::options::{KafkaWalOptions, WalOptions};
use itertools::Itertools;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use mito2::wal::Wal;
use prometheus::{Encoder, TextEncoder};
use rand::distributions::{Alphanumeric, DistString};
use rand::rngs::SmallRng;
use rand::SeedableRng;
use rskafka::client::partition::Compression;
use rskafka::client::ClientBuilder;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
/// Drives the WAL benchmark: builds `cfg.num_regions` regions, partitions them
/// into per-worker chunks, then runs an optional concurrent write phase followed
/// by an optional concurrent read (replay) phase, and finally logs a report via
/// `dump_report`.
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
    // Each worker handles a chunk of at most `ceil(num_regions / num_workers)` regions.
    let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
    // NOTE(review): a fresh RNG seeded with the same `cfg.rng_seed` is created per
    // region, so every region gets the identical randomly-generated schema.
    // NOTE(review): when `num_workers` does not evenly divide `num_regions`, the
    // number of chunks produced here can be smaller than `num_workers`, and the
    // `region_chunks[i as usize]` indexing below would panic — confirm the intended
    // invariant (main only validates `num_regions >= num_workers`).
    let region_chunks = (0..cfg.num_regions)
        .map(|id| {
            build_region(
                id as u64,
                topics,
                &mut SmallRng::seed_from_u64(cfg.rng_seed),
                cfg,
            )
        })
        .chunks(chunk_size as usize)
        .into_iter()
        .map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
        .collect::<Vec<_>>();
    // Elapsed milliseconds for each phase; stay 0 when a phase is skipped.
    let mut write_elapsed = 0;
    let mut read_elapsed = 0;
    if !cfg.skip_write {
        info!("Benchmarking write ...");
        let num_scrapes = cfg.num_scrapes;
        let timer = Instant::now();
        // One spawned task per worker; each scrape appends one entry per region
        // to a writer, then flushes the whole batch with a single `write_to_wal`.
        futures::future::join_all((0..cfg.num_workers).map(|i| {
            let wal = wal.clone();
            let regions = region_chunks[i as usize].clone();
            tokio::spawn(async move {
                for _ in 0..num_scrapes {
                    let mut wal_writer = wal.writer();
                    regions
                        .iter()
                        .for_each(|region| region.add_wal_entry(&mut wal_writer));
                    wal_writer.write_to_wal().await.unwrap();
                }
            })
        }))
        .await;
        write_elapsed += timer.elapsed().as_millis();
    }
    if !cfg.skip_read {
        info!("Benchmarking read ...");
        let timer = Instant::now();
        // One spawned task per worker; each worker replays its regions sequentially.
        futures::future::join_all((0..cfg.num_workers).map(|i| {
            let wal = wal.clone();
            let regions = region_chunks[i as usize].clone();
            tokio::spawn(async move {
                for region in regions.iter() {
                    region.replay(&wal).await;
                }
            })
        }))
        .await;
        read_elapsed = timer.elapsed().as_millis();
    }
    dump_report(cfg, write_elapsed, read_elapsed);
}
/// Assembles a single benchmark `Region` for the given id, choosing its WAL
/// options according to the configured provider. Kafka regions are assigned a
/// topic round-robin by `id`; raft-engine regions need no extra options.
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
    let wal_options = match cfg.wal_provider {
        WalProvider::Kafka => {
            assert!(!topics.is_empty());
            // Round-robin topic assignment keyed by region id.
            let topic = topics[id as usize % topics.len()].clone();
            WalOptions::Kafka(KafkaWalOptions { topic })
        }
        WalProvider::RaftEngine => WalOptions::RaftEngine,
    };
    let schema = build_schema(&parse_col_types(&cfg.col_types), rng);
    Region::new(
        RegionId::from_u64(id),
        schema,
        wal_options,
        cfg.num_rows,
        cfg.rng_seed,
    )
}
/// Builds the column schemas for a benchmark region: one field column per
/// requested data type (with a random 5-character name), plus a trailing
/// millisecond-timestamp column named "ts".
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
    let mut columns = Vec::with_capacity(col_types.len() + 1);
    for col_type in col_types {
        columns.push(ColumnSchema {
            column_name: Alphanumeric.sample_string(&mut rng, 5),
            datatype: *col_type as i32,
            semantic_type: SemanticType::Field as i32,
            datatype_extension: None,
        });
    }
    // Trailing timestamp column.
    // NOTE(review): this is marked `SemanticType::Tag` — confirm this is intended
    // for a timestamp column rather than a timestamp semantic type.
    columns.push(ColumnSchema {
        column_name: "ts".to_string(),
        datatype: ColumnDataType::TimestampMillisecond as i32,
        semantic_type: SemanticType::Tag as i32,
        datatype_extension: None,
    });
    columns
}
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
let cost_report = format!(
"write costs: {} ms, read costs: {} ms",
write_elapsed, read_elapsed,
);
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
let write_throughput = if write_elapsed > 0 {
(total_written_bytes * 1000).div_floor(write_elapsed)
} else {
0
};
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
let read_throughput = if read_elapsed > 0 {
(total_read_bytes * 1000).div_floor(read_elapsed)
} else {
0
};
let throughput_report = format!(
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
total_written_bytes,
total_read_bytes,
write_throughput,
write_throughput.div_floor(1 << 20),
read_throughput,
read_throughput.div_floor(1 << 20),
);
let metrics_report = if cfg.report_metrics {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
let metrics = prometheus::gather();
encoder.encode(&metrics, &mut buffer).unwrap();
String::from_utf8(buffer).unwrap()
} else {
String::new()
};
info!(
r#"
Benchmark config:
{cfg:?}
Benchmark report:
{cost_report}
{throughput_report}
{metrics_report}"#
);
}
/// Creates `cfg.num_topics` Kafka topics via the controller client and returns
/// their names. Topic names are either suffixed with a random UUID (when
/// `cfg.random_topics` is set) or deterministic by index.
async fn create_topics(cfg: &Config) -> Vec<String> {
    let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
        .build()
        .await
        .unwrap();
    let ctrl_client = client.controller_client().unwrap();
    // Collect all creation futures first so they are awaited concurrently below.
    let mut topics = Vec::with_capacity(cfg.num_topics as usize);
    let mut tasks = Vec::with_capacity(cfg.num_topics as usize);
    for i in 0..cfg.num_topics {
        let name = if cfg.random_topics {
            format!(
                "greptime_wal_bench_topic_{}_{}",
                uuid::Uuid::new_v4().as_u128(),
                i
            )
        } else {
            format!("greptime_wal_bench_topic_{}", i)
        };
        // One partition, replicated across all brokers, with a 2000 ms timeout.
        tasks.push(ctrl_client.create_topic(
            name.clone(),
            1,
            cfg.bootstrap_brokers.len() as i16,
            2000,
        ));
        topics.push(name);
    }
    // Must ignore errors since we allow topics being created more than once.
    let _ = futures::future::try_join_all(tasks).await;
    topics
}
/// Maps a compression name from the config onto the rskafka `Compression`
/// enum. Panics on any unrecognized name.
fn parse_compression(comp: &str) -> Compression {
    if comp == "no" {
        Compression::NoCompression
    } else if comp == "gzip" {
        Compression::Gzip
    } else if comp == "lz4" {
        Compression::Lz4
    } else if comp == "snappy" {
        Compression::Snappy
    } else if comp == "zstd" {
        Compression::Zstd
    } else {
        unreachable!("Unrecognized compression {comp}")
    }
}
/// Parses a column-type spec such as "ifs" or "ifsx3" into concrete data
/// types: 'i' -> Int64, 'f' -> Float64, 's' -> String (case-insensitive).
/// An optional "xN" suffix repeats the whole pattern N times. Panics on any
/// other character or on a malformed repeat count.
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
    let parts: Vec<&str> = col_types.split('x').collect();
    assert!(parts.len() <= 2);
    let pattern = parts[0];
    let repeat = match parts.get(1) {
        Some(r) => r.parse::<usize>().unwrap(),
        None => 1,
    };
    let mut types = Vec::with_capacity(pattern.len() * repeat);
    for _ in 0..repeat {
        for c in pattern.chars() {
            types.push(match c {
                'i' | 'I' => ColumnDataType::Int64,
                'f' | 'F' => ColumnDataType::Float64,
                's' | 'S' => ColumnDataType::String,
                other => unreachable!("Cannot parse {other} as a column data type"),
            });
        }
    }
    types
}
/// Entry point: parses the CLI arguments (or a TOML config file, which takes
/// precedence over all flags when given), validates them, then builds a Tokio
/// runtime and runs the benchmark against the configured WAL provider.
fn main() {
    // Sets the global logging to INFO and suppress loggings from rskafka other than ERROR and upper ones.
    std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
    common_telemetry::init_default_ut_logging();
    let args = Args::parse();
    // A non-empty `cfg_file` replaces the CLI arguments entirely.
    let cfg = if !args.cfg_file.is_empty() {
        toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
    } else {
        Config::from(args)
    };
    // Validates arguments.
    if cfg.num_regions < cfg.num_workers {
        panic!("num_regions must be greater than or equal to num_workers");
    }
    // Every sizing parameter must be non-zero; `min` chains them so a single
    // comparison catches any zero value.
    if cfg
        .num_workers
        .min(cfg.num_topics)
        .min(cfg.num_regions)
        .min(cfg.num_scrapes)
        .min(cfg.max_batch_size.as_bytes() as u32)
        .min(cfg.bootstrap_brokers.len() as u32)
        == 0
    {
        panic!("Invalid arguments");
    }
    // Builds a multi-threaded runtime and blocks on the whole benchmark run.
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            match cfg.wal_provider {
                WalProvider::Kafka => {
                    // Provision topics first, then build the Kafka-backed log store.
                    let topics = create_topics(&cfg).await;
                    let kafka_cfg = KafkaConfig {
                        broker_endpoints: cfg.bootstrap_brokers.clone(),
                        max_batch_size: cfg.max_batch_size,
                        linger: cfg.linger,
                        backoff: BackoffConfig {
                            init: cfg.backoff_init,
                            max: cfg.backoff_max,
                            base: cfg.backoff_base,
                            deadline: Some(cfg.backoff_deadline),
                        },
                        compression: parse_compression(&cfg.compression),
                        ..Default::default()
                    };
                    let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
                    let wal = Arc::new(Wal::new(store));
                    run_benchmarker(&cfg, &topics, wal).await;
                }
                WalProvider::RaftEngine => {
                    // The benchmarker assumes the raft engine directory exists.
                    let store = RaftEngineLogStore::try_new(
                        "/tmp/greptimedb/raft-engine-wal".to_string(),
                        RaftEngineConfig::default(),
                    )
                    .await
                    .map(Arc::new)
                    .unwrap();
                    let wal = Arc::new(Wal::new(store));
                    // No topics are needed for the raft-engine provider.
                    run_benchmarker(&cfg, &[], wal).await;
                }
            }
        });
}

View File

@@ -1,16 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod metrics;
pub mod wal_bench;

View File

@@ -1,39 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
use prometheus::*;
/// Logstore label.
// NOTE(review): this label is not referenced anywhere in this file; confirm
// external usage before removing it.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";

lazy_static! {
    /// Counters of bytes of each operation on a logstore, partitioned by the
    /// operation type label (`optype`).
    pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "greptime_bench_wal_op_bytes_total",
        "wal operation bytes total",
        &[OPTYPE_LABEL],
    )
    .unwrap();
    /// Counter of bytes of the append_batch operation; the `write` label of
    /// [`METRIC_WAL_OP_BYTES_TOTAL`].
    pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["write"],
    );
    /// Counter of bytes of the read operation; the `read` label of
    /// [`METRIC_WAL_OP_BYTES_TOTAL`].
    pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["read"],
    );
}

View File

@@ -1,361 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::mem::size_of;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
use clap::{Parser, ValueEnum};
use common_base::readable_size::ReadableSize;
use common_wal::options::WalOptions;
use futures::StreamExt;
use mito2::wal::{Wal, WalWriter};
use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use crate::metrics;
/// The wal (write-ahead log) provider backing the benchmark.
///
/// Serialized in snake_case, so config files use `raft_engine` / `kafka`.
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum WalProvider {
    /// The local raft-engine based log store (the default).
    #[default]
    RaftEngine,
    /// The Kafka based remote log store.
    Kafka,
}
#[derive(Parser)]
pub struct Args {
    /// The provided configuration file.
    /// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
    /// When set, the file takes precedence over all other command-line arguments.
    #[clap(long, short = 'c')]
    pub cfg_file: String,

    /// The wal provider.
    #[clap(long, value_enum, default_value_t = WalProvider::default())]
    pub wal_provider: WalProvider,

    /// The advertised addresses of the kafka brokers.
    /// If there are multiple bootstrap brokers, their addresses should be separated by comma, for e.g. "localhost:9092,localhost:9093".
    #[clap(long, short = 'b', default_value = "localhost:9092")]
    pub bootstrap_brokers: String,

    /// The number of workers each running in a dedicated thread.
    #[clap(long, default_value_t = num_cpus::get() as u32)]
    pub num_workers: u32,

    /// The number of kafka topics to be created.
    #[clap(long, default_value_t = 32)]
    pub num_topics: u32,

    /// The number of regions.
    #[clap(long, default_value_t = 1000)]
    pub num_regions: u32,

    /// The number of times each region is scraped.
    #[clap(long, default_value_t = 1000)]
    pub num_scrapes: u32,

    /// The number of rows in each wal entry.
    /// Each time a region is scraped, a wal entry containing this many rows will be produced.
    #[clap(long, default_value_t = 5)]
    pub num_rows: u32,

    /// The column types of the schema for each region.
    /// Currently, three column types are supported:
    /// - i = ColumnDataType::Int64
    /// - f = ColumnDataType::Float64
    /// - s = ColumnDataType::String
    /// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
    ///
    /// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
    /// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
    /// This feature is useful if you want to specify many columns.
    #[clap(long, default_value = "ifs")]
    pub col_types: String,

    /// The maximum size of a batch of kafka records.
    /// The default value is 512KB.
    #[clap(long, default_value = "512KB")]
    pub max_batch_size: ReadableSize,

    /// The minimum latency the kafka client issues a batch of kafka records.
    /// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
    #[clap(long, default_value = "1ms")]
    pub linger: String,

    /// The initial backoff delay of the kafka consumer.
    #[clap(long, default_value = "10ms")]
    pub backoff_init: String,

    /// The maximum backoff delay of the kafka consumer.
    #[clap(long, default_value = "1s")]
    pub backoff_max: String,

    /// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
    #[clap(long, default_value_t = 2)]
    pub backoff_base: u32,

    /// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
    #[clap(long, default_value = "3s")]
    pub backoff_deadline: String,

    /// The client-side compression algorithm for kafka records.
    /// Supported values: "no", "gzip", "lz4", "snappy", "zstd".
    #[clap(long, default_value = "zstd")]
    pub compression: String,

    /// The seed of random number generators.
    #[clap(long, default_value_t = 42)]
    pub rng_seed: u64,

    /// Skips the read phase, aka. region replay, if set to true.
    #[clap(long, default_value_t = false)]
    pub skip_read: bool,

    /// Skips the write phase if set to true.
    #[clap(long, default_value_t = false)]
    pub skip_write: bool,

    /// Randomly generates topic names if set to true.
    /// Useful when you want to run the benchmarker without worrying about the topics created before.
    #[clap(long, default_value_t = false)]
    pub random_topics: bool,

    /// Logs out the gathered prometheus metrics when the benchmarker ends.
    #[clap(long, default_value_t = false)]
    pub report_metrics: bool,
}
/// Benchmarker config.
///
/// Mirrors [`Args`] with duration strings parsed into [`Duration`] values
/// and the broker list split into individual addresses. Can also be
/// deserialized directly from a TOML configuration file.
/// See [`Args`] for the meaning of each field.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub wal_provider: WalProvider,
    pub bootstrap_brokers: Vec<String>,
    pub num_workers: u32,
    pub num_topics: u32,
    pub num_regions: u32,
    pub num_scrapes: u32,
    pub num_rows: u32,
    pub col_types: String,
    pub max_batch_size: ReadableSize,
    // Durations are (de)serialized from human-readable strings, e.g. "1ms".
    #[serde(with = "humantime_serde")]
    pub linger: Duration,
    #[serde(with = "humantime_serde")]
    pub backoff_init: Duration,
    #[serde(with = "humantime_serde")]
    pub backoff_max: Duration,
    pub backoff_base: u32,
    #[serde(with = "humantime_serde")]
    pub backoff_deadline: Duration,
    pub compression: String,
    pub rng_seed: u64,
    pub skip_read: bool,
    pub skip_write: bool,
    pub random_topics: bool,
    pub report_metrics: bool,
}
impl From<Args> for Config {
    /// Builds a benchmark [`Config`] from the parsed CLI arguments.
    ///
    /// The broker list is split on commas, `num_workers` is clamped to the
    /// number of available CPU cores, and the human-readable duration
    /// strings (e.g. "1ms") are parsed into [`Duration`]s.
    ///
    /// # Panics
    /// Panics if any duration argument is not a valid humantime string.
    fn from(args: Args) -> Self {
        // Returns the struct expression directly instead of binding it to a
        // local first (clippy::let_and_return).
        Self {
            wal_provider: args.wal_provider,
            bootstrap_brokers: args
                .bootstrap_brokers
                .split(',')
                .map(ToString::to_string)
                .collect(),
            num_workers: args.num_workers.min(num_cpus::get() as u32),
            num_topics: args.num_topics,
            num_regions: args.num_regions,
            num_scrapes: args.num_scrapes,
            num_rows: args.num_rows,
            col_types: args.col_types,
            max_batch_size: args.max_batch_size,
            linger: humantime::parse_duration(&args.linger).unwrap(),
            backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
            backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
            backoff_base: args.backoff_base,
            backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
            compression: args.compression,
            rng_seed: args.rng_seed,
            skip_read: args.skip_read,
            skip_write: args.skip_write,
            random_topics: args.random_topics,
            report_metrics: args.report_metrics,
        }
    }
}
/// The region used for wal benchmarker.
pub struct Region {
    // The region identifier; used both for writing and scanning the wal.
    id: RegionId,
    // The column schema shared by all generated rows.
    schema: Vec<ColumnSchema>,
    // Per-region wal options forwarded to the wal writer and scanner.
    wal_options: WalOptions,
    // The next sequence number; advanced by `num_rows` per scrape.
    next_sequence: AtomicU64,
    // The next wal entry id; advanced by 1 per scrape.
    next_entry_id: AtomicU64,
    // The next timestamp in milliseconds; advanced by 1000 per generated
    // timestamp value.
    next_timestamp: AtomicI64,
    // The per-region deterministic rng; always `Some` after construction.
    rng: Mutex<Option<SmallRng>>,
    // The number of rows in each generated wal entry.
    num_rows: u32,
}
impl Region {
    /// Creates a new region.
    ///
    /// The rng is seeded with `rng_seed` so row generation is deterministic
    /// per region; sequence and entry-id counters start at 1.
    pub fn new(
        id: RegionId,
        schema: Vec<ColumnSchema>,
        wal_options: WalOptions,
        num_rows: u32,
        rng_seed: u64,
    ) -> Self {
        Self {
            id,
            schema,
            wal_options,
            next_sequence: AtomicU64::new(1),
            next_entry_id: AtomicU64::new(1),
            // An arbitrary but fixed start timestamp, in epoch milliseconds.
            next_timestamp: AtomicI64::new(1655276557000),
            rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
            num_rows,
        }
    }

    /// Scrapes the region and adds the generated entry to wal.
    ///
    /// Builds one `Put` mutation containing `num_rows` generated rows,
    /// advances the sequence by `num_rows` and the entry id by 1, and
    /// records the entry's estimated size in the write-bytes metric.
    pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
        let mutation = Mutation {
            op_type: OpType::Put as i32,
            sequence: self
                .next_sequence
                .fetch_add(self.num_rows as u64, Ordering::Relaxed),
            rows: Some(self.build_rows()),
        };
        let entry = WalEntry {
            mutations: vec![mutation],
        };
        metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);

        wal_writer
            .add_entry(
                self.id,
                self.next_entry_id.fetch_add(1, Ordering::Relaxed),
                &entry,
                &self.wal_options,
            )
            .unwrap();
    }

    /// Replays the region.
    ///
    /// Scans the wal for this region starting from entry id 0, draining the
    /// stream and recording each entry's estimated size in the read-bytes
    /// metric.
    pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
        let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
        while let Some(res) = wal_stream.next().await {
            let (_, entry) = res.unwrap();
            metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
        }
    }

    /// Computes the estimated size in bytes of the entry.
    ///
    /// The estimate sums the in-memory sizes of the entry wrapper, the
    /// mutation and row containers, the schema (including column-name
    /// capacities), and the `Value` structs of every row.
    /// NOTE(review): heap payloads of values (e.g. string contents) are not
    /// counted, so this underestimates entries with string columns.
    pub fn entry_estimated_size(entry: &WalEntry) -> usize {
        let wrapper_size = size_of::<WalEntry>()
            + entry.mutations.capacity() * size_of::<Mutation>()
            + size_of::<Rows>();

        // Assumes exactly one mutation per entry, as built by `add_wal_entry`.
        let rows = entry.mutations[0].rows.as_ref().unwrap();

        let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
            + rows
                .schema
                .iter()
                .map(|s| s.column_name.capacity())
                .sum::<usize>();
        let values_size = (rows.rows.capacity() * size_of::<Row>())
            + rows
                .rows
                .iter()
                .map(|r| r.values.capacity() * size_of::<Value>())
                .sum::<usize>();

        wrapper_size + schema_size + values_size
    }

    /// Builds `num_rows` rows conforming to the region schema.
    ///
    /// Values are generated column-first, then transposed into rows.
    fn build_rows(&self) -> Rows {
        let cols = self
            .schema
            .iter()
            .map(|col_schema| {
                let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
                self.build_col(&col_data_type, self.num_rows)
            })
            .collect::<Vec<_>>();

        let rows = (0..self.num_rows)
            .map(|i| {
                let values = cols.iter().map(|col| col[i as usize].clone()).collect();
                Row { values }
            })
            .collect();

        Rows {
            schema: self.schema.clone(),
            rows,
        }
    }

    /// Builds a column of `num_rows` generated values of the given type.
    ///
    /// Timestamps advance monotonically by 1000 ms per value; int, float,
    /// and string values are drawn from the region's seeded rng.
    fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
        let mut rng_guard = self.rng.lock().unwrap();
        // The rng is always `Some` after construction; see `Region::new`.
        let rng = rng_guard.as_mut().unwrap();
        match col_data_type {
            ColumnDataType::TimestampMillisecond => (0..num_rows)
                .map(|_| {
                    let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
                    Value {
                        value_data: Some(ValueData::TimestampMillisecondValue(ts)),
                    }
                })
                .collect(),
            ColumnDataType::Int64 => (0..num_rows)
                .map(|_| {
                    let v = rng.sample(Uniform::new(0, 10_000));
                    Value {
                        value_data: Some(ValueData::I64Value(v)),
                    }
                })
                .collect(),
            ColumnDataType::Float64 => (0..num_rows)
                .map(|_| {
                    let v = rng.sample(Uniform::new(0.0, 5000.0));
                    Value {
                        value_data: Some(ValueData::F64Value(v)),
                    }
                })
                .collect(),
            ColumnDataType::String => (0..num_rows)
                .map(|_| {
                    let v = Alphanumeric.sample_string(rng, 10);
                    Value {
                        value_data: Some(ValueData::StringValue(v)),
                    }
                })
                .collect(),
            // NOTE(review): other data types are never generated by this
            // benchmark; confirm schemas stay restricted to the variants above.
            _ => unreachable!(),
        }
    }
}

View File

@@ -1,127 +0,0 @@
# https://git-cliff.org/docs/configuration
[remote.github]
owner = "GreptimeTeam"
repo = "greptimedb"
[changelog]
header = ""
footer = ""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
# {{ version }}
Release date: {{ timestamp | date(format="%B %d, %Y") }}
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
{%- if breakings | length > 0 %}
## Breaking changes
{% for commit in breakings %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor %}
{%- endif -%}
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
{% for group, commits in grouped_commits %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor -%}
{% endfor %}
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}
{% if github.contributors | length != 0 %}
{% raw %}\n{% endraw -%}
## All Contributors
We would like to thank the following contributors from the GreptimeDB community:
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
{%- set bots = ['dependabot[bot]'] %}
{% for contributor in contributors %}
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
{%- if loop.first -%}
[@{{ contributor }}](https://github.com/{{ contributor }})
{%- else -%}
, [@{{ contributor }}](https://github.com/{{ contributor }})
{%- endif -%}
{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}
{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
trim = true
[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
{ message = "^chore\\(release\\): prepare for", skip = true },
{ message = "^chore\\(deps.*\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# regex for matching git tags
# tag_pattern = "v[0-9].*"
# regex for skipping tags
# skip_tags = ""
# regex for ignoring tags
ignore_tags = ".*-nightly-.*"
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
# limit the number of commits included in the changelog.
# limit_commits = 42

View File

@@ -8,6 +8,5 @@ coverage:
ignore: ignore:
- "**/error*.rs" # ignore all error.rs files - "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner - "tests/runner/*.rs" # ignore integration test runner
- "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key comment: # this is a top-level key
layout: "diff" layout: "diff"

View File

@@ -1,19 +0,0 @@
# Configurations
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Cluster Mode
### Frontend
{{ toml2docs "./frontend.example.toml" }}
### Metasrv
{{ toml2docs "./metasrv.example.toml" }}
### Datanode
{{ toml2docs "./datanode.example.toml" }}

View File

@@ -1,376 +0,0 @@
# Configurations
## Standalone Mode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: the wal is stored remotely in Kafka. |
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | `None` | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
## Cluster Mode
### Frontend
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
| `mysql.tls.key_path` | String | `None` | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
### Metasrv
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The etcd the maximum size of any request is 1.5 MiB<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comments out the `max_metadata_value_size`, for don't split large value (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | -- |
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: the wal is stored remotely in Kafka. |
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |

View File

@@ -1,430 +1,159 @@
## The running mode of the datanode. It can be `standalone` or `distributed`. # Node running mode, see `standalone.example.toml`.
mode = "standalone" mode = "distributed"
# The datanode identifier, should be unique.
## The datanode identifier and should be unique in the cluster.
## +toml2docs:none-default
node_id = 42 node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
## Start services after regions have obtained leases. rpc_addr = "127.0.0.1:3001"
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv. # Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Start services after regions have obtained leases.
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false require_lease_before_startup = false
## Initialize all regions in the background during the startup. # Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized. # By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
## The gRPC address of the datanode.
rpc_addr = "127.0.0.1:3001"
## The hostname of the datanode.
## +toml2docs:none-default
rpc_hostname = "127.0.0.1"
## The number of gRPC server worker threads.
rpc_runtime_size = 8
## The maximum receive message size for gRPC server.
rpc_max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. # Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
interval = "3s" interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv. # Metasrv client options.
retry_interval = "3s"
## The metasrv client options.
[meta_client] [meta_client]
## The addresses of the metasrv. # Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"] metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout, 500 milliseconds by default.
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms" heartbeat_timeout = "500ms"
# Operation timeout, 3 seconds by default.
## DDL timeout. timeout = "3s"
ddl_timeout = "10s" # Connect server timeout, 1 second by default.
## Connect server timeout.
connect_timeout = "1s" connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true tcp_nodelay = true
## The configuration about the cache of the metadata. # WAL options.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The WAL options.
[wal] [wal]
## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: it's remote wal that data is stored in Kafka.
provider = "raft_engine" provider = "raft_engine"
## The directory to store the WAL files. # Raft-engine wal options, see `standalone.example.toml`.
## **It's only used when the provider is `raft_engine`**. # dir = "/tmp/greptimedb/wal"
## +toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB" file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB" purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m" purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128 read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false sync_write = false
## Whether to reuse logically truncated log files. # Kafka wal options, see `standalone.example.toml`.
## **It's only used when the provider is `raft_engine`**. # broker_endpoints = ["127.0.0.1:9092"]
enable_log_recycle = true # Warning: Kafka has a default limit of 1MB per message in a topic.
# max_batch_size = "1MB"
# linger = "200ms"
# consumer_wait_timeout = "100ms"
# backoff_init = "500ms"
# backoff_max = "10s"
# backoff_base = 2
# backoff_deadline = "5mins"
## Whether to pre-create log files on start up. # Storage options, see `standalone.example.toml`.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_size = "1MB"
## The linger duration of a kafka batch producer.
## **It's only used when the provider is `kafka`**.
linger = "200ms"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage] [storage]
## The working home directory. # The working home directory.
data_home = "/tmp/greptimedb/" data_home = "/tmp/greptimedb/"
# Storage type.
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
## Cache configuration for object storage such as 'S3' etc. # Cache configuration for object storage such as 'S3' etc.
## The local file cache directory. # The local file cache directory
## +toml2docs:none-default # cache_path = "/path/local_cache"
cache_path = "/path/local_cache" # The local file cache capacity in bytes.
# cache_capacity = "256MB"
## The local file cache capacity in bytes.
## +toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## +toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## +toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## +toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## +toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## +toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential_path = "test"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
region = "us-west-2"
# Custom storage options # Custom storage options
# [[storage.providers]] #[[storage.providers]]
# type = "S3" #type = "S3"
# [[storage.providers]] #[[storage.providers]]
# type = "Gcs" #type = "Gcs"
## The region engine options. You can configure multiple region engines. # Mito engine options
[[region_engine]] [[region_engine]]
## The Mito engine options.
[region_engine.mito] [region_engine.mito]
# Number of region workers
## Number of region workers.
num_workers = 8 num_workers = 8
# Request channel size of each worker
## Request channel size of each worker.
worker_channel_size = 128 worker_channel_size = 128
# Max batch size for a worker to handle requests
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64 worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10 manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false compress_manifest = false
# Max number of running background jobs
## Max number of running background jobs
max_background_jobs = 4 max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h" auto_flush_interval = "1h"
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
global_write_buffer_size = "1GB" global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
global_write_buffer_reject_size = "2GB" global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata. Setting it to 0 to disable the cache.
## Cache size for SST metadata. Setting it to 0 to disable the cache. # If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
sst_meta_cache_size = "128MB" sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache. # If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
vector_cache_size = "512MB" vector_cache_size = "512MB"
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache. # If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB" page_cache_size = "512MB"
# Buffer size for SST writing.
## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
# Parallelism to scan a region (default: 1/4 of cpu cores).
## Parallelism to scan a region (default: 1/4 of cpu cores). # - 0: using the default value (1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores). # - 1: scan in current thread.
## - `1`: scan in current thread. # - n: scan in parallelism n.
## - `n`: scan in parallelism n.
scan_parallelism = 0 scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
# Whether to allow stale WAL entries read during replay.
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false allow_stale_entries = false
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
# Whether to create the index on flush.
## Whether to create the index on flush. # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
create_on_flush = "auto" create_on_flush = "auto"
# Whether to create the index on compaction.
## Whether to create the index on compaction. # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
create_on_compaction = "auto" create_on_compaction = "auto"
# Whether to apply the index on query
## Whether to apply the index on query # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
apply_on_query = "auto" apply_on_query = "auto"
# Memory threshold for performing an external sort during index creation.
## Memory threshold for performing an external sort during index creation. # Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. mem_threshold_on_create = "64MB"
mem_threshold_on_create = "64M" # File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = "" intermediate_path = ""
[region_engine.mito.memtable] # Log options, see `standalone.example.toml`
## Memtable type. # [logging]
## - `time_series`: time-series memtable # dir = "/tmp/greptimedb/logs"
## - `partition_tree`: partition tree memtable (experimental) # level = "info"
type = "time_series"
## The max number of keys in one shard. # Datanode export the metrics generated by itself
## Only available for `partition_tree` memtable. # encoded to Prometheus remote-write format
index_max_keys_per_shard = 8192 # and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
## The max rows of data inside the actively writing buffer in one shard. # [export_metrics]
## Only available for `partition_tree` memtable. # whether enable export metrics, default is false
data_freeze_threshold = 32768 # enable = false
# The interval of export metrics
## Max dictionary bytes. # write_interval = "30s"
## Only available for `partition_tree` memtable. # [export_metrics.remote_write]
fork_dictionary_bytes = "1GiB" # The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
# url = ""
## The logging options. # HTTP headers of Prometheus remote-write carry
[logging] # headers = {}
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,192 +1,104 @@
## The running mode of the datanode. It can be `standalone` or `distributed`. # Node running mode, see `standalone.example.toml`.
mode = "standalone" mode = "distributed"
# The default timezone of the server
# default_timezone = "UTC"
## The default timezone of the server.
## +toml2docs:none-default
default_timezone = "UTC"
## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. # Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
interval = "18s" interval = "5s"
# Interval for retry sending heartbeat task, 5 seconds by default.
retry_interval = "5s"
## Interval for retrying to send heartbeat messages to the metasrv. # HTTP server options, see `standalone.example.toml`.
retry_interval = "3s"
## The HTTP server options.
[http] [http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout.
timeout = "30s" timeout = "30s"
## HTTP request body limit.
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
body_limit = "64MB" body_limit = "64MB"
## The gRPC server options. # gRPC server options, see `standalone.example.toml`.
[grpc] [grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## MySQL server options. # MySQL server options, see `standalone.example.toml`.
[mysql] [mysql]
## Whether to enable.
enable = true enable = true
## The addr to bind the MySQL server.
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2 runtime_size = 2
# MySQL server TLS options. # MySQL server TLS options, see `standalone.example.toml`.
[mysql.tls] [mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable" mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for certificate and key file changes and auto reload # PostgreSQL server options, see `standalone.example.toml`.
watch = false
## PostgreSQL server options.
[postgres] [postgres]
## Whether to enable
enable = true enable = true
## The addr to bind the PostgresSQL server.
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section. # PostgreSQL server TLS options, see `standalone.example.toml`.
[postgres.tls] [postgres.tls]
## TLS mode.
mode = "disable" mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for certificate and key file changes and auto reload # OpenTSDB protocol options, see `standalone.example.toml`.
watch = false
## OpenTSDB protocol options.
[opentsdb] [opentsdb]
## Whether to enable
enable = true enable = true
## OpenTSDB telnet API server address.
addr = "127.0.0.1:4242" addr = "127.0.0.1:4242"
## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## InfluxDB protocol options. # InfluxDB protocol options, see `standalone.example.toml`.
[influxdb] [influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Prometheus remote storage options # Prometheus remote storage options, see `standalone.example.toml`.
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true enable = true
## Whether to store the data from Prometheus remote write in metric engine. # Whether to store the data from Prometheus remote write in metric engine.
# true by default
with_metric_engine = true with_metric_engine = true
## The metasrv client options. # Metasrv client options, see `datanode.example.toml`.
[meta_client] [meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"] metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s" timeout = "3s"
# DDL timeouts options.
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s" ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s" connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true tcp_nodelay = true
# The configuration about the cache of the Metadata.
## The configuration about the cache of the metadata. # default: 100000
metadata_cache_max_capacity = 100000 metadata_cache_max_capacity = 100000
# default: 10m
## TTL of the metadata cache.
metadata_cache_ttl = "10m" metadata_cache_ttl = "10m"
# default: 5m
# TTI of the metadata cache.
metadata_cache_tti = "5m" metadata_cache_tti = "5m"
## Datanode options. # Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
# Datanode options.
[datanode] [datanode]
## Datanode client options. # Datanode client options.
[datanode.client] [datanode.client]
timeout = "10s" timeout = "10s"
connect_timeout = "10s" connect_timeout = "10s"
tcp_nodelay = true tcp_nodelay = true
## The logging options. # Frontend export the metrics generated by itself
[logging] # encoded to Prometheus remote-write format
## The directory to store the log files. # and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
dir = "/tmp/greptimedb/logs" # This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
## The log level. Can be `info`/`debug`/`warn`/`error`. # whether enable export metrics, default is false
## +toml2docs:none-default # enable = false
level = "info" # The interval of export metrics
# write_interval = "30s"
## Enable OTLP tracing. # for `frontend`, `self_import` is recommended to collect metrics generated by itself
enable_otlp_tracing = false # [export_metrics.self_import]
# db = "information_schema"
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,46 +1,35 @@
## The working home directory. # The working home directory.
data_home = "/tmp/metasrv/" data_home = "/tmp/metasrv/"
# The bind address of metasrv, "127.0.0.1:3002" by default.
## The bind address of metasrv.
bind_addr = "127.0.0.1:3002" bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002" server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
## Etcd server address.
store_addr = "127.0.0.1:2379" store_addr = "127.0.0.1:2379"
# Datanode selector type.
## Datanode selector type. # - "lease_based" (default value).
## - `lease_based` (default value). # - "load_based"
## - `load_based` # For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "lease_based" selector = "lease_based"
# Store data in memory, false by default.
## Store data in memory.
use_memory_store = false use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
## Whether to enable greptimedb telemetry.
enable_telemetry = true enable_telemetry = true
# If it's not empty, the metasrv will store all data with this key prefix.
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = "" store_key_prefix = ""
## Procedure storage options. # Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
# Procedure storage options.
[procedure] [procedure]
# Procedure max retry time.
## Procedure max retry time.
max_retry_times = 12 max_retry_times = 12
# Initial retry delay of procedures, increases exponentially
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms" retry_delay = "500ms"
## Automatically split large values
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The maximum size of any etcd request is 1.5 MiB
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting of large values (no limit).
max_metadata_value_size = "1500KiB"
# Failure detectors options. # Failure detectors options.
[failure_detector] [failure_detector]
threshold = 8.0 threshold = 8.0
@@ -48,96 +37,57 @@ min_std_deviation = "100ms"
acceptable_heartbeat_pause = "3000ms" acceptable_heartbeat_pause = "3000ms"
first_heartbeat_estimate = "1000ms" first_heartbeat_estimate = "1000ms"
## Datanode options. # # Datanode options.
[datanode] # [datanode]
## Datanode client options. # # Datanode client options.
[datanode.client] # [datanode.client_options]
timeout = "10s" # timeout = "10s"
connect_timeout = "10s" # connect_timeout = "10s"
tcp_nodelay = true # tcp_nodelay = true
[wal] [wal]
# Available wal providers: # Available wal providers:
# - `raft_engine` (default): there're none raft-engine wal config since metasrv only involves in remote wal currently. # - "raft_engine" (default)
# - `kafka`: metasrv **has to be** configured with kafka wal config when using kafka wal provider in datanode. # - "kafka"
provider = "raft_engine" provider = "raft_engine"
# There're none raft-engine wal config since meta srv only involves in remote wal currently.
# Kafka wal config. # Kafka wal config.
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
# broker_endpoints = ["127.0.0.1:9092"]
# Number of topics to be created upon start.
# num_topics = 64
# Topic selector type.
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
# topic_name_prefix = "greptimedb_wal_topic"
# Expected number of replicas of each partition.
# replication_factor = 1
# The timeout beyond which a topic creation operation will be cancelled.
# create_topic_timeout = "30s"
# The initial backoff for kafka clients.
# backoff_init = "500ms"
# The maximum backoff for kafka clients.
# backoff_max = "10s"
# Exponential backoff rate, i.e. next backoff = base * current backoff.
# backoff_base = 2
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
# backoff_deadline = "5mins"
## The broker endpoints of the Kafka cluster. # Metasrv export the metrics generated by itself
broker_endpoints = ["127.0.0.1:9092"] # encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
## Number of topics to be created upon start. # This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
num_topics = 64 # [export_metrics]
# whether enable export metrics, default is false
## Topic selector type. # enable = false
## Available selector types: # The interval of export metrics
## - `round_robin` (default) # write_interval = "30s"
selector_type = "round_robin" # [export_metrics.remote_write]
# The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. # url = ""
topic_name_prefix = "greptimedb_wal_topic" # HTTP headers of Prometheus remote-write carry
# headers = {}
## Expected number of replicas of each partition.
replication_factor = 1
## The timeout beyond which a topic creation operation will be cancelled.
create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,477 +1,269 @@
## The running mode of the datanode. It can be `standalone` or `distributed`. # Node running mode, "standalone" or "distributed".
mode = "standalone" mode = "standalone"
# Whether to enable greptimedb telemetry, true by default.
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true enable_telemetry = true
# The default timezone of the server
# default_timezone = "UTC"
## The default timezone of the server. # HTTP server options.
## +toml2docs:none-default
default_timezone = "UTC"
## The HTTP server options.
[http] [http]
## The address to bind the HTTP server. # Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout. # HTTP request timeout, 30s by default.
timeout = "30s" timeout = "30s"
## HTTP request body limit. # HTTP request body limit, 64Mb by default.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. # the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
body_limit = "64MB" body_limit = "64MB"
## The gRPC server options. # gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. # Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The number of server worker threads. # The number of server worker threads, 8 by default.
runtime_size = 8 runtime_size = 8
## MySQL server options. # MySQL server options.
[mysql] [mysql]
## Whether to enable. # Whether to enable
enable = true enable = true
## The addr to bind the MySQL server. # Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. # The number of server worker threads, 2 by default.
runtime_size = 2 runtime_size = 2
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html # - "disable" (default value)
## - `disable` (default value) # - "prefer"
## - `prefer` # - "require"
## - `require` # - "verify-ca"
## - `verify-ca` # - "verify-full"
## - `verify-full`
mode = "disable" mode = "disable"
# Certificate file path.
## Certificate file path.
## +toml2docs:none-default
cert_path = "" cert_path = ""
# Private key file path.
## Private key file path.
## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for certificate and key file changes and auto reload # PostgreSQL server options.
watch = false
## PostgreSQL server options.
[postgres] [postgres]
## Whether to enable # Whether to enable
enable = true enable = true
## The addr to bind the PostgreSQL server. # Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. # The number of server worker threads, 2 by default.
runtime_size = 2 runtime_size = 2
## PostgreSQL server TLS options, see `mysql_options.tls` section. # PostgreSQL server TLS options, see `[mysql_options.tls]` section.
[postgres.tls] [postgres.tls]
## TLS mode. # TLS mode.
mode = "disable" mode = "disable"
# certificate file path.
## Certificate file path.
## +toml2docs:none-default
cert_path = "" cert_path = ""
# private key file path.
## Private key file path.
## +toml2docs:none-default
key_path = "" key_path = ""
## Watch for certificate and key file changes and auto reload # OpenTSDB protocol options.
watch = false
## OpenTSDB protocol options.
[opentsdb] [opentsdb]
## Whether to enable # Whether to enable
enable = true enable = true
## OpenTSDB telnet API server address. # OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242" addr = "127.0.0.1:4242"
## The number of server worker threads. # The number of server worker threads, 2 by default.
runtime_size = 2 runtime_size = 2
## InfluxDB protocol options. # InfluxDB protocol options.
[influxdb] [influxdb]
## Whether to enable InfluxDB protocol in HTTP API. # Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true enable = true
## Prometheus remote storage options # Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. # Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true enable = true
## Whether to store the data from Prometheus remote write in metric engine. # Whether to store the data from Prometheus remote write in metric engine.
# true by default
with_metric_engine = true with_metric_engine = true
## The WAL options.
[wal] [wal]
## The provider of the WAL. # Available wal providers:
## - `raft_engine`: the wal is stored in the local file system by raft-engine. # - "raft_engine" (default)
## - `kafka`: it's remote wal that data is stored in Kafka. # - "kafka"
provider = "raft_engine" provider = "raft_engine"
## The directory to store the WAL files. # Raft-engine wal options.
## **It's only used when the provider is `raft_engine`**. # WAL data directory
## +toml2docs:none-default # dir = "/tmp/greptimedb/wal"
dir = "/tmp/greptimedb/wal" # WAL file size in bytes.
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB" file_size = "256MB"
# WAL purge threshold.
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB" purge_threshold = "4GB"
# WAL purge interval in seconds.
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m" purge_interval = "10m"
# WAL read batch size.
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128 read_batch_size = 128
# Whether to sync log file after every write.
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false sync_write = false
# Whether to reuse logically truncated log files.
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true enable_log_recycle = true
# Whether to pre-create log files on start up
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false prefill_log_files = false
# Duration for fsyncing log files.
sync_period = "1000ms"
## Duration for fsyncing log files. # Kafka wal options.
## **It's only used when the provider is `raft_engine`**. # The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
sync_period = "10s" # broker_endpoints = ["127.0.0.1:9092"]
## The Kafka broker endpoints. # Number of topics to be created upon start.
## **It's only used when the provider is `kafka`**. # num_topics = 64
broker_endpoints = ["127.0.0.1:9092"] # Topic selector type.
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# The prefix of topic name.
# topic_name_prefix = "greptimedb_wal_topic"
# The number of replicas of each partition.
# Warning: the replication factor must be positive and must not be greater than the number of broker endpoints.
# replication_factor = 1
## The max size of a single producer batch. # The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic. # Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**. # max_batch_size = "1MB"
max_batch_size = "1MB" # The linger duration.
# linger = "200ms"
# The consumer wait timeout.
# consumer_wait_timeout = "100ms"
# Create topic timeout.
# create_topic_timeout = "30s"
## The linger duration of a kafka batch producer. # The initial backoff delay.
## **It's only used when the provider is `kafka`**. # backoff_init = "500ms"
linger = "200ms" # The maximum backoff delay.
# backoff_max = "10s"
# Exponential backoff rate, i.e. next backoff = base * current backoff.
# backoff_base = 2
# The deadline of retries.
# backoff_deadline = "5mins"
## The consumer wait timeout. # Metadata storage options.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Metadata storage options.
[metadata_store] [metadata_store]
## Kv file size in bytes. # Kv file size in bytes.
file_size = "256MB" file_size = "256MB"
## Kv purge threshold. # Kv purge threshold.
purge_threshold = "4GB" purge_threshold = "4GB"
## Procedure storage options. # Procedure storage options.
[procedure] [procedure]
## Procedure max retry time. # Procedure max retry time.
max_retry_times = 3 max_retry_times = 3
## Initial retry delay of procedures, increases exponentially # Initial retry delay of procedures, increases exponentially
retry_delay = "500ms" retry_delay = "500ms"
# Example of using S3 as the storage. # Storage options.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage] [storage]
## The working home directory. # The working home directory.
data_home = "/tmp/greptimedb/" data_home = "/tmp/greptimedb/"
# Storage type.
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
# TTL for all tables. Disabled by default.
## Cache configuration for object storage such as 'S3' etc. # global_ttl = "7d"
## The local file cache directory. # Cache configuration for object storage such as 'S3' etc.
## +toml2docs:none-default # cache_path = "/path/local_cache"
cache_path = "/path/local_cache" # The local file cache capacity in bytes.
# cache_capacity = "256MB"
## The local file cache capacity in bytes.
## +toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## +toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## +toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## +toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## +toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## +toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## +toml2docs:none-default
credential_path = "test"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## +toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## +toml2docs:none-default
region = "us-west-2"
# Custom storage options # Custom storage options
# [[storage.providers]] #[[storage.providers]]
# type = "S3" #type = "S3"
# [[storage.providers]] #[[storage.providers]]
# type = "Gcs" #type = "Gcs"
## The region engine options. You can configure multiple region engines. # Mito engine options
[[region_engine]] [[region_engine]]
## The Mito engine options.
[region_engine.mito] [region_engine.mito]
# Number of region workers
## Number of region workers.
num_workers = 8 num_workers = 8
# Request channel size of each worker
## Request channel size of each worker.
worker_channel_size = 128 worker_channel_size = 128
# Max batch size for a worker to handle requests
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64 worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10 manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false compress_manifest = false
# Max number of running background jobs
## Max number of running background jobs
max_background_jobs = 4 max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h" auto_flush_interval = "1h"
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
global_write_buffer_size = "1GB" global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
global_write_buffer_reject_size = "2GB" global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata. Setting it to 0 to disable the cache.
## Cache size for SST metadata. Setting it to 0 to disable the cache. # If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
sst_meta_cache_size = "128MB" sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache. # If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
vector_cache_size = "512MB" vector_cache_size = "512MB"
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache. # If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB" page_cache_size = "512MB"
# Buffer size for SST writing.
## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
# Parallelism to scan a region (default: 1/4 of cpu cores).
## Parallelism to scan a region (default: 1/4 of cpu cores). # - 0: using the default value (1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores). # - 1: scan in current thread.
## - `1`: scan in current thread. # - n: scan in parallelism n.
## - `n`: scan in parallelism n.
scan_parallelism = 0 scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
# Whether to allow stale WAL entries read during replay.
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false allow_stale_entries = false
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
# Whether to create the index on flush.
## Whether to create the index on flush. # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
create_on_flush = "auto" create_on_flush = "auto"
# Whether to create the index on compaction.
## Whether to create the index on compaction. # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
create_on_compaction = "auto" create_on_compaction = "auto"
# Whether to apply the index on query
## Whether to apply the index on query # - "auto": automatically
## - `auto`: automatically # - "disable": never
## - `disable`: never
apply_on_query = "auto" apply_on_query = "auto"
# Memory threshold for performing an external sort during index creation.
## Memory threshold for performing an external sort during index creation. # Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M" mem_threshold_on_create = "64M"
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = "" intermediate_path = ""
[region_engine.mito.memtable] # Log options
## Memtable type. # [logging]
## - `time_series`: time-series memtable # Specify logs directory.
## - `partition_tree`: partition tree memtable (experimental) # dir = "/tmp/greptimedb/logs"
type = "time_series" # Specify the log level [info | debug | error | warn]
# level = "info"
# whether enable tracing, default is false
# enable_otlp_tracing = false
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
# otlp_endpoint = "localhost:4317"
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
# tracing_sample_ratio = 1.0
# Whether to append logs to stdout. Defaults to true.
# append_stdout = true
## The max number of keys in one shard. # Standalone export the metrics generated by itself
## Only available for `partition_tree` memtable. # encoded to Prometheus remote-write format
index_max_keys_per_shard = 8192 # and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
## The max rows of data inside the actively writing buffer in one shard. # [export_metrics]
## Only available for `partition_tree` memtable. # whether enable export metrics, default is false
data_freeze_threshold = 32768 # enable = false
# The interval of export metrics
## Max dictionary bytes. # write_interval = "30s"
## Only available for `partition_tree` memtable. # for `standalone`, `self_import` is recommended to collect metrics generated by itself
fork_dictionary_bytes = "1GiB" # [export_metrics.self_import]
# db = "information_schema"
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
## Whether to append logs to stdout.
append_stdout = true
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }

View File

@@ -1,50 +0,0 @@
# TSBS benchmark - v0.7.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 3695814.64 |
| EC2 c5d.2xlarge | 2987166.64 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |

View File

@@ -79,7 +79,7 @@ This RFC proposes to add a new expression node `MergeScan` to merge result from
│ │ │ │ │ │ │ │
└─Frontend──────┘ └─Remote-Sources──────────────┘ └─Frontend──────┘ └─Remote-Sources──────────────┘
``` ```
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`, just like a coalesce op. And each remote source is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend. This merge operation simply chains all the the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
## Commutativity of MergeScan ## Commutativity of MergeScan

View File

@@ -27,8 +27,8 @@ subgraph Frontend["Frontend"]
end end
end end
MyTable --> Metasrv MyTable --> MetaSrv
Metasrv --> ETCD MetaSrv --> ETCD
MyTable-->TableEngine0 MyTable-->TableEngine0
MyTable-->TableEngine1 MyTable-->TableEngine1
@@ -95,8 +95,8 @@ subgraph Frontend["Frontend"]
end end
end end
MyTable --> Metasrv MyTable --> MetaSrv
Metasrv --> ETCD MetaSrv --> ETCD
MyTable-->RegionEngine MyTable-->RegionEngine
MyTable-->RegionEngine1 MyTable-->RegionEngine1

View File

@@ -36,7 +36,7 @@ Hence, we choose the third option, and use a simple logical plan that's anagonis
## Deploy mode and protocol ## Deploy mode and protocol
- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as frontend in distributed mode. - Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as frontend in distributed mode.
- It accepts insert request Rows, which is used between frontend and datanode. - It accepts insert request Rows, which is used between frontend and datanode.
- New flow job is submitted in the format of modified SQL query like snowflake do, like: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. Flow job then got stored in Metasrv. - New flow job is submitted in the format of modified SQL query like snowflake do, like: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. Flow job then got stored in MetaSrv.
- It also persists results in the format of Rows to frontend. - It also persists results in the format of Rows to frontend.
- The query plan uses Substrait as codec format. It's the same with GreptimeDB's query engine. - The query plan uses Substrait as codec format. It's the same with GreptimeDB's query engine.
- Greptime Flow needs a WAL for recovering. It's possible to reuse datanode's. - Greptime Flow needs a WAL for recovering. It's possible to reuse datanode's.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

View File

@@ -1,101 +0,0 @@
---
Feature Name: Multi-dimension Partition Rule
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3351
Date: 2024-02-21
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
# Summary
A new region partition scheme that runs on multiple dimensions of the key space. The partition rule is defined by a set of simple expressions on the partition key columns.
# Motivation
The current partition rule is from MySQL's [`RANGE Partition`](https://dev.mysql.com/doc/refman/8.0/en/partitioning-range.html), which is based on a single dimension. It is sort of a [Hilbert Curve](https://en.wikipedia.org/wiki/Hilbert_curve) and pick several point on the curve to divide the space. It is neither easy to understand how the data get partitioned nor flexible enough to handle complex partitioning requirements.
Considering future requirements like region repartitioning or autonomous rebalancing, where both workload and partition may change frequently, this RFC proposes a new region partition scheme that uses a set of simple expressions on the partition key columns to divide the key space.
# Details
## Partition rule
First, we define a simple expression that can be used to define the partition rule. The simple expression is a binary expression on the partition key columns that can be evaluated to a boolean value. The binary operator is limited to comparison operators only, like `=`, `!=`, `>`, `>=`, `<`, `<=`. And the operands are limited to either a literal value or a partition column.
Example of valid simple expressions are $`col_A = 10`$, $`col_A \gt 10 \& col_B \gt 20`$ or $`col_A \ne 10`$.
Those expressions can be used as predicates to divide the key space into different regions. The following example have two partition columns `Col A` and `Col B`, and four partitioned regions.
```math
\left\{\begin{aligned}
&col_A \le 10 &Region_1 \\
&10 \lt col_A \& col_A \le 20 &Region_2 \\
&20 \lt col_A \space \& \space col_B \lt 100 &Region_3 \\
&20 \lt col_A \space \& \space col_B \ge 100 &Region_4
\end{aligned}\right\}
```
An advantage of this scheme is that it is easy to understand how the data get partitioned. The above example can be visualized in a 2D space (two partition columns are involved in the example).
![example](2d-example.png)
Here each expression draws a line in the 2D space. Managing data partitioning becomes a matter of drawing lines in the key space.
To make it easy to use, there is a "default region" which catches all the data that doesn't match any of the previous expressions. The default region exists by default and does not need to be specified. It is also possible to remove this default region if the DB finds it is not necessary.
## SQL interface
The SQL interface is responsible for two parts: specifying the partition columns and the partition rule. Though we are targeting an autonomous system, it's still allowed to give some bootstrap rules or hints on table creation.
Partition column is specified by `PARTITION ON COLUMNS` sub-clause in `CREATE TABLE`:
```sql
CREATE TABLE t (...)
PARTITION ON COLUMNS (...) ();
```
Two following brackets are for partition columns and partition rule respectively.
Columns provided here are only used as an allow-list of how the partition rule can be defined. Which means (a) the sequence between columns doesn't matter, (b) the columns provided here are not necessarily being used in the partition rule.
The partition rule part is a list of comma-separated simple expressions. Expressions here do not correspond to regions one-to-one, as they might be changed by the system to fit various workloads.
A full example of `CREATE TABLE` with partition rule is:
```sql
CREATE TABLE IF NOT EXISTS demo (
a STRING,
b STRING,
c STRING,
d STRING,
ts TIMESTAMP,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY (a, b, c, d)
)
PARTITION ON COLUMNS (c, b, a) (
a < 10,
a >= 10 AND a < 20,
a >= 20 AND b < 100,
a >= 20 AND b >= 100
)
```
## Combine with storage
Examining columns separately suits our columnar storage very well in two aspects.
1. The simple expression can be pushed down to the storage and file formats, and is likely to hit an existing index. This makes pruning operations very efficient.
2. Columns in columnar storage are not tightly coupled like in the traditional row storages, which means we can easily add or remove columns from partition rule without much impact (like a global reshuffle) on data.
The data file itself can be "projected" to the key space as a polyhedron, it is guaranteed that each plane is in parallel with some coordinate planes (in a 2D scenario, this is saying that all the files can be projected to a rectangle). Thus partition or repartition also only need to consider related columns.
![sst-project](sst-project.png)
An additional limitation is that considering how the index works and how we organize the primary keys at present, the partition columns are limited to be a subset of primary keys for better performance.
# Drawbacks
This is a breaking change.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 71 KiB

View File

@@ -1,46 +0,0 @@
# GreptimeDB Style Guide
This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.
It's mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).
## Table of Contents
- Formatting
- Modules
- Comments
## Formatting
- Place all `mod` declaration before any `use`.
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
- Add an empty line before and after declaration blocks.
- Place comment before attributes (`#[]`) and derive (`#[derive]`).
## Modules
- Use the file with the same name instead of `mod.rs` to define a module. E.g.:
```
.
├── cache
│ ├── cache_size.rs
│ └── write_cache.rs
└── cache.rs
```
## Comments
- Add comments for public functions and structs.
- Prefer document comment (`///`) over normal comment (`//`) for structs, fields, functions etc.
- Add link (`[]`) to struct, method, or any other reference. And make sure that link works.
## Error handling
- Define a custom error type for the module if needed.
- Prefer `with_context()` over `context()` when allocation is needed to construct an error.
- Use `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:
```rust
error!(e; "Failed to do something");
```

View File

@@ -66,7 +66,7 @@
}, },
"editable": true, "editable": true,
"fiscalYearStartMonth": 0, "fiscalYearStartMonth": 0,
"graphTooltip": 1, "graphTooltip": 0,
"id": null, "id": null,
"links": [], "links": [],
"liveNow": false, "liveNow": false,
@@ -2116,7 +2116,7 @@
} }
] ]
}, },
"unit": "none" "unit": "bytes"
}, },
"overrides": [] "overrides": []
}, },
@@ -2126,7 +2126,7 @@
"x": 0, "x": 0,
"y": 61 "y": 61
}, },
"id": 17, "id": 12,
"interval": "1s", "interval": "1s",
"options": { "options": {
"legend": { "legend": {
@@ -2147,8 +2147,8 @@
"uid": "${DS_PROMETHEUS-1}" "uid": "${DS_PROMETHEUS-1}"
}, },
"disableTextWrap": false, "disableTextWrap": false,
"editorMode": "code", "editorMode": "builder",
"expr": "rate(raft_engine_sync_log_duration_seconds_count[2s])", "expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"fullMetaSearch": false, "fullMetaSearch": false,
"includeNullMetadata": false, "includeNullMetadata": false,
"instant": false, "instant": false,
@@ -2158,7 +2158,7 @@
"useBackend": false "useBackend": false
} }
], ],
"title": "raft engine sync count", "title": "wal write size",
"type": "timeseries" "type": "timeseries"
}, },
{ {
@@ -2378,120 +2378,6 @@
], ],
"title": "raft engine write duration seconds", "title": "raft engine write duration seconds",
"type": "timeseries" "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 68
},
"id": 12,
"interval": "1s",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
"legendFormat": "req-size-p95",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
"expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
"hide": false,
"instant": false,
"legendFormat": "throughput",
"range": true,
"refId": "B"
}
],
"title": "wal write size",
"type": "timeseries"
} }
], ],
"refresh": "10s", "refresh": "10s",
@@ -2501,13 +2387,13 @@
"list": [] "list": []
}, },
"time": { "time": {
"from": "now-30m", "from": "now-3h",
"to": "now" "to": "now"
}, },
"timepicker": {}, "timepicker": {},
"timezone": "", "timezone": "",
"title": "GreptimeDB", "title": "GreptimeDB",
"uid": "e7097237-669b-4f8d-b751-13067afbfb68", "uid": "e7097237-669b-4f8d-b751-13067afbfb68",
"version": 12, "version": 9,
"weekStart": "" "weekStart": ""
} }

View File

@@ -19,12 +19,6 @@ includes = [
"*.py", "*.py",
] ]
excludes = [
# copied sources
"src/common/base/src/readable_size.rs",
"src/servers/src/repeated_field.rs",
]
[properties] [properties]
inceptionYear = 2023 inceptionYear = 2023
copyrightOwner = "Greptime Team" copyrightOwner = "Greptime Team"

View File

@@ -1,2 +1,2 @@
[toolchain] [toolchain]
channel = "nightly-2024-04-18" channel = "nightly-2023-12-19"

View File

@@ -1,7 +1,8 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository. # This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
set -ex
set -e -x
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd) declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR}) declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
@@ -12,34 +13,13 @@ RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
echo "Downloading assets to dir: $OUT_DIR" echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR cd $OUT_DIR
if [[ -z "$GITHUB_PROXY_URL" ]]; then
GITHUB_URL="https://github.com"
else
GITHUB_URL="${GITHUB_PROXY_URL%/}"
fi
function retry_fetch() {
local url=$1
local filename=$2
curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
echo "Failed to download $url"
echo "You may try to set http_proxy and https_proxy environment variables."
if [[ -z "$GITHUB_PROXY_URL" ]]; then
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
fi
exit 1
}
}
# Download the SHA256 checksum attached to the release. To verify the integrity # Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file # of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets. # containing the built dashboard assets.
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
# Download the tar file containing the built dashboard assets. # Download the tar file containing the built dashboard assets.
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
# Verify the checksums match; exit if they don't. # Verify the checksums match; exit if they don't.
case "$(uname -s)" in case "$(uname -s)" in

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
[lints]
workspace = true
[dependencies] [dependencies]
common-base.workspace = true common-base.workspace = true
common-decimal.workspace = true common-decimal.workspace = true
@@ -18,6 +15,7 @@ greptime-proto.workspace = true
paste = "1.0" paste = "1.0"
prost.workspace = true prost.workspace = true
snafu.workspace = true snafu.workspace = true
tonic.workspace = true
[build-dependencies] [build-dependencies]
tonic-build = "0.9" tonic-build = "0.9"

View File

@@ -707,6 +707,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
} }
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> { pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
// TODO(fys): use macros to optimize code
match data_type { match data_type {
ConcreteDataType::Int64(_) => values ConcreteDataType::Int64(_) => values
.i64_values .i64_values

View File

@@ -21,7 +21,6 @@ pub mod prom_store {
} }
} }
pub mod region;
pub mod v1; pub mod v1;
pub use greptime_proto; pub use greptime_proto;

View File

@@ -1,42 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use common_base::AffectedRows;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
/// This result struct is derived from [RegionResponseV1]
#[derive(Debug)]
pub struct RegionResponse {
pub affected_rows: AffectedRows,
pub extension: HashMap<String, Vec<u8>>,
}
impl RegionResponse {
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
Self {
affected_rows: region_response.affected_rows as _,
extension: region_response.extension,
}
}
/// Creates one response without extension
pub fn new(affected_rows: AffectedRows) -> Self {
Self {
affected_rows,
extension: Default::default(),
}
}
}

View File

@@ -8,17 +8,13 @@ license.workspace = true
default = [] default = []
testing = [] testing = []
[lints]
workspace = true
[dependencies] [dependencies]
api.workspace = true api.workspace = true
async-trait.workspace = true async-trait.workspace = true
common-error.workspace = true common-error.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10" digest = "0.10"
notify.workspace = true hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] } secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10" sha1 = "0.10"
snafu.workspace = true snafu.workspace = true

View File

@@ -22,9 +22,6 @@ use snafu::{ensure, OptionExt};
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu}; use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
use crate::user_info::DefaultUserInfo; use crate::user_info::DefaultUserInfo;
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER}; use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
use crate::user_provider::watch_file_user_provider::{
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
};
use crate::{UserInfoRef, UserProviderRef}; use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime"; pub(crate) const DEFAULT_USERNAME: &str = "greptime";
@@ -43,12 +40,9 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
match name { match name {
STATIC_USER_PROVIDER => { STATIC_USER_PROVIDER => {
let provider = let provider =
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?; StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
Ok(provider) Ok(provider)
} }
WATCH_FILE_USER_PROVIDER => {
WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
}
_ => InvalidConfigSnafu { _ => InvalidConfigSnafu {
value: name.to_string(), value: name.to_string(),
msg: "Invalid UserProviderOption", msg: "Invalid UserProviderOption",

View File

@@ -64,13 +64,6 @@ pub enum Error {
username: String, username: String,
}, },
#[snafu(display("Failed to initialize a watcher for file {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
},
#[snafu(display("User is not authorized to perform this action"))] #[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { location: Location }, PermissionDenied { location: Location },
} }
@@ -80,7 +73,6 @@ impl ErrorExt for Error {
match self { match self {
Error::InvalidConfig { .. } => StatusCode::InvalidArguments, Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalParam { .. } => StatusCode::InvalidArguments, Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected, Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal, Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { .. } => StatusCode::Internal, Error::AuthBackend { .. } => StatusCode::Internal,

View File

@@ -45,9 +45,9 @@ impl Default for MockUserProvider {
impl MockUserProvider { impl MockUserProvider {
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) { pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
info.catalog.clone_into(&mut self.catalog); self.catalog = info.catalog.to_owned();
info.schema.clone_into(&mut self.schema); self.schema = info.schema.to_owned();
info.username.clone_into(&mut self.username); self.username = info.username.to_owned();
} }
} }

View File

@@ -13,24 +13,10 @@
// limitations under the License. // limitations under the License.
pub(crate) mod static_user_provider; pub(crate) mod static_user_provider;
pub(crate) mod watch_file_user_provider;
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password}; use crate::common::{Identity, Password};
use crate::error::{ use crate::error::Result;
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu, use crate::UserInfoRef;
UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, UserInfoRef};
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait UserProvider: Send + Sync { pub trait UserProvider: Send + Sync {
@@ -58,88 +44,3 @@ pub trait UserProvider: Send + Sync {
Ok(user_info) Ok(user_info)
} }
} }
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
// check valid path
let path = Path::new(filepath);
if !path.exists() {
return Ok(None);
}
ensure!(
path.is_file(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider file must be a file",
}
);
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(
!credential.is_empty(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider's file must contains at least one valid credential",
}
);
Ok(Some(credential))
}
fn authenticate_with_credential(
users: &HashMap<String, Vec<u8>>,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
}
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
}

View File

@@ -13,34 +13,60 @@
// limitations under the License. // limitations under the License.
use std::collections::HashMap; use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use async_trait::async_trait; use async_trait::async_trait;
use snafu::OptionExt; use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{InvalidConfigSnafu, Result}; use crate::error::{
use crate::user_provider::{authenticate_with_credential, load_credential_from_file}; Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
use crate::{Identity, Password, UserInfoRef, UserProvider}; UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider"; pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
pub(crate) struct StaticUserProvider { impl TryFrom<&str> for StaticUserProvider {
users: HashMap<String, Vec<u8>>, type Error = Error;
}
impl StaticUserProvider { fn try_from(value: &str) -> Result<Self> {
pub(crate) fn new(value: &str) -> Result<Self> {
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu { let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
value: value.to_string(), value: value.to_string(),
msg: "StaticUserProviderOption must be in format `<option>:<value>`", msg: "StaticUserProviderOption must be in format `<option>:<value>`",
})?; })?;
return match mode { return match mode {
"file" => { "file" => {
let users = load_credential_from_file(content)? // check valid path
.context(InvalidConfigSnafu { let path = Path::new(content);
ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
value: content.to_string(), value: content.to_string(),
msg: "StaticFileUserProvider must be a valid file path", msg: "StaticUserProviderOption file must be a valid file path",
})?; });
Ok(StaticUserProvider { users })
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(!credential.is_empty(), InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticUserProviderOption file must contains at least one valid credential",
});
Ok(StaticUserProvider { users: credential, })
} }
"cmd" => content "cmd" => content
.split(',') .split(',')
@@ -62,14 +88,61 @@ impl StaticUserProvider {
} }
} }
pub(crate) struct StaticUserProvider {
users: HashMap<String, Vec<u8>>,
}
#[async_trait] #[async_trait]
impl UserProvider for StaticUserProvider { impl UserProvider for StaticUserProvider {
fn name(&self) -> &str { fn name(&self) -> &str {
STATIC_USER_PROVIDER STATIC_USER_PROVIDER
} }
async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> { async fn authenticate(
authenticate_with_credential(&self.users, id, pwd) &self,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
return if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
};
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
} }
async fn authorize( async fn authorize(
@@ -108,7 +181,7 @@ pub mod test {
#[tokio::test] #[tokio::test]
async fn test_authorize() { async fn test_authorize() {
let user_info = DefaultUserInfo::with_name("root"); let user_info = DefaultUserInfo::with_name("root");
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap(); let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
provider provider
.authorize("catalog", "schema", &user_info) .authorize("catalog", "schema", &user_info)
.await .await
@@ -117,7 +190,7 @@ pub mod test {
#[tokio::test] #[tokio::test]
async fn test_inline_provider() { async fn test_inline_provider() {
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap(); let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
test_authenticate(&provider, "root", "123456").await; test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await; test_authenticate(&provider, "admin", "654321").await;
} }
@@ -141,7 +214,7 @@ admin=654321",
} }
let param = format!("file:{file_path}"); let param = format!("file:{file_path}");
let provider = StaticUserProvider::new(param.as_str()).unwrap(); let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
test_authenticate(&provider, "root", "123456").await; test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await; test_authenticate(&provider, "admin", "654321").await;
} }

View File

@@ -1,215 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_telemetry::{info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::{ensure, ResultExt};
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
/// A user provider that reads user credential from a file and watches the file for changes.
///
/// Empty file is invalid; but file not exist means every user can be authenticated.
pub(crate) struct WatchFileUserProvider {
users: WatchedCredentialRef,
}
impl WatchFileUserProvider {
pub fn new(filepath: &str) -> Result<Self> {
let credential = load_credential_from_file(filepath)?;
let users = Arc::new(Mutex::new(credential));
let this = WatchFileUserProvider {
users: users.clone(),
};
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut debouncer =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
let mut dir = Path::new(filepath).to_path_buf();
ensure!(
dir.pop(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider path must be a file path",
}
);
debouncer
.watch(&dir, RecursiveMode::NonRecursive)
.context(FileWatchSnafu { path: filepath })?;
let filepath = filepath.to_string();
std::thread::spawn(move || {
let filename = Path::new(&filepath).file_name();
let _hold = debouncer;
while let Ok(res) = rx.recv() {
if let Ok(event) = res {
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
let is_relevant_event = matches!(
event.kind,
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
);
if is_this_file && is_relevant_event {
info!(?event.kind, "User provider file {} changed", &filepath);
match load_credential_from_file(&filepath) {
Ok(credential) => {
let mut users =
users.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {filepath} reloaded");
#[cfg(test)]
info!("User provider file {filepath} reloaded: {credential:?}");
*users = credential;
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {filepath}; keep the old one",
)
}
}
}
}
}
});
Ok(this)
}
}
#[async_trait]
impl UserProvider for WatchFileUserProvider {
fn name(&self) -> &str {
WATCH_FILE_USER_PROVIDER
}
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
let users = self.users.lock().expect("users credential must be valid");
if let Some(users) = users.as_ref() {
authenticate_with_credential(users, id, password)
} else {
match id {
Identity::UserId(id, _) => {
warn!(id, "User provider file not exist, allow all users");
Ok(DefaultUserInfo::with_name(id))
}
}
}
}
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
// default allow all
Ok(())
}
}
#[cfg(test)]
pub mod test {
use std::time::{Duration, Instant};
use common_test_util::temp_dir::create_temp_dir;
use tokio::time::sleep;
use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
use crate::user_provider::{Identity, Password};
use crate::UserProvider;
async fn test_authenticate(
provider: &dyn UserProvider,
username: &str,
password: &str,
ok: bool,
timeout: Option<Duration>,
) {
if let Some(timeout) = timeout {
let deadline = Instant::now().checked_add(timeout).unwrap();
loop {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
if re.is_ok() == ok {
break;
} else if Instant::now() < deadline {
sleep(Duration::from_millis(100)).await;
} else {
panic!("timeout (username: {username}, password: {password}, expected: {ok})");
}
}
} else {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
assert_eq!(
re.is_ok(),
ok,
"username: {}, password: {}",
username,
password
);
}
}
#[tokio::test]
async fn test_file_provider() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_provider");
let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());
// write a tmp file
assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
let timeout = Duration::from_secs(60);
test_authenticate(&provider, "root", "123456", true, None).await;
test_authenticate(&provider, "admin", "654321", true, None).await;
test_authenticate(&provider, "root", "654321", false, None).await;
// update the tmp file
assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
// remove the tmp file
assert!(std::fs::remove_file(&file_path).is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
// recreate the tmp file
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
}
}

View File

@@ -7,21 +7,21 @@ license.workspace = true
[features] [features]
testing = [] testing = []
[lints]
workspace = true
[dependencies] [dependencies]
api.workspace = true api.workspace = true
arc-swap = "1.0"
arrow.workspace = true arrow.workspace = true
arrow-schema.workspace = true arrow-schema.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait = "0.1" async-trait = "0.1"
common-catalog.workspace = true common-catalog.workspace = true
common-error.workspace = true common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-meta.workspace = true common-meta.workspace = true
common-query.workspace = true common-query.workspace = true
common-recordbatch.workspace = true common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true common-telemetry.workspace = true
common-time.workspace = true common-time.workspace = true
common-version.workspace = true common-version.workspace = true
@@ -34,13 +34,15 @@ itertools.workspace = true
lazy_static.workspace = true lazy_static.workspace = true
meta-client.workspace = true meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] } moka = { workspace = true, features = ["future", "sync"] }
parking_lot = "0.12"
partition.workspace = true partition.workspace = true
paste = "1.0" paste = "1.0"
prometheus.workspace = true prometheus.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
session.workspace = true session.workspace = true
snafu.workspace = true snafu.workspace = true
sql.workspace = true
store-api.workspace = true store-api.workspace = true
table.workspace = true table.workspace = true
tokio.workspace = true tokio.workspace = true

View File

@@ -164,8 +164,11 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display("Failed to find table partitions"))] #[snafu(display("Failed to find table partitions: #{table}"))]
FindPartitions { source: partition::error::Error }, FindPartitions {
source: partition::error::Error,
table: String,
},
#[snafu(display("Failed to find region routes"))] #[snafu(display("Failed to find region routes"))]
FindRegionRoutes { source: partition::error::Error }, FindRegionRoutes { source: partition::error::Error },
@@ -216,7 +219,7 @@ pub enum Error {
}, },
#[snafu(display("Failed to perform metasrv operation"))] #[snafu(display("Failed to perform metasrv operation"))]
Metasrv { MetaSrv {
location: Location, location: Location,
source: meta_client::error::Error, source: meta_client::error::Error,
}, },
@@ -251,12 +254,6 @@ pub enum Error {
source: common_meta::error::Error, source: common_meta::error::Error,
location: Location, location: Location,
}, },
#[snafu(display("Get null from table cache, key: {}", key))]
TableCacheNotGet { key: String, location: Location },
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
GetTableCache { err_msg: String },
} }
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
@@ -304,7 +301,7 @@ impl ErrorExt for Error {
| Error::CreateTable { source, .. } | Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(), | Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(), Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(), Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(), Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(), Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -317,7 +314,6 @@ impl ErrorExt for Error {
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied, Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery, Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(), Error::TableMetadataManager { source, .. } => source.status_code(),
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
} }
} }

View File

@@ -12,17 +12,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
pub mod columns; mod columns;
pub mod key_column_usage; mod key_column_usage;
mod memory_table; mod memory_table;
mod partitions; mod partitions;
mod predicate; mod predicate;
mod region_peers; mod region_peers;
mod runtime_metrics; mod runtime_metrics;
pub mod schemata; mod schemata;
mod table_constraints;
mod table_names; mod table_names;
pub mod tables; mod tables;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
@@ -42,7 +41,8 @@ use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{ use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType, FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
}; };
use table::{Table, TableRef}; use table::thin_table::{ThinTable, ThinTableAdapter};
use table::TableRef;
pub use table_names::*; pub use table_names::*;
use self::columns::InformationSchemaColumns; use self::columns::InformationSchemaColumns;
@@ -53,7 +53,6 @@ use crate::information_schema::partitions::InformationSchemaPartitions;
use crate::information_schema::region_peers::InformationSchemaRegionPeers; use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::information_schema::runtime_metrics::InformationSchemaMetrics; use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata; use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::information_schema::tables::InformationSchemaTables; use crate::information_schema::tables::InformationSchemaTables;
use crate::CatalogManager; use crate::CatalogManager;
@@ -175,10 +174,6 @@ impl InformationSchemaProvider {
KEY_COLUMN_USAGE.to_string(), KEY_COLUMN_USAGE.to_string(),
self.build_table(KEY_COLUMN_USAGE).unwrap(), self.build_table(KEY_COLUMN_USAGE).unwrap(),
); );
tables.insert(
TABLE_CONSTRAINTS.to_string(),
self.build_table(TABLE_CONSTRAINTS).unwrap(),
);
// Add memory tables // Add memory tables
for name in MEMORY_TABLES.iter() { for name in MEMORY_TABLES.iter() {
@@ -192,9 +187,10 @@ impl InformationSchemaProvider {
self.information_table(name).map(|table| { self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table); let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Inexact; let filter_pushdown = FilterPushDownType::Inexact;
let thin_table = ThinTable::new(table_info, filter_pushdown);
let data_source = Arc::new(InformationTableDataSource::new(table)); let data_source = Arc::new(InformationTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source); Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
Arc::new(table)
}) })
} }
@@ -247,10 +243,6 @@ impl InformationSchemaProvider {
self.catalog_name.clone(), self.catalog_name.clone(),
self.catalog_manager.clone(), self.catalog_manager.clone(),
)) as _), )) as _),
TABLE_CONSTRAINTS => Some(Arc::new(InformationSchemaTableConstraints::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
_ => None, _ => None,
} }
} }

View File

@@ -26,16 +26,13 @@ use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType, MutableVector}; use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder; use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value; use datatypes::value::Value;
use datatypes::vectors::{ use datatypes::vectors::{StringVectorBuilder, VectorRef};
ConstantVector, Int64Vector, Int64VectorBuilder, StringVector, StringVectorBuilder, VectorRef,
};
use futures::TryStreamExt; use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt}; use snafu::{OptionExt, ResultExt};
use sql::statements;
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, COLUMNS}; use super::{InformationTable, COLUMNS};
@@ -51,42 +48,18 @@ pub(super) struct InformationSchemaColumns {
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,
} }
pub const TABLE_CATALOG: &str = "table_catalog"; const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema"; const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name"; const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name"; const COLUMN_NAME: &str = "column_name";
const ORDINAL_POSITION: &str = "ordinal_position"; const DATA_TYPE: &str = "data_type";
const CHARACTER_MAXIMUM_LENGTH: &str = "character_maximum_length"; const SEMANTIC_TYPE: &str = "semantic_type";
const CHARACTER_OCTET_LENGTH: &str = "character_octet_length"; const COLUMN_DEFAULT: &str = "column_default";
const NUMERIC_PRECISION: &str = "numeric_precision"; const IS_NULLABLE: &str = "is_nullable";
const NUMERIC_SCALE: &str = "numeric_scale";
const DATETIME_PRECISION: &str = "datetime_precision";
const CHARACTER_SET_NAME: &str = "character_set_name";
pub const COLLATION_NAME: &str = "collation_name";
pub const COLUMN_KEY: &str = "column_key";
pub const EXTRA: &str = "extra";
pub const PRIVILEGES: &str = "privileges";
const GENERATION_EXPRESSION: &str = "generation_expression";
// Extension field to keep greptime data type name
pub const GREPTIME_DATA_TYPE: &str = "greptime_data_type";
pub const DATA_TYPE: &str = "data_type";
pub const SEMANTIC_TYPE: &str = "semantic_type";
pub const COLUMN_DEFAULT: &str = "column_default";
pub const IS_NULLABLE: &str = "is_nullable";
const COLUMN_TYPE: &str = "column_type"; const COLUMN_TYPE: &str = "column_type";
pub const COLUMN_COMMENT: &str = "column_comment"; const COLUMN_COMMENT: &str = "column_comment";
const SRS_ID: &str = "srs_id";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;
// The maximum length of string type
const MAX_STRING_LENGTH: i64 = 2147483647;
const UTF8_CHARSET_NAME: &str = "utf8";
const UTF8_COLLATE_NAME: &str = "utf8_bin";
const PRI_COLUMN_KEY: &str = "PRI";
const TIME_INDEX_COLUMN_KEY: &str = "TIME INDEX";
const DEFAULT_PRIVILEGES: &str = "select,insert";
const EMPTY_STR: &str = "";
impl InformationSchemaColumns { impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self { pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self { Self {
@@ -102,46 +75,12 @@ impl InformationSchemaColumns {
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false), ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false), ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false), ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(ORDINAL_POSITION, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(
CHARACTER_MAXIMUM_LENGTH,
ConcreteDataType::int64_datatype(),
true,
),
ColumnSchema::new(
CHARACTER_OCTET_LENGTH,
ConcreteDataType::int64_datatype(),
true,
),
ColumnSchema::new(NUMERIC_PRECISION, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(NUMERIC_SCALE, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(DATETIME_PRECISION, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(
CHARACTER_SET_NAME,
ConcreteDataType::string_datatype(),
true,
),
ColumnSchema::new(COLLATION_NAME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(COLUMN_KEY, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(EXTRA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PRIVILEGES, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
GENERATION_EXPRESSION,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
GREPTIME_DATA_TYPE,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false), ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false), ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_DEFAULT, ConcreteDataType::string_datatype(), true), ColumnSchema::new(COLUMN_DEFAULT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(IS_NULLABLE, ConcreteDataType::string_datatype(), false), ColumnSchema::new(IS_NULLABLE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_TYPE, ConcreteDataType::string_datatype(), false), ColumnSchema::new(COLUMN_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_COMMENT, ConcreteDataType::string_datatype(), true), ColumnSchema::new(COLUMN_COMMENT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SRS_ID, ConcreteDataType::int64_datatype(), true),
])) ]))
} }
@@ -197,18 +136,9 @@ struct InformationSchemaColumnsBuilder {
schema_names: StringVectorBuilder, schema_names: StringVectorBuilder,
table_names: StringVectorBuilder, table_names: StringVectorBuilder,
column_names: StringVectorBuilder, column_names: StringVectorBuilder,
ordinal_positions: Int64VectorBuilder,
character_maximum_lengths: Int64VectorBuilder,
character_octet_lengths: Int64VectorBuilder,
numeric_precisions: Int64VectorBuilder,
numeric_scales: Int64VectorBuilder,
datetime_precisions: Int64VectorBuilder,
character_set_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
column_keys: StringVectorBuilder,
greptime_data_types: StringVectorBuilder,
data_types: StringVectorBuilder, data_types: StringVectorBuilder,
semantic_types: StringVectorBuilder, semantic_types: StringVectorBuilder,
column_defaults: StringVectorBuilder, column_defaults: StringVectorBuilder,
is_nullables: StringVectorBuilder, is_nullables: StringVectorBuilder,
column_types: StringVectorBuilder, column_types: StringVectorBuilder,
@@ -229,16 +159,6 @@ impl InformationSchemaColumnsBuilder {
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY), schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY), table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_names: StringVectorBuilder::with_capacity(INIT_CAPACITY), column_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_maximum_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_octet_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
numeric_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
numeric_scales: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
datetime_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_set_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_keys: StringVectorBuilder::with_capacity(INIT_CAPACITY),
greptime_data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY), data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
semantic_types: StringVectorBuilder::with_capacity(INIT_CAPACITY), semantic_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_defaults: StringVectorBuilder::with_capacity(INIT_CAPACITY), column_defaults: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -275,7 +195,6 @@ impl InformationSchemaColumnsBuilder {
self.add_column( self.add_column(
&predicates, &predicates,
idx,
&catalog_name, &catalog_name,
&schema_name, &schema_name,
&table.table_info().name, &table.table_info().name,
@@ -289,27 +208,16 @@ impl InformationSchemaColumnsBuilder {
self.finish() self.finish()
} }
#[allow(clippy::too_many_arguments)]
fn add_column( fn add_column(
&mut self, &mut self,
predicates: &Predicates, predicates: &Predicates,
index: usize,
catalog_name: &str, catalog_name: &str,
schema_name: &str, schema_name: &str,
table_name: &str, table_name: &str,
semantic_type: &str, semantic_type: &str,
column_schema: &ColumnSchema, column_schema: &ColumnSchema,
) { ) {
// Use sql data type name let data_type = &column_schema.data_type.name();
let data_type = statements::concrete_data_type_to_sql_data_type(&column_schema.data_type)
.map(|dt| dt.to_string().to_lowercase())
.unwrap_or_else(|_| column_schema.data_type.name());
let column_key = match semantic_type {
SEMANTIC_TYPE_PRIMARY_KEY => PRI_COLUMN_KEY,
SEMANTIC_TYPE_TIME_INDEX => TIME_INDEX_COLUMN_KEY,
_ => EMPTY_STR,
};
let row = [ let row = [
(TABLE_CATALOG, &Value::from(catalog_name)), (TABLE_CATALOG, &Value::from(catalog_name)),
@@ -318,8 +226,6 @@ impl InformationSchemaColumnsBuilder {
(COLUMN_NAME, &Value::from(column_schema.name.as_str())), (COLUMN_NAME, &Value::from(column_schema.name.as_str())),
(DATA_TYPE, &Value::from(data_type.as_str())), (DATA_TYPE, &Value::from(data_type.as_str())),
(SEMANTIC_TYPE, &Value::from(semantic_type)), (SEMANTIC_TYPE, &Value::from(semantic_type)),
(ORDINAL_POSITION, &Value::from((index + 1) as i64)),
(COLUMN_KEY, &Value::from(column_key)),
]; ];
if !predicates.eval(&row) { if !predicates.eval(&row) {
@@ -330,63 +236,7 @@ impl InformationSchemaColumnsBuilder {
self.schema_names.push(Some(schema_name)); self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name)); self.table_names.push(Some(table_name));
self.column_names.push(Some(&column_schema.name)); self.column_names.push(Some(&column_schema.name));
// Starts from 1 self.data_types.push(Some(data_type));
self.ordinal_positions.push(Some((index + 1) as i64));
if column_schema.data_type.is_string() {
self.character_maximum_lengths.push(Some(MAX_STRING_LENGTH));
self.character_octet_lengths.push(Some(MAX_STRING_LENGTH));
self.numeric_precisions.push(None);
self.numeric_scales.push(None);
self.datetime_precisions.push(None);
self.character_set_names.push(Some(UTF8_CHARSET_NAME));
self.collation_names.push(Some(UTF8_COLLATE_NAME));
} else if column_schema.data_type.is_numeric() || column_schema.data_type.is_decimal() {
self.character_maximum_lengths.push(None);
self.character_octet_lengths.push(None);
self.numeric_precisions.push(
column_schema
.data_type
.numeric_precision()
.map(|x| x as i64),
);
self.numeric_scales
.push(column_schema.data_type.numeric_scale().map(|x| x as i64));
self.datetime_precisions.push(None);
self.character_set_names.push(None);
self.collation_names.push(None);
} else {
self.character_maximum_lengths.push(None);
self.character_octet_lengths.push(None);
self.numeric_precisions.push(None);
self.numeric_scales.push(None);
match &column_schema.data_type {
ConcreteDataType::DateTime(datetime_type) => {
self.datetime_precisions
.push(Some(datetime_type.precision() as i64));
}
ConcreteDataType::Timestamp(ts_type) => {
self.datetime_precisions
.push(Some(ts_type.precision() as i64));
}
ConcreteDataType::Time(time_type) => {
self.datetime_precisions
.push(Some(time_type.precision() as i64));
}
_ => self.datetime_precisions.push(None),
}
self.character_set_names.push(None);
self.collation_names.push(None);
}
self.column_keys.push(Some(column_key));
self.greptime_data_types
.push(Some(&column_schema.data_type.name()));
self.data_types.push(Some(&data_type));
self.semantic_types.push(Some(semantic_type)); self.semantic_types.push(Some(semantic_type));
self.column_defaults.push( self.column_defaults.push(
column_schema column_schema
@@ -399,52 +249,23 @@ impl InformationSchemaColumnsBuilder {
} else { } else {
self.is_nullables.push(Some("No")); self.is_nullables.push(Some("No"));
} }
self.column_types.push(Some(&data_type)); self.column_types.push(Some(data_type));
self.column_comments self.column_comments
.push(column_schema.column_comment().map(|x| x.as_ref())); .push(column_schema.column_comment().map(|x| x.as_ref()));
} }
fn finish(&mut self) -> Result<RecordBatch> { fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.collation_names.len();
let privileges = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec![DEFAULT_PRIVILEGES])),
rows_num,
));
let empty_string = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec![EMPTY_STR])),
rows_num,
));
let srs_ids = Arc::new(ConstantVector::new(
Arc::new(Int64Vector::from(vec![None])),
rows_num,
));
let columns: Vec<VectorRef> = vec![ let columns: Vec<VectorRef> = vec![
Arc::new(self.catalog_names.finish()), Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()), Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()), Arc::new(self.table_names.finish()),
Arc::new(self.column_names.finish()), Arc::new(self.column_names.finish()),
Arc::new(self.ordinal_positions.finish()),
Arc::new(self.character_maximum_lengths.finish()),
Arc::new(self.character_octet_lengths.finish()),
Arc::new(self.numeric_precisions.finish()),
Arc::new(self.numeric_scales.finish()),
Arc::new(self.datetime_precisions.finish()),
Arc::new(self.character_set_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.column_keys.finish()),
empty_string.clone(),
privileges,
empty_string,
Arc::new(self.greptime_data_types.finish()),
Arc::new(self.data_types.finish()), Arc::new(self.data_types.finish()),
Arc::new(self.semantic_types.finish()), Arc::new(self.semantic_types.finish()),
Arc::new(self.column_defaults.finish()), Arc::new(self.column_defaults.finish()),
Arc::new(self.is_nullables.finish()), Arc::new(self.is_nullables.finish()),
Arc::new(self.column_types.finish()), Arc::new(self.column_types.finish()),
Arc::new(self.column_comments.finish()), Arc::new(self.column_comments.finish()),
srs_ids,
]; ];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu) RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)

View File

@@ -37,23 +37,15 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates}; use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager; use crate::CatalogManager;
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema"; const CONSTRAINT_SCHEMA: &str = "constraint_schema";
pub const CONSTRAINT_NAME: &str = "constraint_name"; const CONSTRAINT_NAME: &str = "constraint_name";
// It's always `def` in MySQL const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_CATALOG: &str = "table_catalog"; const TABLE_SCHEMA: &str = "table_schema";
// The real catalog name for this key column. const TABLE_NAME: &str = "table_name";
pub const REAL_TABLE_CATALOG: &str = "real_table_catalog"; const COLUMN_NAME: &str = "column_name";
pub const TABLE_SCHEMA: &str = "table_schema"; const ORDINAL_POSITION: &str = "ordinal_position";
pub const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name";
pub const ORDINAL_POSITION: &str = "ordinal_position";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;
/// Primary key constraint name
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
/// Time index constraint name
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`. /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage { pub(super) struct InformationSchemaKeyColumnUsage {
schema: SchemaRef, schema: SchemaRef,
@@ -84,11 +76,6 @@ impl InformationSchemaKeyColumnUsage {
), ),
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false), ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false), ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
REAL_TABLE_CATALOG,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false), ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false), ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false), ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
@@ -171,7 +158,6 @@ struct InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder, constraint_schema: StringVectorBuilder,
constraint_name: StringVectorBuilder, constraint_name: StringVectorBuilder,
table_catalog: StringVectorBuilder, table_catalog: StringVectorBuilder,
real_table_catalog: StringVectorBuilder,
table_schema: StringVectorBuilder, table_schema: StringVectorBuilder,
table_name: StringVectorBuilder, table_name: StringVectorBuilder,
column_name: StringVectorBuilder, column_name: StringVectorBuilder,
@@ -193,7 +179,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY), constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY), constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY), table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
real_table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY), table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY), table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY), column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -237,8 +222,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.add_key_column_usage( self.add_key_column_usage(
&predicates, &predicates,
&schema_name, &schema_name,
TIME_INDEX_CONSTRAINT_NAME, "TIME INDEX",
&catalog_name,
&schema_name, &schema_name,
&table_name, &table_name,
&column.name, &column.name,
@@ -247,7 +231,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
} }
if keys.contains(&idx) { if keys.contains(&idx) {
primary_constraints.push(( primary_constraints.push((
catalog_name.clone(),
schema_name.clone(), schema_name.clone(),
table_name.clone(), table_name.clone(),
column.name.clone(), column.name.clone(),
@@ -261,14 +244,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
} }
} }
for (i, (catalog_name, schema_name, table_name, column_name)) in for (i, (schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate() primary_constraints.into_iter().enumerate()
{ {
self.add_key_column_usage( self.add_key_column_usage(
&predicates, &predicates,
&schema_name, &schema_name,
PRI_CONSTRAINT_NAME, "PRIMARY",
&catalog_name,
&schema_name, &schema_name,
&table_name, &table_name,
&column_name, &column_name,
@@ -287,7 +269,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
predicates: &Predicates, predicates: &Predicates,
constraint_schema: &str, constraint_schema: &str,
constraint_name: &str, constraint_name: &str,
table_catalog: &str,
table_schema: &str, table_schema: &str,
table_name: &str, table_name: &str,
column_name: &str, column_name: &str,
@@ -296,7 +277,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
let row = [ let row = [
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)), (CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
(CONSTRAINT_NAME, &Value::from(constraint_name)), (CONSTRAINT_NAME, &Value::from(constraint_name)),
(REAL_TABLE_CATALOG, &Value::from(table_catalog)),
(TABLE_SCHEMA, &Value::from(table_schema)), (TABLE_SCHEMA, &Value::from(table_schema)),
(TABLE_NAME, &Value::from(table_name)), (TABLE_NAME, &Value::from(table_name)),
(COLUMN_NAME, &Value::from(column_name)), (COLUMN_NAME, &Value::from(column_name)),
@@ -311,7 +291,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.constraint_schema.push(Some(constraint_schema)); self.constraint_schema.push(Some(constraint_schema));
self.constraint_name.push(Some(constraint_name)); self.constraint_name.push(Some(constraint_name));
self.table_catalog.push(Some("def")); self.table_catalog.push(Some("def"));
self.real_table_catalog.push(Some(table_catalog));
self.table_schema.push(Some(table_schema)); self.table_schema.push(Some(table_schema));
self.table_name.push(Some(table_name)); self.table_name.push(Some(table_name));
self.column_name.push(Some(column_name)); self.column_name.push(Some(column_name));
@@ -331,7 +310,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
Arc::new(self.constraint_schema.finish()), Arc::new(self.constraint_schema.finish()),
Arc::new(self.constraint_name.finish()), Arc::new(self.constraint_name.finish()),
Arc::new(self.table_catalog.finish()), Arc::new(self.table_catalog.finish()),
Arc::new(self.real_table_catalog.finish()),
Arc::new(self.table_schema.finish()), Arc::new(self.table_schema.finish()),
Arc::new(self.table_name.finish()), Arc::new(self.table_name.finish()),
Arc::new(self.column_name.finish()), Arc::new(self.column_name.finish()),

View File

@@ -14,15 +14,13 @@
use std::sync::Arc; use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE}; use common_catalog::consts::MITO_ENGINE;
use datatypes::prelude::{ConcreteDataType, VectorRef}; use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector}; use datatypes::vectors::{Int64Vector, StringVector};
use crate::information_schema::table_names::*; use crate::information_schema::table_names::*;
const NO_VALUE: &str = "NO";
/// Find the schema and columns by the table_name, only valid for memory tables. /// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise. /// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) { pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
@@ -61,15 +59,14 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"SAVEPOINTS", "SAVEPOINTS",
]), ]),
vec![ vec![
Arc::new(StringVector::from(vec![MITO_ENGINE, METRIC_ENGINE])), Arc::new(StringVector::from(vec![MITO_ENGINE])),
Arc::new(StringVector::from(vec!["DEFAULT", "YES"])), Arc::new(StringVector::from(vec!["DEFAULT"])),
Arc::new(StringVector::from(vec![ Arc::new(StringVector::from(vec![
"Storage engine for time-series data", "Storage engine for time-series data",
"Storage engine for observability scenarios, which is adept at handling a large number of small tables, making it particularly suitable for cloud-native monitoring",
])), ])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])), Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])), Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])), Arc::new(StringVector::from(vec!["NO"])),
], ],
), ),

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use core::pin::pin;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef; use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -32,7 +31,7 @@ use datatypes::vectors::{
ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder, ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder, MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
}; };
use futures::{StreamExt, TryStreamExt}; use futures::TryStreamExt;
use partition::manager::PartitionInfo; use partition::manager::PartitionInfo;
use partition::partition::PartitionDef; use partition::partition::PartitionDef;
use snafu::{OptionExt, ResultExt}; use snafu::{OptionExt, ResultExt};
@@ -241,55 +240,32 @@ impl InformationSchemaPartitionsBuilder {
let predicates = Predicates::from_scan_request(&request); let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? { for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_info_stream = catalog_manager let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
.tables(&catalog_name, &schema_name)
.await while let Some(table) = stream.try_next().await? {
.try_filter_map(|t| async move { let table_info = table.table_info();
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary { if table_info.table_type == TableType::Temporary {
Ok(None) continue;
} else {
Ok(Some(table_info))
} }
});
const BATCH_SIZE: usize = 128; let table_id = table_info.ident.table_id;
let partitions = if let Some(partition_manager) = &partition_manager {
// Split table infos into chunks
let mut table_info_chunks = pin!(table_info_stream.ready_chunks(BATCH_SIZE));
while let Some(table_infos) = table_info_chunks.next().await {
let table_infos = table_infos.into_iter().collect::<Result<Vec<_>>>()?;
let table_ids: Vec<TableId> =
table_infos.iter().map(|info| info.ident.table_id).collect();
let mut table_partitions = if let Some(partition_manager) = &partition_manager {
partition_manager partition_manager
.batch_find_table_partitions(&table_ids) .find_table_partitions(table_id)
.await .await
.context(FindPartitionsSnafu)? .context(FindPartitionsSnafu {
table: &table_info.name,
})?
} else { } else {
// Current node must be a standalone instance, contains only one partition by default. // Current node must be a standalone instance, contains only one partition by default.
// TODO(dennis): change it when we support multi-regions for standalone. // TODO(dennis): change it when we support multi-regions for standalone.
table_ids
.into_iter()
.map(|table_id| {
(
table_id,
vec![PartitionInfo { vec![PartitionInfo {
id: RegionId::new(table_id, 0), id: RegionId::new(table_id, 0),
partition: PartitionDef::new(vec![], vec![]), partition: PartitionDef::new(vec![], vec![]),
}], }]
)
})
.collect()
}; };
for table_info in table_infos {
let partitions = table_partitions
.remove(&table_info.ident.table_id)
.unwrap_or(vec![]);
self.add_partitions( self.add_partitions(
&predicates, &predicates,
&table_info, &table_info,
@@ -300,7 +276,6 @@ impl InformationSchemaPartitionsBuilder {
); );
} }
} }
}
self.finish() self.finish()
} }

View File

@@ -109,7 +109,11 @@ impl Predicate {
}; };
} }
Predicate::Not(p) => { Predicate::Not(p) => {
return Some(!p.eval(row)?); let Some(b) = p.eval(row) else {
return None;
};
return Some(!b);
} }
} }
@@ -121,7 +125,13 @@ impl Predicate {
fn from_expr(expr: DfExpr) -> Option<Predicate> { fn from_expr(expr: DfExpr) -> Option<Predicate> {
match expr { match expr {
// NOT expr // NOT expr
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))), DfExpr::Not(expr) => {
let Some(p) = Self::from_expr(*expr) else {
return None;
};
Some(Predicate::Not(Box::new(p)))
}
// expr LIKE pattern // expr LIKE pattern
DfExpr::Like(Like { DfExpr::Like(Like {
negated, negated,
@@ -168,15 +178,25 @@ impl Predicate {
} }
// left AND right // left AND right
(left, Operator::And, right) => { (left, Operator::And, right) => {
let left = Self::from_expr(left)?; let Some(left) = Self::from_expr(left) else {
let right = Self::from_expr(right)?; return None;
};
let Some(right) = Self::from_expr(right) else {
return None;
};
Some(Predicate::And(Box::new(left), Box::new(right))) Some(Predicate::And(Box::new(left), Box::new(right)))
} }
// left OR right // left OR right
(left, Operator::Or, right) => { (left, Operator::Or, right) => {
let left = Self::from_expr(left)?; let Some(left) = Self::from_expr(left) else {
let right = Self::from_expr(right)?; return None;
};
let Some(right) = Self::from_expr(right) else {
return None;
};
Some(Predicate::Or(Box::new(left), Box::new(right))) Some(Predicate::Or(Box::new(left), Box::new(right)))
} }

View File

@@ -199,7 +199,7 @@ impl InformationSchemaRegionPeersBuilder {
let table_routes = if let Some(partition_manager) = &partition_manager { let table_routes = if let Some(partition_manager) = &partition_manager {
partition_manager partition_manager
.batch_find_region_routes(&table_ids) .find_region_routes_batch(&table_ids)
.await .await
.context(FindRegionRoutesSnafu)? .context(FindRegionRoutesSnafu)?
} else { } else {

View File

@@ -37,8 +37,8 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates}; use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager; use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name"; const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name"; const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name"; const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name"; const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;

View File

@@ -1,286 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, TABLE_CONSTRAINTS};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::key_column_usage::{
PRI_CONSTRAINT_NAME, TIME_INDEX_CONSTRAINT_NAME,
};
use crate::information_schema::Predicates;
use crate::CatalogManager;
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
pub(super) struct InformationSchemaTableConstraints {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}
const CONSTRAINT_CATALOG: &str = "constraint_catalog";
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
const CONSTRAINT_NAME: &str = "constraint_name";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const CONSTRAINT_TYPE: &str = "constraint_type";
const ENFORCED: &str = "enforced";
const INIT_CAPACITY: usize = 42;
const TIME_INDEX_CONSTRAINT_TYPE: &str = "TIME INDEX";
const PRI_KEY_CONSTRAINT_TYPE: &str = "PRIMARY KEY";
impl InformationSchemaTableConstraints {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(
CONSTRAINT_CATALOG,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
CONSTRAINT_SCHEMA,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(CONSTRAINT_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(ENFORCED, ConcreteDataType::string_datatype(), false),
]))
}
fn builder(&self) -> InformationSchemaTableConstraintsBuilder {
InformationSchemaTableConstraintsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
)
}
}
impl InformationTable for InformationSchemaTableConstraints {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID
}
fn table_name(&self) -> &'static str {
TABLE_CONSTRAINTS
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_table_constraints(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaTableConstraintsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
constraint_schemas: StringVectorBuilder,
constraint_names: StringVectorBuilder,
table_schemas: StringVectorBuilder,
table_names: StringVectorBuilder,
constraint_types: StringVectorBuilder,
}
impl InformationSchemaTableConstraintsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
constraint_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
/// Construct the `information_schema.table_constraints` virtual table
async fn make_table_constraints(
&mut self,
request: Option<ScanRequest>,
) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
if schema.timestamp_index().is_some() {
self.add_table_constraint(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&schema_name,
&table.table_info().name,
TIME_INDEX_CONSTRAINT_TYPE,
);
}
if !keys.is_empty() {
self.add_table_constraint(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&schema_name,
&table.table_info().name,
PRI_KEY_CONSTRAINT_TYPE,
);
}
}
}
self.finish()
}
fn add_table_constraint(
&mut self,
predicates: &Predicates,
constraint_schema: &str,
constraint_name: &str,
table_schema: &str,
table_name: &str,
constraint_type: &str,
) {
let row = [
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
(CONSTRAINT_NAME, &Value::from(constraint_name)),
(TABLE_SCHEMA, &Value::from(table_schema)),
(TABLE_NAME, &Value::from(table_name)),
(CONSTRAINT_TYPE, &Value::from(constraint_type)),
];
if !predicates.eval(&row) {
return;
}
self.constraint_schemas.push(Some(constraint_schema));
self.constraint_names.push(Some(constraint_name));
self.table_schemas.push(Some(table_schema));
self.table_names.push(Some(table_name));
self.constraint_types.push(Some(constraint_type));
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.constraint_names.len();
let constraint_catalogs = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["def"])),
rows_num,
));
let enforceds = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["YES"])),
rows_num,
));
let columns: Vec<VectorRef> = vec![
constraint_catalogs,
Arc::new(self.constraint_schemas.finish()),
Arc::new(self.constraint_names.finish()),
Arc::new(self.table_schemas.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.constraint_types.finish()),
enforceds,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaTableConstraints {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_table_constraints(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

View File

@@ -41,4 +41,3 @@ pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics"; pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions"; pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "greptime_region_peers"; pub const REGION_PEERS: &str = "greptime_region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";

View File

@@ -39,10 +39,10 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates}; use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager; use crate::CatalogManager;
pub const TABLE_CATALOG: &str = "table_catalog"; const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema"; const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name"; const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type"; const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id"; const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine"; const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;

View File

@@ -17,6 +17,7 @@ use std::fmt::Debug;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Duration; use std::time::Duration;
use std::usize;
use common_error::ext::BoxedError; use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator; use common_meta::cache_invalidator::KvCacheInvalidator;
@@ -81,10 +82,12 @@ impl CachedMetaKvBackendBuilder {
let cache_ttl = self.cache_ttl.unwrap_or(DEFAULT_CACHE_TTL); let cache_ttl = self.cache_ttl.unwrap_or(DEFAULT_CACHE_TTL);
let cache_tti = self.cache_tti.unwrap_or(DEFAULT_CACHE_TTI); let cache_tti = self.cache_tti.unwrap_or(DEFAULT_CACHE_TTI);
let cache = CacheBuilder::new(cache_max_capacity) let cache = Arc::new(
CacheBuilder::new(cache_max_capacity)
.time_to_live(cache_ttl) .time_to_live(cache_ttl)
.time_to_idle(cache_tti) .time_to_idle(cache_tti)
.build(); .build(),
);
let kv_backend = Arc::new(MetaKvBackend { let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client, client: self.meta_client,
@@ -101,7 +104,7 @@ impl CachedMetaKvBackendBuilder {
} }
} }
pub type CacheBackend = Cache<Vec<u8>, KeyValue>; pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
/// A wrapper of `MetaKvBackend` with cache support. /// A wrapper of `MetaKvBackend` with cache support.
/// ///
@@ -114,7 +117,7 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
/// TTL and TTI for cache. /// TTL and TTI for cache.
pub struct CachedMetaKvBackend { pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef, kv_backend: KvBackendRef,
cache: CacheBackend, cache: CacheBackendRef,
name: String, name: String,
version: AtomicUsize, version: AtomicUsize,
} }
@@ -314,10 +317,12 @@ impl CachedMetaKvBackend {
// only for test // only for test
#[cfg(test)] #[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self { fn wrap(kv_backend: KvBackendRef) -> Self {
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY) let cache = Arc::new(
CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL) .time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI) .time_to_idle(DEFAULT_CACHE_TTI)
.build(); .build(),
);
let name = format!("CachedKvBackend({})", kv_backend.name()); let name = format!("CachedKvBackend({})", kv_backend.name());
Self { Self {
@@ -328,7 +333,7 @@ impl CachedMetaKvBackend {
} }
} }
pub fn cache(&self) -> &CacheBackend { pub fn cache(&self) -> &CacheBackendRef {
&self.cache &self.cache
} }
@@ -363,10 +368,6 @@ impl KvBackend for MetaKvBackend {
"MetaKvBackend" "MetaKvBackend"
} }
fn as_any(&self) -> &dyn Any {
self
}
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> { async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
self.client self.client
.range(req) .range(req)
@@ -375,6 +376,27 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu) .context(ExternalSnafu)
} }
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
key: kv.take_key(),
value: kv.take_value(),
}))
}
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
self.client
.batch_put(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn put(&self, req: PutRequest) -> Result<PutResponse> { async fn put(&self, req: PutRequest) -> Result<PutResponse> {
self.client self.client
.put(req) .put(req)
@@ -383,9 +405,17 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu) .context(ExternalSnafu)
} }
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> { async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
self.client self.client
.batch_put(req) .delete_range(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
self.client
.batch_delete(req)
.await .await
.map_err(BoxedError::new) .map_err(BoxedError::new)
.context(ExternalSnafu) .context(ExternalSnafu)
@@ -410,33 +440,8 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu) .context(ExternalSnafu)
} }
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> { fn as_any(&self) -> &dyn Any {
self.client self
.delete_range(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
self.client
.batch_delete(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
key: kv.take_key(),
value: kv.take_value(),
}))
} }
} }
@@ -505,32 +510,32 @@ mod tests {
} }
async fn range(&self, _req: RangeRequest) -> Result<RangeResponse, Self::Error> { async fn range(&self, _req: RangeRequest) -> Result<RangeResponse, Self::Error> {
unimplemented!() todo!()
} }
async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> { async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
unimplemented!() todo!()
} }
async fn compare_and_put( async fn compare_and_put(
&self, &self,
_req: CompareAndPutRequest, _req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse, Self::Error> { ) -> Result<CompareAndPutResponse, Self::Error> {
unimplemented!() todo!()
} }
async fn delete_range( async fn delete_range(
&self, &self,
_req: DeleteRangeRequest, _req: DeleteRangeRequest,
) -> Result<DeleteRangeResponse, Self::Error> { ) -> Result<DeleteRangeResponse, Self::Error> {
unimplemented!() todo!()
} }
async fn batch_delete( async fn batch_delete(
&self, &self,
_req: BatchDeleteRequest, _req: BatchDeleteRequest,
) -> Result<BatchDeleteResponse, Self::Error> { ) -> Result<BatchDeleteResponse, Self::Error> {
unimplemented!() todo!()
} }
} }

View File

@@ -15,36 +15,32 @@
use std::any::Any; use std::any::Any;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration;
use async_stream::try_stream; use async_stream::try_stream;
use common_catalog::consts::{ use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError; use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator}; use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::instruction::CacheIdent; use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey; use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey; use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue; use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey; use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef}; use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef; use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::stream::BoxStream; use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt}; use futures_util::{StreamExt, TryStreamExt};
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache; use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef}; use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*; use snafu::prelude::*;
use table::dist_table::DistTable; use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef; use table::TableRef;
use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{ use crate::error::{
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result, self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu,
TableCacheNotGetSnafu, TableMetadataManagerSnafu, Result as CatalogResult, TableMetadataManagerSnafu,
}; };
use crate::information_schema::InformationSchemaProvider; use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager; use crate::CatalogManager;
@@ -56,74 +52,56 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only. /// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)] #[derive(Clone)]
pub struct KvBackendCatalogManager { pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef, partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef, table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables /// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog, system_catalog: SystemCatalog,
table_cache: AsyncCache<String, TableRef>,
} }
struct TableCacheInvalidator { fn make_table(table_info_value: TableInfoValue) -> CatalogResult<TableRef> {
table_cache: AsyncCache<String, TableRef>, let table_info = table_info_value
} .table_info
.try_into()
impl TableCacheInvalidator { .context(catalog_err::InvalidTableInfoInCatalogSnafu)?;
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self { Ok(DistTable::table(Arc::new(table_info)))
Self { table_cache }
}
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl CacheInvalidator for TableCacheInvalidator { impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate( async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
&self, self.cache_invalidator
_ctx: &Context, .invalidate_table_id(ctx, table_id)
caches: Vec<CacheIdent>, .await
) -> common_meta::error::Result<()> {
for cache in caches {
if let CacheIdent::TableName(table_name) = cache {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.table_cache.invalidate(&table_cache_key).await;
} }
}
Ok(()) async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
} }
} }
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128; const DEFAULT_CACHED_CATALOG: u64 = 128;
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
impl KvBackendCatalogManager { impl KvBackendCatalogManager {
pub async fn new( pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
) -> Arc<Self> {
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build();
multi_cache_invalidator
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
.await;
Arc::new_cyclic(|me| Self { Arc::new_cyclic(|me| Self {
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())), partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)), table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
system_catalog: SystemCatalog { system_catalog: SystemCatalog {
catalog_manager: me.clone(), catalog_manager: me.clone(),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY), catalog_cache: Cache::new(DEFAULT_CACHED_CATALOG),
information_schema_provider: Arc::new(InformationSchemaProvider::new( information_schema_provider: Arc::new(InformationSchemaProvider::new(
DEFAULT_CATALOG_NAME.to_string(), // The catalog name is not used in system_catalog, so let it empty
String::default(),
me.clone(), me.clone(),
)), )),
}, },
table_cache,
}) })
} }
@@ -142,11 +120,12 @@ impl CatalogManager for KvBackendCatalogManager {
self self
} }
async fn catalog_names(&self) -> Result<Vec<String>> { async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
let stream = self let stream = self
.table_metadata_manager .table_metadata_manager
.catalog_manager() .catalog_manager()
.catalog_names(); .catalog_names()
.await;
let keys = stream let keys = stream
.try_collect::<Vec<_>>() .try_collect::<Vec<_>>()
@@ -157,11 +136,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys) Ok(keys)
} }
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> { async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
let stream = self let stream = self
.table_metadata_manager .table_metadata_manager
.schema_manager() .schema_manager()
.schema_names(catalog); .schema_names(catalog)
.await;
let mut keys = stream let mut keys = stream
.try_collect::<BTreeSet<_>>() .try_collect::<BTreeSet<_>>()
.await .await
@@ -173,11 +153,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys.into_iter().collect()) Ok(keys.into_iter().collect())
} }
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> { async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
let stream = self let stream = self
.table_metadata_manager .table_metadata_manager
.table_name_manager() .table_name_manager()
.tables(catalog, schema); .tables(catalog, schema)
.await;
let mut tables = stream let mut tables = stream
.try_collect::<Vec<_>>() .try_collect::<Vec<_>>()
.await .await
@@ -191,7 +172,7 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(tables.into_iter().collect()) Ok(tables.into_iter().collect())
} }
async fn catalog_exists(&self, catalog: &str) -> Result<bool> { async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
self.table_metadata_manager self.table_metadata_manager
.catalog_manager() .catalog_manager()
.exists(CatalogNameKey::new(catalog)) .exists(CatalogNameKey::new(catalog))
@@ -199,7 +180,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu) .context(TableMetadataManagerSnafu)
} }
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> { async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
if self.system_catalog.schema_exist(schema) { if self.system_catalog.schema_exist(schema) {
return Ok(true); return Ok(true);
} }
@@ -211,7 +192,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu) .context(TableMetadataManagerSnafu)
} }
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> { async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
if self.system_catalog.table_exist(schema, table) { if self.system_catalog.table_exist(schema, table) {
return Ok(true); return Ok(true);
} }
@@ -230,24 +211,20 @@ impl CatalogManager for KvBackendCatalogManager {
catalog: &str, catalog: &str,
schema: &str, schema: &str,
table_name: &str, table_name: &str,
) -> Result<Option<TableRef>> { ) -> CatalogResult<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) { if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
return Ok(Some(table)); return Ok(Some(table));
} }
let init = async { let key = TableNameKey::new(catalog, schema, table_name);
let table_name_key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self let Some(table_name_value) = self
.table_metadata_manager .table_metadata_manager
.table_name_manager() .table_name_manager()
.get(table_name_key) .get(key)
.await .await
.context(TableMetadataManagerSnafu)? .context(TableMetadataManagerSnafu)?
else { else {
return TableCacheNotGetSnafu { return Ok(None);
key: table_name_key.to_string(),
}
.fail();
}; };
let table_id = table_name_value.table_id(); let table_id = table_name_value.table_id();
@@ -259,35 +236,16 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)? .context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner()) .map(|v| v.into_inner())
else { else {
return TableCacheNotGetSnafu { return Ok(None);
key: table_name_key.to_string(),
}
.fail();
}; };
build_table(table_info_value) make_table(table_info_value).map(Some)
};
match self
.table_cache
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
.await
{
Ok(table) => Ok(Some(table)),
Err(err) => match err.as_ref() {
TableCacheNotGet { .. } => Ok(None),
_ => Err(err),
},
}
.map_err(|err| GetTableCache {
err_msg: err.to_string(),
})
} }
async fn tables<'a>( async fn tables<'a>(
&'a self, &'a self,
catalog: &'a str, catalog: &'a str,
schema: &'a str, schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> { ) -> BoxStream<'a, CatalogResult<TableRef>> {
let sys_tables = try_stream!({ let sys_tables = try_stream!({
// System tables // System tables
let sys_table_names = self.system_catalog.table_names(schema); let sys_table_names = self.system_catalog.table_names(schema);
@@ -302,6 +260,7 @@ impl CatalogManager for KvBackendCatalogManager {
.table_metadata_manager .table_metadata_manager
.table_name_manager() .table_name_manager()
.tables(catalog, schema) .tables(catalog, schema)
.await
.map_ok(|(_, v)| v.table_id()); .map_ok(|(_, v)| v.table_id());
const BATCH_SIZE: usize = 128; const BATCH_SIZE: usize = 128;
let user_tables = try_stream!({ let user_tables = try_stream!({
@@ -311,7 +270,7 @@ impl CatalogManager for KvBackendCatalogManager {
while let Some(table_ids) = table_id_chunks.next().await { while let Some(table_ids) = table_id_chunks.next().await {
let table_ids = table_ids let table_ids = table_ids
.into_iter() .into_iter()
.collect::<std::result::Result<Vec<_>, _>>() .collect::<Result<Vec<_>, _>>()
.map_err(BoxedError::new) .map_err(BoxedError::new)
.context(ListTablesSnafu { catalog, schema })?; .context(ListTablesSnafu { catalog, schema })?;
@@ -323,7 +282,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)?; .context(TableMetadataManagerSnafu)?;
for table_info_value in table_info_values.into_values() { for table_info_value in table_info_values.into_values() {
yield build_table(table_info_value)?; yield make_table(table_info_value)?;
} }
} }
}); });
@@ -332,14 +291,6 @@ impl CatalogManager for KvBackendCatalogManager {
} }
} }
fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
let table_info = table_info_value
.table_info
.try_into()
.context(InvalidTableInfoInCatalogSnafu)?;
Ok(DistTable::table(Arc::new(table_info)))
}
// TODO: This struct can hold a static map of all system tables when // TODO: This struct can hold a static map of all system tables when
// the upper layer (e.g., procedure) can inform the catalog manager // the upper layer (e.g., procedure) can inform the catalog manager
// a new catalog is created. // a new catalog is created.

View File

@@ -19,10 +19,10 @@ use std::any::Any;
use std::fmt::{Debug, Formatter}; use std::fmt::{Debug, Formatter};
use std::sync::Arc; use std::sync::Arc;
use api::v1::CreateTableExpr;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures_util::stream::BoxStream; use futures_util::stream::BoxStream;
use table::metadata::TableId; use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef; use table::TableRef;
use crate::error::Result; use crate::error::Result;
@@ -75,9 +75,9 @@ pub type OpenSystemTableHook =
/// Register system table request: /// Register system table request:
/// - When system table is already created and registered, the hook will be called /// - When system table is already created and registered, the hook will be called
/// with table ref after opening the system table /// with table ref after opening the system table
/// - When system table is not exists, create and register the table by `create_table_expr` and calls `open_hook` with the created table. /// - When system table is not exists, create and register the table by create_table_request and calls open_hook with the created table.
pub struct RegisterSystemTableRequest { pub struct RegisterSystemTableRequest {
pub create_table_expr: CreateTableExpr, pub create_table_request: CreateTableRequest,
pub open_hook: Option<OpenSystemTableHook>, pub open_hook: Option<OpenSystemTableHook>,
} }

View File

@@ -49,7 +49,10 @@ impl DfTableSourceProvider {
} }
} }
pub fn resolve_table_ref(&self, table_ref: TableReference) -> Result<ResolvedTableReference> { pub fn resolve_table_ref<'a>(
&'a self,
table_ref: TableReference<'a>,
) -> Result<ResolvedTableReference<'a>> {
if self.disallow_cross_catalog_query { if self.disallow_cross_catalog_query {
match &table_ref { match &table_ref {
TableReference::Bare { .. } => (), TableReference::Bare { .. } => (),
@@ -73,7 +76,7 @@ impl DfTableSourceProvider {
pub async fn resolve_table( pub async fn resolve_table(
&mut self, &mut self,
table_ref: TableReference, table_ref: TableReference<'_>,
) -> Result<Arc<dyn TableSource>> { ) -> Result<Arc<dyn TableSource>> {
let table_ref = self.resolve_table_ref(table_ref)?; let table_ref = self.resolve_table_ref(table_ref)?;
@@ -103,6 +106,8 @@ impl DfTableSourceProvider {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::borrow::Cow;
use session::context::QueryContext; use session::context::QueryContext;
use super::*; use super::*;
@@ -115,37 +120,68 @@ mod tests {
let table_provider = let table_provider =
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx); DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_ref = TableReference::bare("table_name"); let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok()); assert!(result.is_ok());
let table_ref = TableReference::partial("public", "table_name"); let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok()); assert!(result.is_ok());
let table_ref = TableReference::partial("wrong_schema", "table_name"); let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok()); assert!(result.is_ok());
let table_ref = TableReference::full("greptime", "public", "table_name"); let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok()); assert!(result.is_ok());
let table_ref = TableReference::full("wrong_catalog", "public", "table_name"); let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err()); assert!(result.is_err());
let table_ref = TableReference::partial("information_schema", "columns"); let table_ref = TableReference::Partial {
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
let result = table_provider.resolve_table_ref(table_ref); let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok()); assert!(result.is_ok());
let table_ref = TableReference::full("greptime", "information_schema", "columns"); let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_ok()); assert!(table_provider.resolve_table_ref(table_ref).is_ok());
let table_ref = TableReference::full("dummy", "information_schema", "columns"); let table_ref = TableReference::Full {
catalog: Cow::Borrowed("dummy"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_err()); assert!(table_provider.resolve_table_ref(table_ref).is_err());
let table_ref = TableReference::full("greptime", "greptime_private", "columns"); let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("greptime_private"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_ok()); assert!(table_provider.resolve_table_ref(table_ref).is_ok());
} }
} }

View File

@@ -7,15 +7,13 @@ license.workspace = true
[features] [features]
testing = [] testing = []
[lints]
workspace = true
[dependencies] [dependencies]
api.workspace = true api.workspace = true
arc-swap = "1.6" arc-swap = "1.6"
arrow-flight.workspace = true arrow-flight.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait.workspace = true async-trait.workspace = true
common-base.workspace = true
common-catalog.workspace = true common-catalog.workspace = true
common-error.workspace = true common-error.workspace = true
common-grpc.workspace = true common-grpc.workspace = true
@@ -24,6 +22,10 @@ common-meta.workspace = true
common-query.workspace = true common-query.workspace = true
common-recordbatch.workspace = true common-recordbatch.workspace = true
common-telemetry.workspace = true common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
enum_dispatch = "0.3" enum_dispatch = "0.3"
futures-util.workspace = true futures-util.workspace = true
lazy_static.workspace = true lazy_static.workspace = true
@@ -32,7 +34,7 @@ parking_lot = "0.12"
prometheus.workspace = true prometheus.workspace = true
prost.workspace = true prost.workspace = true
rand.workspace = true rand.workspace = true
serde_json.workspace = true session.workspace = true
snafu.workspace = true snafu.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] } tokio-stream = { workspace = true, features = ["net"] }

View File

@@ -37,8 +37,6 @@ use snafu::{ensure, ResultExt};
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu}; use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter}; use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter};
pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
pub struct Database { pub struct Database {
// The "catalog" and "schema" to be used in processing the requests at the server side. // The "catalog" and "schema" to be used in processing the requests at the server side.
@@ -217,7 +215,6 @@ impl Database {
start: start.to_string(), start: start.to_string(),
end: end.to_string(), end: end.to_string(),
step: step.to_string(), step: step.to_string(),
lookback: DEFAULT_LOOKBACK_STRING.to_string(),
})), })),
})) }))
.await .await
@@ -310,7 +307,7 @@ impl Database {
reason: "Expect 'AffectedRows' Flight messages to be the one and the only!" reason: "Expect 'AffectedRows' Flight messages to be the one and the only!"
} }
); );
Ok(Output::new_with_affected_rows(rows)) Ok(Output::AffectedRows(rows))
} }
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => { FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
IllegalFlightMessagesSnafu { IllegalFlightMessagesSnafu {
@@ -343,7 +340,7 @@ impl Database {
output_ordering: None, output_ordering: None,
metrics: Default::default(), metrics: Default::default(),
}; };
Ok(Output::new_with_stream(Box::pin(record_batch_stream))) Ok(Output::Stream(Box::pin(record_batch_stream)))
} }
} }
} }

View File

@@ -134,17 +134,10 @@ impl From<Status> for Error {
impl Error { impl Error {
pub fn should_retry(&self) -> bool { pub fn should_retry(&self) -> bool {
// TODO(weny): figure out each case of these codes. !matches!(
matches!(
self, self,
Self::RegionServer { Self::RegionServer {
code: Code::Cancelled, code: Code::InvalidArgument,
..
} | Self::RegionServer {
code: Code::DeadlineExceeded,
..
} | Self::RegionServer {
code: Code::Unavailable,
.. ..
} }
) )

View File

@@ -26,7 +26,7 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse}; use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode; use common_error::status_code::StatusCode;
pub use common_query::{Output, OutputData, OutputMeta}; pub use common_query::Output;
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream}; pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt; use snafu::OptionExt;

View File

@@ -14,8 +14,7 @@
use std::sync::Arc; use std::sync::Arc;
use api::region::RegionResponse; use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::ResponseHeader; use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption; use arc_swap::ArcSwapOption;
use arrow_flight::Ticket; use arrow_flight::Ticket;
@@ -24,7 +23,7 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt}; use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode; use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage}; use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::Datanode; use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult}; use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu; use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream}; use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
@@ -47,7 +46,7 @@ pub struct RegionRequester {
#[async_trait] #[async_trait]
impl Datanode for RegionRequester { impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<RegionResponse> { async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
self.handle_inner(request).await.map_err(|err| { self.handle_inner(request).await.map_err(|err| {
if err.should_retry() { if err.should_retry() {
meta_error::Error::RetryLater { meta_error::Error::RetryLater {
@@ -124,8 +123,8 @@ impl RegionRequester {
.fail(); .fail();
}; };
let metrics = Arc::new(ArcSwapOption::from(None)); let metrics_str = Arc::new(ArcSwapOption::from(None));
let metrics_ref = metrics.clone(); let ref_str = metrics_str.clone();
let tracing_context = TracingContext::from_current_span(); let tracing_context = TracingContext::from_current_span();
@@ -141,8 +140,7 @@ impl RegionRequester {
match flight_message { match flight_message {
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch), FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::Metrics(s) => { FlightMessage::Metrics(s) => {
let m = serde_json::from_str(&s).ok().map(Arc::new); ref_str.swap(Some(Arc::new(s)));
metrics_ref.swap(m);
break; break;
} }
_ => { _ => {
@@ -161,12 +159,12 @@ impl RegionRequester {
schema, schema,
stream, stream,
output_ordering: None, output_ordering: None,
metrics, metrics: metrics_str,
}; };
Ok(Box::pin(record_batch_stream)) Ok(Box::pin(record_batch_stream))
} }
async fn handle_inner(&self, request: RegionRequest) -> Result<RegionResponse> { async fn handle_inner(&self, request: RegionRequest) -> Result<AffectedRows> {
let request_type = request let request_type = request
.body .body
.as_ref() .as_ref()
@@ -179,7 +177,10 @@ impl RegionRequester {
let mut client = self.client.raw_region_client()?; let mut client = self.client.raw_region_client()?;
let response = client let RegionResponse {
header,
affected_rows,
} = client
.handle(request) .handle(request)
.await .await
.map_err(|e| { .map_err(|e| {
@@ -193,20 +194,19 @@ impl RegionRequester {
})? })?
.into_inner(); .into_inner();
check_response_header(&response.header)?; check_response_header(header)?;
Ok(RegionResponse::from_region_response(response)) Ok(affected_rows)
} }
pub async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> { pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
self.handle_inner(request).await self.handle_inner(request).await
} }
} }
pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> { pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
let status = header let status = header
.as_ref() .and_then(|header| header.status)
.and_then(|header| header.status.as_ref())
.context(IllegalDatabaseResponseSnafu { .context(IllegalDatabaseResponseSnafu {
err_msg: "either response header or status is missing", err_msg: "either response header or status is missing",
})?; })?;
@@ -220,7 +220,7 @@ pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
})?; })?;
ServerSnafu { ServerSnafu {
code, code,
msg: status.err_msg.clone(), msg: status.err_msg,
} }
.fail() .fail()
} }
@@ -235,19 +235,19 @@ mod test {
#[test] #[test]
fn test_check_response_header() { fn test_check_response_header() {
let result = check_response_header(&None); let result = check_response_header(None);
assert!(matches!( assert!(matches!(
result.unwrap_err(), result.unwrap_err(),
IllegalDatabaseResponse { .. } IllegalDatabaseResponse { .. }
)); ));
let result = check_response_header(&Some(ResponseHeader { status: None })); let result = check_response_header(Some(ResponseHeader { status: None }));
assert!(matches!( assert!(matches!(
result.unwrap_err(), result.unwrap_err(),
IllegalDatabaseResponse { .. } IllegalDatabaseResponse { .. }
)); ));
let result = check_response_header(&Some(ResponseHeader { let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus { status: Some(PbStatus {
status_code: StatusCode::Success as u32, status_code: StatusCode::Success as u32,
err_msg: String::default(), err_msg: String::default(),
@@ -255,7 +255,7 @@ mod test {
})); }));
assert!(result.is_ok()); assert!(result.is_ok());
let result = check_response_header(&Some(ResponseHeader { let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus { status: Some(PbStatus {
status_code: u32::MAX, status_code: u32::MAX,
err_msg: String::default(), err_msg: String::default(),
@@ -266,7 +266,7 @@ mod test {
IllegalDatabaseResponse { .. } IllegalDatabaseResponse { .. }
)); ));
let result = check_response_header(&Some(ResponseHeader { let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus { status: Some(PbStatus {
status_code: StatusCode::Internal as u32, status_code: StatusCode::Internal as u32,
err_msg: "blabla".to_string(), err_msg: "blabla".to_string(),

View File

@@ -12,10 +12,8 @@ path = "src/bin/greptime.rs"
[features] [features]
tokio-console = ["common-telemetry/tokio-console"] tokio-console = ["common-telemetry/tokio-console"]
[lints]
workspace = true
[dependencies] [dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true async-trait.workspace = true
auth.workspace = true auth.workspace = true
catalog.workspace = true catalog.workspace = true
@@ -51,6 +49,7 @@ meta-client.workspace = true
meta-srv.workspace = true meta-srv.workspace = true
mito2.workspace = true mito2.workspace = true
nu-ansi-term = "0.46" nu-ansi-term = "0.46"
partition.workspace = true
plugins.workspace = true plugins.workspace = true
prometheus.workspace = true prometheus.workspace = true
prost.workspace = true prost.workspace = true
@@ -76,7 +75,6 @@ tikv-jemallocator = "0.5"
common-test-util.workspace = true common-test-util.workspace = true
serde.workspace = true serde.workspace = true
temp-env = "0.3" temp-env = "0.3"
tempfile.workspace = true
[target.'cfg(not(windows))'.dev-dependencies] [target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5" rexpect = "0.5"

View File

@@ -13,8 +13,5 @@
// limitations under the License. // limitations under the License.
fn main() { fn main() {
// Trigger this script if the git branch/commit changes
println!("cargo:rerun-if-changed=.git/refs/heads");
common_version::setup_build_info(); common_version::setup_build_info();
} }

View File

@@ -84,10 +84,10 @@ impl Command {
let mut logging_opts = LoggingOptions::default(); let mut logging_opts = LoggingOptions::default();
if let Some(dir) = &cli_options.log_dir { if let Some(dir) = &cli_options.log_dir {
logging_opts.dir.clone_from(dir); logging_opts.dir = dir.clone();
} }
logging_opts.level.clone_from(&cli_options.log_level); logging_opts.level = cli_options.log_level.clone();
Ok(Options::Cli(Box::new(logging_opts))) Ok(Options::Cli(Box::new(logging_opts)))
} }

View File

@@ -62,9 +62,7 @@ pub struct BenchTableMetadataCommand {
impl BenchTableMetadataCommand { impl BenchTableMetadataCommand {
pub async fn build(&self) -> Result<Instance> { pub async fn build(&self) -> Result<Instance> {
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128) let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
.await
.unwrap();
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store)); let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));

View File

@@ -106,12 +106,9 @@ impl TableMetadataBencher {
.await .await
.unwrap(); .unwrap();
let start = Instant::now(); let start = Instant::now();
let table_info = table_info.unwrap();
let table_route = table_route.unwrap();
let table_id = table_info.table_info.ident.table_id;
let _ = self let _ = self
.table_metadata_manager .table_metadata_manager
.delete_table_metadata(table_id, &table_info.table_name(), &table_route) .delete_table_metadata(&table_info.unwrap(), &table_route.unwrap())
.await; .await;
start.elapsed() start.elapsed()
}, },
@@ -137,7 +134,7 @@ impl TableMetadataBencher {
let start = Instant::now(); let start = Instant::now();
let _ = self let _ = self
.table_metadata_manager .table_metadata_manager
.rename_table(&table_info.unwrap(), new_table_name) .rename_table(table_info.unwrap(), new_table_name)
.await; .await;
start.elapsed() start.elapsed()

View File

@@ -19,7 +19,8 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum}; use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme; use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic; use client::api::v1::Basic;
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME}; use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util::collect; use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn}; use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector; use datatypes::scalars::ScalarVector;
@@ -141,7 +142,7 @@ impl Export {
.with_context(|_| RequestDatabaseSnafu { .with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(), sql: "show databases".to_string(),
})?; })?;
let OutputData::Stream(stream) = result.data else { let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()? NotDataFromOutputSnafu.fail()?
}; };
let record_batch = collect(stream) let record_batch = collect(stream)
@@ -182,7 +183,7 @@ impl Export {
.sql(&sql) .sql(&sql)
.await .await
.with_context(|_| RequestDatabaseSnafu { sql })?; .with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else { let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()? NotDataFromOutputSnafu.fail()?
}; };
let Some(record_batch) = collect(stream) let Some(record_batch) = collect(stream)
@@ -226,10 +227,7 @@ impl Export {
} }
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> { async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!( let sql = format!("show create table {}.{}.{}", catalog, schema, table);
r#"show create table "{}"."{}"."{}""#,
catalog, schema, table
);
let mut client = self.client.clone(); let mut client = self.client.clone();
client.set_catalog(catalog); client.set_catalog(catalog);
client.set_schema(schema); client.set_schema(schema);
@@ -237,7 +235,7 @@ impl Export {
.sql(&sql) .sql(&sql)
.await .await
.with_context(|_| RequestDatabaseSnafu { sql })?; .with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else { let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()? NotDataFromOutputSnafu.fail()?
}; };
let record_batch = collect(stream) let record_batch = collect(stream)
@@ -276,7 +274,7 @@ impl Export {
for (c, s, t) in table_list { for (c, s, t) in table_list {
match self.show_create_table(&c, &s, &t).await { match self.show_create_table(&c, &s, &t).await {
Err(e) => { Err(e) => {
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t) error!(e; "Failed to export table {}.{}.{}", c, s, t)
} }
Ok(create_table) => { Ok(create_table) => {
file.write_all(create_table.as_bytes()) file.write_all(create_table.as_bytes())
@@ -420,84 +418,3 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
Ok((catalog.to_string(), Some(schema.to_string()))) Ok((catalog.to_string(), Some(schema.to_string())))
} }
} }
#[cfg(test)]
mod tests {
use clap::Parser;
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use crate::error::Result;
use crate::options::{CliOptions, Options};
use crate::{cli, standalone, App};
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> Result<()> {
let output_dir = tempfile::tempdir().unwrap();
let standalone = standalone::Command::parse_from([
"standalone",
"start",
"--data-home",
&*output_dir.path().to_string_lossy(),
]);
let Options::Standalone(standalone_opts) =
standalone.load_options(&CliOptions::default())?
else {
unreachable!()
};
let mut instance = standalone.build(*standalone_opts).await?;
instance.start().await?;
let client = Client::with_urls(["127.0.0.1:4001"]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
database
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
.await
.unwrap();
database
.sql(
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
)
.await
.unwrap();
let output_dir = tempfile::tempdir().unwrap();
let cli = cli::Command::parse_from([
"cli",
"export",
"--addr",
"127.0.0.1:4001",
"--output-dir",
&*output_dir.path().to_string_lossy(),
"--target",
"create-table",
]);
let mut cli_app = cli.build().await?;
cli_app.start().await?;
instance.stop().await?;
let output_file = output_dir
.path()
.join("greptime-cli.export.create_table.sql");
let res = std::fs::read_to_string(output_file).unwrap();
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)
ENGINE=mito
WITH(
regions = 1
);
"#;
assert_eq!(res.trim(), expect.trim());
Ok(())
}
}

View File

@@ -19,10 +19,9 @@ use std::time::Instant;
use catalog::kvbackend::{ use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
}; };
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins; use common_base::Plugins;
use common_error::ext::ErrorExt; use common_error::ext::ErrorExt;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output; use common_query::Output;
use common_recordbatch::RecordBatches; use common_recordbatch::RecordBatches;
use common_telemetry::logging; use common_telemetry::logging;
@@ -185,15 +184,15 @@ impl Repl {
} }
.context(RequestDatabaseSnafu { sql: &sql })?; .context(RequestDatabaseSnafu { sql: &sql })?;
let either = match output.data { let either = match output {
OutputData::Stream(s) => { Output::Stream(s) => {
let x = RecordBatches::try_collect(s) let x = RecordBatches::try_collect(s)
.await .await
.context(CollectRecordBatchesSnafu)?; .context(CollectRecordBatchesSnafu)?;
Either::Left(x) Either::Left(x)
} }
OutputData::RecordBatches(x) => Either::Left(x), Output::RecordBatches(x) => Either::Left(x),
OutputData::AffectedRows(rows) => Either::Right(rows), Output::AffectedRows(rows) => Either::Right(rows),
}; };
let end = Instant::now(); let end = Instant::now();
@@ -253,17 +252,14 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend = let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build()); Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list = let catalog_list =
KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await; KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
let plugins: Plugins = Default::default(); let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new( let state = Arc::new(QueryEngineState::new(
catalog_list, catalog_list,
None, None,
None, None,
None,
false, false,
plugins.clone(), plugins.clone(),
)); ));

View File

@@ -70,7 +70,7 @@ impl UpgradeCommand {
etcd_addr: &self.etcd_addr, etcd_addr: &self.etcd_addr,
})?; })?;
let tool = MigrateTableMetadata { let tool = MigrateTableMetadata {
etcd_store: EtcdStore::with_etcd_client(client, 128), etcd_store: EtcdStore::with_etcd_client(client),
dryrun: self.dryrun, dryrun: self.dryrun,
skip_catalog_keys: self.skip_catalog_keys, skip_catalog_keys: self.skip_catalog_keys,
skip_table_global_keys: self.skip_table_global_keys, skip_table_global_keys: self.skip_table_global_keys,

View File

@@ -43,10 +43,6 @@ impl Instance {
pub fn datanode_mut(&mut self) -> &mut Datanode { pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode &mut self.datanode
} }
pub fn datanode(&self) -> &Datanode {
&self.datanode
}
} }
#[async_trait] #[async_trait]
@@ -139,19 +135,19 @@ impl StartCommand {
)?; )?;
if let Some(dir) = &cli_options.log_dir { if let Some(dir) = &cli_options.log_dir {
opts.logging.dir.clone_from(dir); opts.logging.dir = dir.clone();
} }
if cli_options.log_level.is_some() { if cli_options.log_level.is_some() {
opts.logging.level.clone_from(&cli_options.log_level); opts.logging.level = cli_options.log_level.clone();
} }
if let Some(addr) = &self.rpc_addr { if let Some(addr) = &self.rpc_addr {
opts.rpc_addr.clone_from(addr); opts.rpc_addr = addr.clone();
} }
if self.rpc_hostname.is_some() { if self.rpc_hostname.is_some() {
opts.rpc_hostname.clone_from(&self.rpc_hostname); opts.rpc_hostname = self.rpc_hostname.clone();
} }
if let Some(node_id) = self.node_id { if let Some(node_id) = self.node_id {
@@ -161,8 +157,7 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr { if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client opts.meta_client
.get_or_insert_with(MetaClientOptions::default) .get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs .metasrv_addrs = metasrv_addrs.clone();
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed; opts.mode = Mode::Distributed;
} }
@@ -174,7 +169,7 @@ impl StartCommand {
} }
if let Some(data_home) = &self.data_home { if let Some(data_home) = &self.data_home {
opts.storage.data_home.clone_from(data_home); opts.storage.data_home = data_home.clone();
} }
// `wal_dir` only affects raft-engine config. // `wal_dir` only affects raft-engine config.
@@ -192,7 +187,7 @@ impl StartCommand {
} }
if let Some(http_addr) = &self.http_addr { if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr); opts.http.addr = http_addr.clone();
} }
if let Some(http_timeout) = self.http_timeout { if let Some(http_timeout) = self.http_timeout {
@@ -240,7 +235,6 @@ impl StartCommand {
.with_default_grpc_server(&datanode.region_server()) .with_default_grpc_server(&datanode.region_server())
.enable_http_service() .enable_http_service()
.build() .build()
.await
.context(StartDatanodeSnafu)?; .context(StartDatanodeSnafu)?;
datanode.setup_services(services); datanode.setup_services(services);

View File

@@ -16,10 +16,9 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use async_trait::async_trait; use async_trait::async_trait;
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager}; use catalog::kvbackend::CachedMetaKvBackendBuilder;
use clap::Parser; use clap::Parser;
use client::client_manager::DatanodeClients; use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler; use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging; use common_telemetry::logging;
@@ -44,17 +43,13 @@ pub struct Instance {
} }
impl Instance { impl Instance {
pub fn new(frontend: FeInstance) -> Self { fn new(frontend: FeInstance) -> Self {
Self { frontend } Self { frontend }
} }
pub fn mut_inner(&mut self) -> &mut FeInstance { pub fn mut_inner(&mut self) -> &mut FeInstance {
&mut self.frontend &mut self.frontend
} }
pub fn inner(&self) -> &FeInstance {
&self.frontend
}
} }
#[async_trait] #[async_trait]
@@ -157,11 +152,11 @@ impl StartCommand {
)?; )?;
if let Some(dir) = &cli_options.log_dir { if let Some(dir) = &cli_options.log_dir {
opts.logging.dir.clone_from(dir); opts.logging.dir = dir.clone();
} }
if cli_options.log_level.is_some() { if cli_options.log_level.is_some() {
opts.logging.level.clone_from(&cli_options.log_level); opts.logging.level = cli_options.log_level.clone();
} }
let tls_opts = TlsOption::new( let tls_opts = TlsOption::new(
@@ -171,7 +166,7 @@ impl StartCommand {
); );
if let Some(addr) = &self.http_addr { if let Some(addr) = &self.http_addr {
opts.http.addr.clone_from(addr); opts.http.addr = addr.clone()
} }
if let Some(http_timeout) = self.http_timeout { if let Some(http_timeout) = self.http_timeout {
@@ -183,24 +178,24 @@ impl StartCommand {
} }
if let Some(addr) = &self.rpc_addr { if let Some(addr) = &self.rpc_addr {
opts.grpc.addr.clone_from(addr); opts.grpc.addr = addr.clone()
} }
if let Some(addr) = &self.mysql_addr { if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true; opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr); opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone(); opts.mysql.tls = tls_opts.clone();
} }
if let Some(addr) = &self.postgres_addr { if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true; opts.postgres.enable = true;
opts.postgres.addr.clone_from(addr); opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts; opts.postgres.tls = tls_opts;
} }
if let Some(addr) = &self.opentsdb_addr { if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true; opts.opentsdb.enable = true;
opts.opentsdb.addr.clone_from(addr); opts.opentsdb.addr = addr.clone();
} }
if let Some(enable) = self.influxdb_enable { if let Some(enable) = self.influxdb_enable {
@@ -210,12 +205,11 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr { if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client opts.meta_client
.get_or_insert_with(MetaClientOptions::default) .get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs .metasrv_addrs = metasrv_addrs.clone();
.clone_from(metasrv_addrs);
opts.mode = Mode::Distributed; opts.mode = Mode::Distributed;
} }
opts.user_provider.clone_from(&self.user_provider); opts.user_provider = self.user_provider.clone();
Ok(Options::Frontend(Box::new(opts))) Ok(Options::Frontend(Box::new(opts)))
} }
@@ -249,19 +243,11 @@ impl StartCommand {
.cache_tti(cache_tti) .cache_tti(cache_tti)
.build(); .build();
let cached_meta_backend = Arc::new(cached_meta_backend); let cached_meta_backend = Arc::new(cached_meta_backend);
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_manager = KvBackendCatalogManager::new(
cached_meta_backend.clone(),
multi_cache_invalidator.clone(),
)
.await;
let executor = HandlerGroupExecutor::new(vec![ let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler), Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateTableCacheHandler::new( Arc::new(InvalidateTableCacheHandler::new(
multi_cache_invalidator.clone(), cached_meta_backend.clone(),
)), )),
]); ]);
@@ -273,12 +259,11 @@ impl StartCommand {
let mut instance = FrontendBuilder::new( let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(), cached_meta_backend.clone(),
catalog_manager,
Arc::new(DatanodeClients::default()), Arc::new(DatanodeClients::default()),
meta_client, meta_client,
) )
.with_cache_invalidator(cached_meta_backend)
.with_plugin(plugins.clone()) .with_plugin(plugins.clone())
.with_cache_invalidator(multi_cache_invalidator)
.with_heartbeat_task(heartbeat_task) .with_heartbeat_task(heartbeat_task)
.try_build() .try_build()
.await .await
@@ -286,7 +271,6 @@ impl StartCommand {
let servers = Services::new(opts.clone(), Arc::new(instance.clone()), plugins) let servers = Services::new(opts.clone(), Arc::new(instance.clone()), plugins)
.build() .build()
.await
.context(StartFrontendSnafu)?; .context(StartFrontendSnafu)?;
instance instance
.build_servers(opts, servers) .build_servers(opts, servers)

View File

@@ -32,11 +32,11 @@ lazy_static::lazy_static! {
} }
#[async_trait] #[async_trait]
pub trait App: Send { pub trait App {
fn name(&self) -> &str; fn name(&self) -> &str;
/// A hook for implementor to make something happened before actual startup. Defaults to no-op. /// A hook for implementor to make something happened before actual startup. Defaults to no-op.
async fn pre_start(&mut self) -> error::Result<()> { fn pre_start(&mut self) -> error::Result<()> {
Ok(()) Ok(())
} }
@@ -46,21 +46,24 @@ pub trait App: Send {
} }
pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> { pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
info!("Starting app: {}", app.name()); let name = app.name().to_string();
app.pre_start().await?; app.pre_start()?;
app.start().await?; tokio::select! {
result = app.start() => {
if let Err(e) = tokio::signal::ctrl_c().await { if let Err(err) = result {
error!("Failed to listen for ctrl-c signal: {}", e); error!(err; "Failed to start app {name}!");
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in }
// the underlying system. So we stop the app instead of running nonetheless to let people }
// investigate the issue. _ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Failed to stop app {name}!");
}
info!("Goodbye!");
}
} }
app.stop().await?;
info!("Goodbye!");
Ok(()) Ok(())
} }

View File

@@ -17,8 +17,8 @@ use std::time::Duration;
use async_trait::async_trait; use async_trait::async_trait;
use clap::Parser; use clap::Parser;
use common_telemetry::logging; use common_telemetry::logging;
use meta_srv::bootstrap::MetasrvInstance; use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetasrvOptions; use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt; use snafu::ResultExt;
use crate::error::{self, Result, StartMetaServerSnafu}; use crate::error::{self, Result, StartMetaServerSnafu};
@@ -26,11 +26,11 @@ use crate::options::{CliOptions, Options};
use crate::App; use crate::App;
pub struct Instance { pub struct Instance {
instance: MetasrvInstance, instance: MetaSrvInstance,
} }
impl Instance { impl Instance {
fn new(instance: MetasrvInstance) -> Self { fn new(instance: MetaSrvInstance) -> Self {
Self { instance } Self { instance }
} }
} }
@@ -42,7 +42,7 @@ impl App for Instance {
} }
async fn start(&mut self) -> Result<()> { async fn start(&mut self) -> Result<()> {
plugins::start_metasrv_plugins(self.instance.plugins()) plugins::start_meta_srv_plugins(self.instance.plugins())
.await .await
.context(StartMetaServerSnafu)?; .context(StartMetaServerSnafu)?;
@@ -64,7 +64,7 @@ pub struct Command {
} }
impl Command { impl Command {
pub async fn build(self, opts: MetasrvOptions) -> Result<Instance> { pub async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
self.subcmd.build(opts).await self.subcmd.build(opts).await
} }
@@ -79,7 +79,7 @@ enum SubCommand {
} }
impl SubCommand { impl SubCommand {
async fn build(self, opts: MetasrvOptions) -> Result<Instance> { async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
match self { match self {
SubCommand::Start(cmd) => cmd.build(opts).await, SubCommand::Start(cmd) => cmd.build(opts).await,
} }
@@ -117,40 +117,38 @@ struct StartCommand {
/// The working home directory of this metasrv instance. /// The working home directory of this metasrv instance.
#[clap(long)] #[clap(long)]
data_home: Option<String>, data_home: Option<String>,
/// If it's not empty, the metasrv will store all data with this key prefix. /// If it's not empty, the metasrv will store all data with this key prefix.
#[clap(long, default_value = "")] #[clap(long, default_value = "")]
store_key_prefix: String, store_key_prefix: String,
/// The max operations per txn
#[clap(long)]
max_txn_ops: Option<usize>,
} }
impl StartCommand { impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> { fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
let mut opts: MetasrvOptions = Options::load_layered_options( let mut opts: MetaSrvOptions = Options::load_layered_options(
self.config_file.as_deref(), self.config_file.as_deref(),
self.env_prefix.as_ref(), self.env_prefix.as_ref(),
MetasrvOptions::env_list_keys(), MetaSrvOptions::env_list_keys(),
)?; )?;
if let Some(dir) = &cli_options.log_dir { if let Some(dir) = &cli_options.log_dir {
opts.logging.dir.clone_from(dir); opts.logging.dir = dir.clone();
} }
if cli_options.log_level.is_some() { if cli_options.log_level.is_some() {
opts.logging.level.clone_from(&cli_options.log_level); opts.logging.level = cli_options.log_level.clone();
} }
if let Some(addr) = &self.bind_addr { if let Some(addr) = &self.bind_addr {
opts.bind_addr.clone_from(addr); opts.bind_addr = addr.clone();
} }
if let Some(addr) = &self.server_addr { if let Some(addr) = &self.server_addr {
opts.server_addr.clone_from(addr); opts.server_addr = addr.clone();
} }
if let Some(addr) = &self.store_addr { if let Some(addr) = &self.store_addr {
opts.store_addr.clone_from(addr); opts.store_addr = addr.clone();
} }
if let Some(selector_type) = &self.selector { if let Some(selector_type) = &self.selector {
@@ -168,7 +166,7 @@ impl StartCommand {
} }
if let Some(http_addr) = &self.http_addr { if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr); opts.http.addr = http_addr.clone();
} }
if let Some(http_timeout) = self.http_timeout { if let Some(http_timeout) = self.http_timeout {
@@ -176,15 +174,11 @@ impl StartCommand {
} }
if let Some(data_home) = &self.data_home { if let Some(data_home) = &self.data_home {
opts.data_home.clone_from(data_home); opts.data_home = data_home.clone();
} }
if !self.store_key_prefix.is_empty() { if !self.store_key_prefix.is_empty() {
opts.store_key_prefix.clone_from(&self.store_key_prefix) opts.store_key_prefix = self.store_key_prefix.clone()
}
if let Some(max_txn_ops) = self.max_txn_ops {
opts.max_txn_ops = max_txn_ops;
} }
// Disable dashboard in metasrv. // Disable dashboard in metasrv.
@@ -193,20 +187,20 @@ impl StartCommand {
Ok(Options::Metasrv(Box::new(opts))) Ok(Options::Metasrv(Box::new(opts)))
} }
async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> { async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
let plugins = plugins::setup_metasrv_plugins(&mut opts) let plugins = plugins::setup_meta_srv_plugins(&mut opts)
.await .await
.context(StartMetaServerSnafu)?; .context(StartMetaServerSnafu)?;
logging::info!("Metasrv start command: {:#?}", self); logging::info!("MetaSrv start command: {:#?}", self);
logging::info!("Metasrv options: {:#?}", opts); logging::info!("MetaSrv options: {:#?}", opts);
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None) let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await .await
.context(error::BuildMetaServerSnafu)?; .context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?; let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
let instance = MetasrvInstance::new(opts, plugins, metasrv) let instance = MetaSrvInstance::new(opts, plugins, metasrv)
.await .await
.context(error::BuildMetaServerSnafu)?; .context(error::BuildMetaServerSnafu)?;
@@ -218,7 +212,6 @@ impl StartCommand {
mod tests { mod tests {
use std::io::Write; use std::io::Write;
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file; use common_test_util::temp_dir::create_named_temp_file;
use meta_srv::selector::SelectorType; use meta_srv::selector::SelectorType;
@@ -298,10 +291,6 @@ mod tests {
.first_heartbeat_estimate .first_heartbeat_estimate
.as_millis() .as_millis()
); );
assert_eq!(
options.procedure.max_metadata_value_size,
Some(ReadableSize::kb(1500))
);
} }
#[test] #[test]

View File

@@ -15,12 +15,12 @@
use clap::ArgMatches; use clap::ArgMatches;
use common_config::KvBackendConfig; use common_config::KvBackendConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions}; use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_wal::config::MetasrvWalConfig; use common_wal::config::MetaSrvWalConfig;
use config::{Config, Environment, File, FileFormat}; use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig}; use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::error::{Result as FeResult, TomlFormatSnafu}; use frontend::error::{Result as FeResult, TomlFormatSnafu};
use frontend::frontend::{FrontendOptions, TomlSerializable}; use frontend::frontend::{FrontendOptions, TomlSerializable};
use meta_srv::metasrv::MetasrvOptions; use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use snafu::ResultExt; use snafu::ResultExt;
@@ -38,7 +38,7 @@ pub struct MixOptions {
pub frontend: FrontendOptions, pub frontend: FrontendOptions,
pub datanode: DatanodeOptions, pub datanode: DatanodeOptions,
pub logging: LoggingOptions, pub logging: LoggingOptions,
pub wal_meta: MetasrvWalConfig, pub wal_meta: MetaSrvWalConfig,
} }
impl From<MixOptions> for FrontendOptions { impl From<MixOptions> for FrontendOptions {
@@ -56,7 +56,7 @@ impl TomlSerializable for MixOptions {
pub enum Options { pub enum Options {
Datanode(Box<DatanodeOptions>), Datanode(Box<DatanodeOptions>),
Frontend(Box<FrontendOptions>), Frontend(Box<FrontendOptions>),
Metasrv(Box<MetasrvOptions>), Metasrv(Box<MetaSrvOptions>),
Standalone(Box<MixOptions>), Standalone(Box<MixOptions>),
Cli(Box<LoggingOptions>), Cli(Box<LoggingOptions>),
} }

View File

@@ -16,14 +16,13 @@ use std::sync::Arc;
use std::{fs, path}; use std::{fs, path};
use async_trait::async_trait; use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser; use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID; use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig}; use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator}; use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::datanode_manager::DatanodeManagerRef; use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef}; use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::ProcedureExecutorRef; use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::ddl_manager::DdlManager; use common_meta::ddl_manager::DdlManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef}; use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef; use common_meta::kv_backend::KvBackendRef;
@@ -293,11 +292,11 @@ impl StartCommand {
opts.mode = Mode::Standalone; opts.mode = Mode::Standalone;
if let Some(dir) = &cli_options.log_dir { if let Some(dir) = &cli_options.log_dir {
opts.logging.dir.clone_from(dir); opts.logging.dir = dir.clone();
} }
if cli_options.log_level.is_some() { if cli_options.log_level.is_some() {
opts.logging.level.clone_from(&cli_options.log_level); opts.logging.level = cli_options.log_level.clone();
} }
let tls_opts = TlsOption::new( let tls_opts = TlsOption::new(
@@ -307,11 +306,11 @@ impl StartCommand {
); );
if let Some(addr) = &self.http_addr { if let Some(addr) = &self.http_addr {
opts.http.addr.clone_from(addr); opts.http.addr = addr.clone()
} }
if let Some(data_home) = &self.data_home { if let Some(data_home) = &self.data_home {
opts.storage.data_home.clone_from(data_home); opts.storage.data_home = data_home.clone();
} }
if let Some(addr) = &self.rpc_addr { if let Some(addr) = &self.rpc_addr {
@@ -325,31 +324,31 @@ impl StartCommand {
} }
.fail(); .fail();
} }
opts.grpc.addr.clone_from(addr) opts.grpc.addr = addr.clone()
} }
if let Some(addr) = &self.mysql_addr { if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true; opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr); opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone(); opts.mysql.tls = tls_opts.clone();
} }
if let Some(addr) = &self.postgres_addr { if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true; opts.postgres.enable = true;
opts.postgres.addr.clone_from(addr); opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts; opts.postgres.tls = tls_opts;
} }
if let Some(addr) = &self.opentsdb_addr { if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true; opts.opentsdb.enable = true;
opts.opentsdb.addr.clone_from(addr); opts.opentsdb.addr = addr.clone();
} }
if self.influxdb_enable { if self.influxdb_enable {
opts.influxdb.enable = self.influxdb_enable; opts.influxdb.enable = self.influxdb_enable;
} }
opts.user_provider.clone_from(&self.user_provider); opts.user_provider = self.user_provider.clone();
let metadata_store = opts.metadata_store.clone(); let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone(); let procedure = opts.procedure.clone();
@@ -400,10 +399,6 @@ impl StartCommand {
.await .await
.context(StartFrontendSnafu)?; .context(StartFrontendSnafu)?;
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
let builder = let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone()); DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?; let datanode = builder.build().await.context(StartDatanodeSnafu)?;
@@ -424,26 +419,21 @@ impl StartCommand {
let table_metadata_manager = let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?; Self::create_table_metadata_manager(kv_backend.clone()).await?;
let table_meta_allocator = Arc::new(TableMetadataAllocator::new( let table_meta_allocator = TableMetadataAllocator::new(
table_id_sequence, table_id_sequence,
wal_options_allocator.clone(), wal_options_allocator.clone(),
)); table_metadata_manager.clone(),
);
let ddl_task_executor = Self::create_ddl_task_executor( let ddl_task_executor = Self::create_ddl_task_executor(
table_metadata_manager, table_metadata_manager,
procedure_manager.clone(), procedure_manager.clone(),
datanode_manager.clone(), datanode_manager.clone(),
multi_cache_invalidator,
table_meta_allocator, table_meta_allocator,
) )
.await?; .await?;
let mut frontend = FrontendBuilder::new( let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
kv_backend,
catalog_manager,
datanode_manager,
ddl_task_executor,
)
.with_plugin(fe_plugins.clone()) .with_plugin(fe_plugins.clone())
.try_build() .try_build()
.await .await
@@ -451,7 +441,6 @@ impl StartCommand {
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins) let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build() .build()
.await
.context(StartFrontendSnafu)?; .context(StartFrontendSnafu)?;
frontend frontend
.build_servers(fe_opts, servers) .build_servers(fe_opts, servers)
@@ -469,23 +458,21 @@ impl StartCommand {
table_metadata_manager: TableMetadataManagerRef, table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef, procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef, datanode_manager: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef, table_meta_allocator: TableMetadataAllocator,
table_meta_allocator: TableMetadataAllocatorRef, ) -> Result<DdlTaskExecutorRef> {
) -> Result<ProcedureExecutorRef> { let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new( DdlManager::try_new(
procedure_manager, procedure_manager,
datanode_manager, datanode_manager,
cache_invalidator, Arc::new(DummyCacheInvalidator),
table_metadata_manager, table_metadata_manager,
table_meta_allocator, table_meta_allocator,
Arc::new(MemoryRegionKeeper::default()), Arc::new(MemoryRegionKeeper::default()),
true,
) )
.context(InitDdlManagerSnafu)?, .context(InitDdlManagerSnafu)?,
); );
Ok(procedure_executor) Ok(ddl_task_executor)
} }
pub async fn create_table_metadata_manager( pub async fn create_table_metadata_manager(

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
[lints]
workspace = true
[dependencies] [dependencies]
anymap = "1.0.0-beta.2" anymap = "1.0.0-beta.2"
bitvec = "1.0" bitvec = "1.0"

Some files were not shown because too many files have changed in this diff Show More