Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Lei, HUANG | 2ed98ff558 | fix: some cr comments | 2024-02-20 14:10:57 +08:00 |
| Lei, HUANG | b46386d52a | feat: data buffer and related structs | 2024-02-19 22:57:25 +08:00 |
721 changed files with 10593 additions and 44645 deletions

View File

@@ -3,3 +3,13 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]

View File

@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[{Makefile,**.mk}]
indent_style = tab

View File

@@ -21,6 +21,3 @@ GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

View File

@@ -34,7 +34,7 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log

View File

@@ -67,7 +67,7 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log

View File

@@ -62,15 +62,15 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts

View File

@@ -1,13 +0,0 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002

View File

@@ -1,10 +1,8 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
## Refer to a related PR or issue link (optional)
I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
## What's changed and what's your intention?
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
_PLEASE DO NOT LEAVE THIS EMPTY !!!_
Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -18,3 +16,5 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates.
## Refer to a related PR or issue link (optional)

View File

@@ -40,4 +40,3 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
single-commit: true

View File

@@ -1,7 +1,7 @@
on:
merge_group:
pull_request:
types: [ opened, synchronize, reopened, ready_for_review ]
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- 'docs/**'
- 'config/**'
@@ -57,7 +57,7 @@ jobs:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
with:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
@@ -75,7 +75,7 @@ jobs:
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
@@ -102,7 +102,7 @@ jobs:
shared-key: "build-binaries"
- name: Build greptime binaries
shell: bash
run: cargo build --bin greptime --bin sqlness-runner
run: cargo build
- name: Pack greptime binaries
shell: bash
run: |
@@ -117,46 +117,6 @@ jobs:
artifacts-dir: bins
version: current
fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
with:
target: ${{ matrix.target }}
sqlness:
name: Sqlness Test
needs: build
@@ -176,12 +136,13 @@ jobs:
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
# FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
sqlness-kafka-wal:
@@ -206,12 +167,13 @@ jobs:
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
# FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
name: sqlness-logs
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
fmt:
@@ -229,7 +191,7 @@ jobs:
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Run cargo fmt
@@ -250,7 +212,7 @@ jobs:
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
with:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
@@ -279,10 +241,6 @@ jobs:
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
@@ -313,28 +271,10 @@ jobs:
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: false
verbose: true
compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0

View File

@@ -61,18 +61,6 @@ jobs:
sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

View File

@@ -13,4 +13,4 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: korandoru/hawkeye@v5
uses: korandoru/hawkeye@v4

View File

@@ -45,10 +45,10 @@ jobs:
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
test-on-windows:

View File

@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.8.0
NEXT_RELEASE_VERSION: v0.7.0
jobs:
allocate-runners:
@@ -221,8 +221,6 @@ jobs:
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
needs: [
allocate-runners,
]
@@ -262,8 +260,6 @@ jobs:
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
needs: [
allocate-runners,
]
@@ -288,7 +284,7 @@ jobs:
- name: Set build windows result
id: set-build-windows-result
run: |
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
echo "build-windows-result=success" >> $GITHUB_OUTPUT
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -299,8 +295,6 @@ jobs:
build-linux-arm64-artifacts,
]
runs-on: ubuntu-2004-16-cores
outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps:
- uses: actions/checkout@v4
with:
@@ -316,7 +310,7 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}
- name: Set build image result
id: set-build-image-result
id: set-image-build-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT

View File

@@ -1,21 +0,0 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}

.gitignore (vendored), 4 changed lines
View File

@@ -46,7 +46,3 @@ benchmarks/data
*.code-workspace
venv/
# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/

CODE_OF_CONDUCT.md (new file), 132 lines
View File

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

Cargo.lock (generated), 805 changed lines

File diff suppressed because it is too large.

View File

@@ -18,7 +18,6 @@ members = [
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/meta",
"src/common/plugins",
"src/common/procedure",
"src/common/procedure-test",
"src/common/query",
@@ -62,23 +61,17 @@ members = [
resolver = "2"
[workspace.package]
version = "0.7.2"
version = "0.6.0"
edition = "2021"
license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.implicit_clone = "warn"
rust.unknown_lints = "deny"
[workspace.dependencies]
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
arrow = { version = "47.0" }
arrow-array = "47.0"
arrow-flight = "47.0"
arrow-ipc = { version = "47.0", features = ["lz4"] }
arrow-ipc = "47.0"
arrow-schema = { version = "47.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
@@ -99,20 +92,17 @@ datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.g
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.12"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1bd2398b686e5ac6c1eef6daf615867ce27f75c1" }
humantime = "2.1"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96f1f0404f421ee560a4310c73c5071e49168168" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
@@ -128,7 +118,7 @@ prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
regex-automata = { version = "0.2", features = ["transducer"] }
reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
@@ -136,9 +126,8 @@ reqwest = { version = "0.11", default-features = false, features = [
] }
rskafka = "0.5"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_json = "1.0"
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
@@ -155,7 +144,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members
api = { path = "src/api" }
@@ -176,7 +164,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" }
@@ -214,7 +201,7 @@ table = { path = "src/table" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
[profile.release]
debug = 1

View File

@@ -3,7 +3,6 @@ CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
@@ -46,10 +45,6 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif
ifneq ($(strip $(BUILD_BIN)),)
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
endif
ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif

README.md, 219 changed lines
View File

@@ -6,154 +6,145 @@
</picture>
</p>
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
The next-generation hybrid time-series/analytics processing database in the cloud
</h3>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
</a>
<a href="https://hub.docker.com/r/greptime/greptimedb/">
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
</a>
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
</a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
</a>
<p align="center">
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
&nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
&nbsp;
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
</p>
<br/>
<p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
<a href="https://greptime.com/slack">
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
</a>
<a href="https://twitter.com/greptime">
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
</a>
<a href="https://www.linkedin.com/company/greptime/">
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
</a>
</div>
## What is GreptimeDB
## Introduction
GreptimeDB is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage.
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
Our core developers have been building time-series data platforms for years. Based on their best-practices, GreptimeDB is born to give you:
## Why GreptimeDB
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
- Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
- Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
- Native SQL and PromQL for queries, and Python scripting to facilitate complex analytical tasks.
- Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down.
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc.
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
## Quick Start
* **Easy horizontal scaling**
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
* **Analyzing time-series data**
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
* **Cloud-native distributed database**
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
* **Performance and Cost-effective**
Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
### [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/)
### Build
Start instantly with a free cluster.
#### Build from Source
### 3. Docker Image
To compile GreptimeDB from source, you'll need:
To install GreptimeDB locally, the recommended way is via Docker:
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on Ubuntu and under similar names on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install the correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from the major package managers on macOS and Linux distributions. You can
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that the `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; this also requires enabling the `pyo3_backend` feature when compiling, either via `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs the Python shared library required for running the Python
scripting engine in CPython mode. It is available as `python3-dev` on
Ubuntu (install it with `sudo apt install python3-dev`) and as `python3-devel`
on RPM-based distributions (e.g. Fedora, Red Hat, SUSE). macOS's
`Python3` package should include this shared library by default. More detail on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
```shell
docker pull greptime/greptimedb
#### Build with Docker
A docker image with necessary dependencies is provided:
```
docker build --network host -f docker/Dockerfile -t greptimedb .
```
Start a GreptimeDB container with:
### Run
Start GreptimeDB from source code, in standalone mode:
```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
```
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
## Build
Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
```shell
make
```
Run a standalone server:
```shell
cargo run -- standalone start
```
## Extension
Or if you built from docker:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Get started
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
## Resources
### Installation
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one that PyO3 build was linked against.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptime-ingester-js)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
### Grafana Dashboard
Our official Grafana dashboard is available in the [grafana](grafana/README.md) directory.
Our official Grafana dashboard is available in the [grafana](./grafana/README.md) directory.
## Project Status
The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
This project is in its early stage and under heavy development. We move fast and
break things. Benchmarks on the development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community
@@ -163,12 +154,12 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com)
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [Website](https://greptime.com)
In addition, you may:
- View our official [Blog](https://greptime.com/blogs/)
- View our official [Blog](https://greptime.com/blogs/index)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
@@ -179,7 +170,7 @@ open contributions and allowing you to use the software however you want.
## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement

View File

@@ -4,35 +4,13 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,11 +0,0 @@
Benchmarkers for GreptimeDB
--------------------------------
## Wal Benchmarker
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.
### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.

View File

@@ -1,21 +0,0 @@
# Refers to the documentation of `Args` in `benchmarks/src/wal.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false

View File

@@ -29,7 +29,7 @@ use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -502,9 +502,9 @@ async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for i in 0..num_iter {
let now = Instant::now();
let res = db.sql(&query).await.unwrap();
match res.data {
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
OutputData::Stream(stream) => {
match res {
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
Output::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}

View File

@@ -1,326 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(int_roundings)]
use std::fs;
use std::sync::Arc;
use std::time::Instant;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use benchmarks::metrics;
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
use clap::Parser;
use common_telemetry::info;
use common_wal::config::kafka::common::BackoffConfig;
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::options::{KafkaWalOptions, WalOptions};
use itertools::Itertools;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use mito2::wal::Wal;
use prometheus::{Encoder, TextEncoder};
use rand::distributions::{Alphanumeric, DistString};
use rand::rngs::SmallRng;
use rand::SeedableRng;
use rskafka::client::partition::Compression;
use rskafka::client::ClientBuilder;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
let region_chunks = (0..cfg.num_regions)
.map(|id| {
build_region(
id as u64,
topics,
&mut SmallRng::seed_from_u64(cfg.rng_seed),
cfg,
)
})
.chunks(chunk_size as usize)
.into_iter()
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
.collect::<Vec<_>>();
let mut write_elapsed = 0;
let mut read_elapsed = 0;
if !cfg.skip_write {
info!("Benchmarking write ...");
let num_scrapes = cfg.num_scrapes;
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for _ in 0..num_scrapes {
let mut wal_writer = wal.writer();
regions
.iter()
.for_each(|region| region.add_wal_entry(&mut wal_writer));
wal_writer.write_to_wal().await.unwrap();
}
})
}))
.await;
write_elapsed += timer.elapsed().as_millis();
}
if !cfg.skip_read {
info!("Benchmarking read ...");
let timer = Instant::now();
futures::future::join_all((0..cfg.num_workers).map(|i| {
let wal = wal.clone();
let regions = region_chunks[i as usize].clone();
tokio::spawn(async move {
for region in regions.iter() {
region.replay(&wal).await;
}
})
}))
.await;
read_elapsed = timer.elapsed().as_millis();
}
dump_report(cfg, write_elapsed, read_elapsed);
}
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
let wal_options = match cfg.wal_provider {
WalProvider::Kafka => {
assert!(!topics.is_empty());
WalOptions::Kafka(KafkaWalOptions {
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
})
}
WalProvider::RaftEngine => WalOptions::RaftEngine,
};
Region::new(
RegionId::from_u64(id),
build_schema(&parse_col_types(&cfg.col_types), rng),
wal_options,
cfg.num_rows,
cfg.rng_seed,
)
}
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
col_types
.iter()
.map(|col_type| ColumnSchema {
column_name: Alphanumeric.sample_string(&mut rng, 5),
datatype: *col_type as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: None,
})
.chain(vec![ColumnSchema {
column_name: "ts".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Tag as i32,
datatype_extension: None,
}])
.collect()
}
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
let cost_report = format!(
"write costs: {} ms, read costs: {} ms",
write_elapsed, read_elapsed,
);
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
let write_throughput = if write_elapsed > 0 {
(total_written_bytes * 1000).div_floor(write_elapsed)
} else {
0
};
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
let read_throughput = if read_elapsed > 0 {
(total_read_bytes * 1000).div_floor(read_elapsed)
} else {
0
};
let throughput_report = format!(
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
total_written_bytes,
total_read_bytes,
write_throughput,
write_throughput.div_floor(1 << 20),
read_throughput,
read_throughput.div_floor(1 << 20),
);
let metrics_report = if cfg.report_metrics {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
let metrics = prometheus::gather();
encoder.encode(&metrics, &mut buffer).unwrap();
String::from_utf8(buffer).unwrap()
} else {
String::new()
};
info!(
r#"
Benchmark config:
{cfg:?}
Benchmark report:
{cost_report}
{throughput_report}
{metrics_report}"#
);
}
async fn create_topics(cfg: &Config) -> Vec<String> {
// Creates topics.
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
.build()
.await
.unwrap();
let ctrl_client = client.controller_client().unwrap();
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
.map(|i| {
let topic = if cfg.random_topics {
format!(
"greptime_wal_bench_topic_{}_{}",
uuid::Uuid::new_v4().as_u128(),
i
)
} else {
format!("greptime_wal_bench_topic_{}", i)
};
let task = ctrl_client.create_topic(
topic.clone(),
1,
cfg.bootstrap_brokers.len() as i16,
2000,
);
(topic, task)
})
.unzip();
// Must ignore errors since we allow topics being created more than once.
let _ = futures::future::try_join_all(tasks).await;
topics
}
fn parse_compression(comp: &str) -> Compression {
match comp {
"no" => Compression::NoCompression,
"gzip" => Compression::Gzip,
"lz4" => Compression::Lz4,
"snappy" => Compression::Snappy,
"zstd" => Compression::Zstd,
other => unreachable!("Unrecognized compression {other}"),
}
}
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
let parts = col_types.split('x').collect::<Vec<_>>();
assert!(parts.len() <= 2);
let pattern = parts[0];
let repeat = parts
.get(1)
.map(|r| r.parse::<usize>().unwrap())
.unwrap_or(1);
pattern
.chars()
.map(|c| match c {
'i' | 'I' => ColumnDataType::Int64,
'f' | 'F' => ColumnDataType::Float64,
's' | 'S' => ColumnDataType::String,
other => unreachable!("Cannot parse {other} as a column data type"),
})
.cycle()
.take(pattern.len() * repeat)
.collect()
}
fn main() {
// Sets the global logging level to INFO and suppresses rskafka logs below ERROR.
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
common_telemetry::init_default_ut_logging();
let args = Args::parse();
let cfg = if !args.cfg_file.is_empty() {
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
} else {
Config::from(args)
};
// Validates arguments.
if cfg.num_regions < cfg.num_workers {
panic!("num_regions must be greater than or equal to num_workers");
}
if cfg
.num_workers
.min(cfg.num_topics)
.min(cfg.num_regions)
.min(cfg.num_scrapes)
.min(cfg.max_batch_size.as_bytes() as u32)
.min(cfg.bootstrap_brokers.len() as u32)
== 0
{
panic!("Invalid arguments");
}
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
match cfg.wal_provider {
WalProvider::Kafka => {
let topics = create_topics(&cfg).await;
let kafka_cfg = KafkaConfig {
broker_endpoints: cfg.bootstrap_brokers.clone(),
max_batch_size: cfg.max_batch_size,
linger: cfg.linger,
backoff: BackoffConfig {
init: cfg.backoff_init,
max: cfg.backoff_max,
base: cfg.backoff_base,
deadline: Some(cfg.backoff_deadline),
},
compression: parse_compression(&cfg.compression),
..Default::default()
};
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &topics, wal).await;
}
WalProvider::RaftEngine => {
// The benchmarker assumes the raft engine directory exists.
let store = RaftEngineLogStore::try_new(
"/tmp/greptimedb/raft-engine-wal".to_string(),
RaftEngineConfig::default(),
)
.await
.map(Arc::new)
.unwrap();
let wal = Arc::new(Wal::new(store));
run_benchmarker(&cfg, &[], wal).await;
}
}
});
}
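
As a concrete illustration of the `col_types` pattern accepted by `parse_col_types` above (and documented on `Args` further below), here is a self-contained sketch of the same expansion rule. `ColType` is a stand-in enum for `api::v1::ColumnDataType`, which is deliberately not pulled in, so the snippet runs on its own.

```rust
// Standalone sketch of the col_types mini-grammar: a pattern of 'i', 'f', 's'
// characters, optionally followed by "x<N>" to repeat the pattern N times.
// `ColType` stands in for api::v1::ColumnDataType.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ColType {
    Int64,
    Float64,
    String,
}

/// Expands a pattern such as "ifs" or "iix2" into a list of column types,
/// mirroring the `parse_col_types` helper above.
fn expand_col_types(col_types: &str) -> Vec<ColType> {
    let parts: Vec<&str> = col_types.split('x').collect();
    assert!(parts.len() <= 2, "at most one 'x' repeat suffix is allowed");
    let pattern = parts[0];
    let repeat = parts
        .get(1)
        .map(|r| r.parse::<usize>().unwrap())
        .unwrap_or(1);
    pattern
        .chars()
        .map(|c| match c {
            'i' | 'I' => ColType::Int64,
            'f' | 'F' => ColType::Float64,
            's' | 'S' => ColType::String,
            other => unreachable!("cannot parse {other} as a column data type"),
        })
        .cycle()
        .take(pattern.len() * repeat)
        .collect()
}

fn main() {
    // The default "ifs" yields one i64, one f64 and one string column.
    assert_eq!(
        expand_col_types("ifs"),
        vec![ColType::Int64, ColType::Float64, ColType::String]
    );
    // "iix2" repeats the two-column pattern twice: four i64 columns in total.
    assert_eq!(expand_col_types("iix2"), vec![ColType::Int64; 4]);
    println!("col_types expansion behaves as documented");
}
```

Note that `build_schema` then appends a `ts` timestamp column on top of whatever this expansion produces.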

View File

@@ -1,16 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod metrics;
pub mod wal_bench;

View File

@@ -1,39 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
use prometheus::*;
/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";
lazy_static! {
/// Counters of bytes of each operation on a logstore.
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
"greptime_bench_wal_op_bytes_total",
"wal operation bytes total",
&[OPTYPE_LABEL],
)
.unwrap();
/// Counter of bytes of the append_batch operation.
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["write"],
);
/// Counter of bytes of the read operation.
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
&["read"],
);
}
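
For readers skimming the metrics module above, a minimal self-contained sketch of the same pattern may help: one labelled counter vec, two per-optype handles derived from it, and the text dump that `dump_report` performs at the end of a run. It relies only on the `prometheus` and `lazy_static` crates already listed in the benchmark's Cargo.toml; the metric name is illustrative, not the real one.

```rust
// Minimal sketch of the benchmark's metrics flow (names are illustrative).
use lazy_static::lazy_static;
use prometheus::{register_int_counter_vec, Encoder, IntCounter, IntCounterVec, TextEncoder};

lazy_static! {
    // One counter vec keyed by operation type...
    static ref OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "demo_wal_op_bytes_total",
        "wal operation bytes total",
        &["optype"],
    )
    .unwrap();
    // ...and two concrete handles derived from it, as in the metrics module above.
    static ref WRITE_BYTES_TOTAL: IntCounter = OP_BYTES_TOTAL.with_label_values(&["write"]);
    static ref READ_BYTES_TOTAL: IntCounter = OP_BYTES_TOTAL.with_label_values(&["read"]);
}

fn main() {
    // Pretend we wrote 4 KiB of entries and replayed 1 KiB.
    WRITE_BYTES_TOTAL.inc_by(4096);
    READ_BYTES_TOTAL.inc_by(1024);

    // The same dump path dump_report() uses: gather everything and encode as text.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .unwrap();
    println!("{}", String::from_utf8(buffer).unwrap());
}
```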

View File

@@ -1,361 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::mem::size_of;
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
use clap::{Parser, ValueEnum};
use common_base::readable_size::ReadableSize;
use common_wal::options::WalOptions;
use futures::StreamExt;
use mito2::wal::{Wal, WalWriter};
use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use crate::metrics;
/// The wal provider.
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum WalProvider {
#[default]
RaftEngine,
Kafka,
}
#[derive(Parser)]
pub struct Args {
/// The provided configuration file.
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
#[clap(long, short = 'c')]
pub cfg_file: String,
/// The wal provider.
#[clap(long, value_enum, default_value_t = WalProvider::default())]
pub wal_provider: WalProvider,
/// The advertised addresses of the kafka brokers.
/// If there are multiple bootstrap brokers, their addresses should be separated by commas, e.g. "localhost:9092,localhost:9093".
#[clap(long, short = 'b', default_value = "localhost:9092")]
pub bootstrap_brokers: String,
/// The number of workers each running in a dedicated thread.
#[clap(long, default_value_t = num_cpus::get() as u32)]
pub num_workers: u32,
/// The number of kafka topics to be created.
#[clap(long, default_value_t = 32)]
pub num_topics: u32,
/// The number of regions.
#[clap(long, default_value_t = 1000)]
pub num_regions: u32,
/// The number of times each region is scraped.
#[clap(long, default_value_t = 1000)]
pub num_scrapes: u32,
/// The number of rows in each wal entry.
/// Each time a region is scraped, a wal entry containing this many rows will be produced.
#[clap(long, default_value_t = 5)]
pub num_rows: u32,
/// The column types of the schema for each region.
/// Currently, three column types are supported:
/// - i = ColumnDataType::Int64
/// - f = ColumnDataType::Float64
/// - s = ColumnDataType::String
/// For example, "ifs" will be parsed as three columns: i64, f64, and string.
///
/// Additionally, an "x" suffix can be provided to repeat the column types a given number of times.
/// For example, "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
/// This feature is useful if you want to specify many columns.
#[clap(long, default_value = "ifs")]
pub col_types: String,
/// The maximum size of a batch of kafka records.
/// The default value is 1mb.
#[clap(long, default_value = "512KB")]
pub max_batch_size: ReadableSize,
/// The minimum time the kafka client waits before issuing a batch of kafka records.
/// However, a batch of kafka records is issued immediately if a record cannot fit into the batch.
#[clap(long, default_value = "1ms")]
pub linger: String,
/// The initial backoff delay of the kafka consumer.
#[clap(long, default_value = "10ms")]
pub backoff_init: String,
/// The maximum backoff delay of the kafka consumer.
#[clap(long, default_value = "1s")]
pub backoff_max: String,
/// The exponential backoff rate of the kafka consumer. The next backoff = base * the current backoff.
#[clap(long, default_value_t = 2)]
pub backoff_base: u32,
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
#[clap(long, default_value = "3s")]
pub backoff_deadline: String,
/// The client-side compression algorithm for kafka records.
#[clap(long, default_value = "zstd")]
pub compression: String,
/// The seed of random number generators.
#[clap(long, default_value_t = 42)]
pub rng_seed: u64,
/// Skips the read phase, aka. region replay, if set to true.
#[clap(long, default_value_t = false)]
pub skip_read: bool,
/// Skips the write phase if set to true.
#[clap(long, default_value_t = false)]
pub skip_write: bool,
/// Randomly generates topic names if set to true.
/// Useful when you want to run the benchmarker without worrying about the topics created before.
#[clap(long, default_value_t = false)]
pub random_topics: bool,
/// Logs out the gathered prometheus metrics when the benchmarker ends.
#[clap(long, default_value_t = false)]
pub report_metrics: bool,
}
/// Benchmarker config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub wal_provider: WalProvider,
pub bootstrap_brokers: Vec<String>,
pub num_workers: u32,
pub num_topics: u32,
pub num_regions: u32,
pub num_scrapes: u32,
pub num_rows: u32,
pub col_types: String,
pub max_batch_size: ReadableSize,
#[serde(with = "humantime_serde")]
pub linger: Duration,
#[serde(with = "humantime_serde")]
pub backoff_init: Duration,
#[serde(with = "humantime_serde")]
pub backoff_max: Duration,
pub backoff_base: u32,
#[serde(with = "humantime_serde")]
pub backoff_deadline: Duration,
pub compression: String,
pub rng_seed: u64,
pub skip_read: bool,
pub skip_write: bool,
pub random_topics: bool,
pub report_metrics: bool,
}
impl From<Args> for Config {
fn from(args: Args) -> Self {
let cfg = Self {
wal_provider: args.wal_provider,
bootstrap_brokers: args
.bootstrap_brokers
.split(',')
.map(ToString::to_string)
.collect::<Vec<_>>(),
num_workers: args.num_workers.min(num_cpus::get() as u32),
num_topics: args.num_topics,
num_regions: args.num_regions,
num_scrapes: args.num_scrapes,
num_rows: args.num_rows,
col_types: args.col_types,
max_batch_size: args.max_batch_size,
linger: humantime::parse_duration(&args.linger).unwrap(),
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
backoff_base: args.backoff_base,
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
compression: args.compression,
rng_seed: args.rng_seed,
skip_read: args.skip_read,
skip_write: args.skip_write,
random_topics: args.random_topics,
report_metrics: args.report_metrics,
};
cfg
}
}
/// The region used for wal benchmarker.
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
rng: Mutex<Option<SmallRng>>,
num_rows: u32,
}
impl Region {
/// Creates a new region.
pub fn new(
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
num_rows: u32,
rng_seed: u64,
) -> Self {
Self {
id,
schema,
wal_options,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
num_rows,
}
}
/// Scrapes the region and adds the generated entry to wal.
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
let mutation = Mutation {
op_type: OpType::Put as i32,
sequence: self
.next_sequence
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
rows: Some(self.build_rows()),
};
let entry = WalEntry {
mutations: vec![mutation],
};
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
wal_writer
.add_entry(
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.wal_options,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
}
}
/// Computes the estimated size in bytes of the entry.
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
let wrapper_size = size_of::<WalEntry>()
+ entry.mutations.capacity() * size_of::<Mutation>()
+ size_of::<Rows>();
let rows = entry.mutations[0].rows.as_ref().unwrap();
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
+ rows
.schema
.iter()
.map(|s| s.column_name.capacity())
.sum::<usize>();
let values_size = (rows.rows.capacity() * size_of::<Row>())
+ rows
.rows
.iter()
.map(|r| r.values.capacity() * size_of::<Value>())
.sum::<usize>();
wrapper_size + schema_size + values_size
}
fn build_rows(&self) -> Rows {
let cols = self
.schema
.iter()
.map(|col_schema| {
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
self.build_col(&col_data_type, self.num_rows)
})
.collect::<Vec<_>>();
let rows = (0..self.num_rows)
.map(|i| {
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
Row { values }
})
.collect();
Rows {
schema: self.schema.clone(),
rows,
}
}
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
let mut rng_guard = self.rng.lock().unwrap();
let rng = rng_guard.as_mut().unwrap();
match col_data_type {
ColumnDataType::TimestampMillisecond => (0..num_rows)
.map(|_| {
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
Value {
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
}
})
.collect(),
ColumnDataType::Int64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0, 10_000));
Value {
value_data: Some(ValueData::I64Value(v)),
}
})
.collect(),
ColumnDataType::Float64 => (0..num_rows)
.map(|_| {
let v = rng.sample(Uniform::new(0.0, 5000.0));
Value {
value_data: Some(ValueData::F64Value(v)),
}
})
.collect(),
ColumnDataType::String => (0..num_rows)
.map(|_| {
let v = Alphanumeric.sample_string(rng, 10);
Value {
value_data: Some(ValueData::StringValue(v)),
}
})
.collect(),
_ => unreachable!(),
}
}
}

View File

@@ -1,117 +0,0 @@
# https://git-cliff.org/docs/configuration
[remote.github]
owner = "GreptimeTeam"
repo = "greptimedb"
[changelog]
header = ""
footer = ""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
# {{ version }}
Release date: {{ timestamp | date(format="%B %d, %Y") }}
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
{%- if breakings | length > 0 %}
## Breaking changes
{% for commit in breakings %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor %}
{%- endif -%}
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
{% for group, commits in grouped_commits %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor -%}
{% endfor %}
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* @{{ contributor.username }} made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}
{% if github.contributors | length != 0 %}
{% raw %}\n{% endraw -%}
## All Contributors
We would like to thank the following contributors from the GreptimeDB community:
{{ github.contributors | map(attribute="username") | join(sep=", ") }}
{%- endif %}
{% raw %}\n{% endraw %}
{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
trim = true
[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
{ message = "^chore\\(release\\): prepare for", skip = true },
{ message = "^chore\\(deps.*\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# regex for matching git tags
# tag_pattern = "v[0-9].*"
# regex for skipping tags
# skip_tags = ""
# regex for ignoring tags
ignore_tags = ".*-nightly-.*"
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
# limit the number of commits included in the changelog.
# limit_commits = 42

View File

@@ -8,6 +8,5 @@ coverage:
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
- "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key
layout: "diff"

View File

@@ -134,22 +134,10 @@ create_on_compaction = "auto"
apply_on_query = "auto"
# Memory threshold for performing an external sort during index creation.
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M"
mem_threshold_on_create = "64MB"
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
# The max number of keys in one shard.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.
data_freeze_threshold = 32768
# Max dictionary bytes.
fork_dictionary_bytes = "1GiB"
# Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"

View File

@@ -31,7 +31,6 @@ runtime_size = 2
mode = "disable"
cert_path = ""
key_path = ""
watch = false
# PostgresSQL server options, see `standalone.example.toml`.
[postgres]
@@ -44,7 +43,6 @@ runtime_size = 2
mode = "disable"
cert_path = ""
key_path = ""
watch = false
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb]

View File

@@ -29,12 +29,6 @@ store_key_prefix = ""
max_retry_times = 12
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Automatically split large values
# GreptimeDB procedure uses etcd as the default metadata storage backend.
# The maximum size of any etcd request is 1.5 MiB
# 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
# Comment out `max_metadata_value_size` to disable splitting large values (no limit).
max_metadata_value_size = "1500KiB"
# Failure detectors options.
[failure_detector]

View File

@@ -44,8 +44,6 @@ mode = "disable"
cert_path = ""
# Private key file path.
key_path = ""
# Watch for Certificate and key file change and auto reload
watch = false
# PostgresSQL server options.
[postgres]
@@ -64,8 +62,6 @@ mode = "disable"
cert_path = ""
# private key file path.
key_path = ""
# Watch for Certificate and key file change and auto reload
watch = false
# OpenTSDB protocol options.
[opentsdb]
@@ -122,7 +118,7 @@ sync_period = "1000ms"
# Number of topics to be created upon start.
# num_topics = 64
# Topic selector type.
# Available selector types:
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# The prefix of topic name.
@@ -244,18 +240,6 @@ mem_threshold_on_create = "64M"
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
# The max number of keys in one shard.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.
data_freeze_threshold = 32768
# Max dictionary bytes.
fork_dictionary_bytes = "1GiB"
# Log options
# [logging]
# Specify logs directory.
@@ -266,11 +250,10 @@ fork_dictionary_bytes = "1GiB"
# enable_otlp_tracing = false
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
# otlp_endpoint = "localhost:4317"
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
# tracing_sample_ratio = 1.0
# Whether to append logs to stdout. Defaults to true.
# append_stdout = true
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
# [logging.tracing_sample_ratio]
# default_ratio = 0.0
# Standalone export the metrics generated by itself
# encoded to Prometheus remote-write format

View File

@@ -1,50 +0,0 @@
# TSBS benchmark - v0.7.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 3695814.64 |
| EC2 c5d.2xlarge | 2987166.64 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |

View File

@@ -79,7 +79,7 @@ This RFC proposes to add a new expression node `MergeScan` to merge result from
│ │ │ │
└─Frontend──────┘ └─Remote-Sources──────────────┘
```
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to the frontend.
This merge operation simply chains all the the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to the frontend.
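
To make the chaining idea concrete, below is a minimal, hedged sketch. It is not the actual MergeScan operator (which streams gRPC results from datanodes via substrait plans); `RecordBatch` is a stand-in type and `merge_scan` is an invented helper used only to illustrate the coalesce-like concatenation:

```rust
/// Stand-in for the real RecordBatch type; illustration only.
struct RecordBatch(Vec<i64>);

/// Chains the batches produced by every remote source into one flat sequence,
/// preserving the per-source order, like a coalesce.
fn merge_scan(sources: Vec<Vec<RecordBatch>>) -> impl Iterator<Item = RecordBatch> {
    sources.into_iter().flatten()
}

fn main() {
    let datanode_1 = vec![RecordBatch(vec![1, 2]), RecordBatch(vec![3])];
    let datanode_2 = vec![RecordBatch(vec![4, 5])];
    let merged: Vec<_> = merge_scan(vec![datanode_1, datanode_2]).collect();
    assert_eq!(merged.len(), 3);
    assert_eq!(merged[2].0, vec![4, 5]); // batches from the second source come last
}
```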
## Commutativity of MergeScan

Binary file not shown (before: 65 KiB image).

View File

@@ -1,101 +0,0 @@
---
Feature Name: Multi-dimension Partition Rule
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3351
Date: 2024-02-21
Author: "Ruihang Xia <waynestxia@gmail.com>"
---
# Summary
A new region partition scheme that runs on multiple dimensions of the key space. The partition rule is defined by a set of simple expressions on the partition key columns.
# Motivation
The current partition rule is from MySQL's [`RANGE Partition`](https://dev.mysql.com/doc/refman/8.0/en/partitioning-range.html), which is based on a single dimension. It is sort of a [Hilbert Curve](https://en.wikipedia.org/wiki/Hilbert_curve) and picks several points on the curve to divide the space. It is neither easy to understand how the data gets partitioned nor flexible enough to handle complex partitioning requirements.
Considering future requirements such as region repartitioning or autonomous rebalancing, where both the workload and the partitions may change frequently, this RFC proposes a new region partition scheme that uses a set of simple expressions on the partition key columns to divide the key space.
# Details
## Partition rule
First, we define a simple expression that can be used to define the partition rule. A simple expression is a binary expression on the partition key columns that evaluates to a boolean value. The binary operator is limited to comparison operators (`=`, `!=`, `>`, `>=`, `<`, `<=`), and each operand is either a literal value or a partition column.
Examples of valid simple expressions are $`col_A = 10`$, $`col_A \gt 10 \& col_B \gt 20`$ or $`col_A \ne 10`$.
Those expressions can be used as predicates to divide the key space into different regions. The following example has two partition columns, `Col A` and `Col B`, and four partitioned regions.
```math
\left\{\begin{aligned}
&col_A \le 10 &Region_1 \\
&10 \lt col_A \& col_A \le 20 &Region_2 \\
&20 \lt col_A \space \& \space col_B \lt 100 &Region_3 \\
&20 \lt col_A \space \& \space col_B \ge 100 &Region_4
\end{aligned}\right\}
```
An advantage of this scheme is that it is easy to understand how the data gets partitioned. The above example can be visualized in a 2D space (two partition columns are involved in the example).
![example](2d-example.png)
Here each expression draws a line in the 2D space. Managing data partitioning becomes a matter of drawing lines in the key space.
To make it easy to use, there is a "default region" which catches all the data that doesn't match any of the previous expressions. The default region exists implicitly and does not need to be specified. It is also possible to remove this default region if the DB finds it unnecessary.
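
As a hedged illustration only (not GreptimeDB's implementation; the `Value`, `Op`, and `SimpleExpr` types and the region ids below are made up for this sketch), routing a row under such a rule set amounts to finding the first conjunction of simple expressions the row satisfies, falling back to the default region otherwise:

```rust
use std::collections::HashMap;

/// Illustrative value type; real rules operate on the table's column types.
#[derive(PartialEq, PartialOrd)]
enum Value {
    Int(i64),
}

enum Op { Lt, Le, Gt, Ge, Eq, Ne }

/// One simple expression: `column <op> literal`.
struct SimpleExpr {
    column: &'static str,
    op: Op,
    literal: Value,
}

impl SimpleExpr {
    fn eval(&self, row: &HashMap<&str, Value>) -> bool {
        let Some(v) = row.get(self.column) else { return false };
        match self.op {
            Op::Lt => v < &self.literal,
            Op::Le => v <= &self.literal,
            Op::Gt => v > &self.literal,
            Op::Ge => v >= &self.literal,
            Op::Eq => v == &self.literal,
            Op::Ne => v != &self.literal,
        }
    }
}

/// Each rule is a conjunction of simple expressions mapped to a region id.
/// The first matching rule wins; unmatched rows go to the default region.
fn route(rules: &[(Vec<SimpleExpr>, u32)], row: &HashMap<&str, Value>, default_region: u32) -> u32 {
    rules
        .iter()
        .find(|(exprs, _)| exprs.iter().all(|e| e.eval(row)))
        .map(|(_, region)| *region)
        .unwrap_or(default_region)
}

fn main() {
    // Region 1: col_a <= 10; Region 2: 10 < col_a AND col_a <= 20.
    let rules = vec![
        (vec![SimpleExpr { column: "col_a", op: Op::Le, literal: Value::Int(10) }], 1),
        (
            vec![
                SimpleExpr { column: "col_a", op: Op::Gt, literal: Value::Int(10) },
                SimpleExpr { column: "col_a", op: Op::Le, literal: Value::Int(20) },
            ],
            2,
        ),
    ];
    let row = HashMap::from([("col_a", Value::Int(15))]);
    assert_eq!(route(&rules, &row, 0), 2);
}
```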
## SQL interface
The SQL interface is responsible for two parts: specifying the partition columns and the partition rule. Though we are targeting an autonomous system, it is still allowed to give some bootstrap rules or hints when creating a table.
Partition columns are specified by the `PARTITION ON COLUMNS` sub-clause in `CREATE TABLE`:
```sql
CREATE TABLE t (...)
PARTITION ON COLUMNS (...) ();
```
The two following brackets are for the partition columns and the partition rule, respectively.
Columns provided here are only used as an allow-list for how the partition rule can be defined. This means (a) the order of the columns doesn't matter, and (b) the columns provided here are not necessarily used in the partition rule.
The partition rule part is a list of comma-separated simple expressions. Expressions here do not correspond one-to-one to regions, as they might be changed by the system to fit various workloads.
A full example of `CREATE TABLE` with partition rule is:
```sql
CREATE TABLE IF NOT EXISTS demo (
a STRING,
b STRING,
c STRING,
d STRING,
ts TIMESTAMP,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY (a, b, c, d)
)
PARTITION ON COLUMNS (c, b, a) (
a < 10,
10 >= a AND a < 20,
20 >= a AND b < 100,
20 >= a AND b > 100
)
```
## Combine with storage
Examining columns separately suits our columnar storage very well, in two aspects:
1. Simple expressions can be pushed down to the storage and file-format layers and are likely to hit existing indexes, which makes pruning very efficient (a sketch follows at the end of this section).
2. Columns in columnar storage are not tightly coupled as they are in traditional row storage, which means we can easily add or remove columns from the partition rule without much impact on the data (such as a global reshuffle).
The data file itself can be "projected" onto the key space as a polyhedron, and it is guaranteed that each face is parallel to some coordinate plane (in a 2D scenario, this means every file can be projected to a rectangle). Thus partitioning or repartitioning only needs to consider the related columns.
![sst-project](sst-project.png)
An additional limitation is that, considering how the index works and how we organize the primary keys at present, the partition columns are limited to a subset of the primary keys for better performance.
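
As a hedged sketch of the pruning point above (not GreptimeDB's pruning code; `FileStats` and the predicate shapes are assumptions for the example), per-column min/max statistics of a file are enough to decide whether a pushed-down simple expression can possibly match, so whole files can be skipped without reading them:

```rust
/// Illustrative per-file statistics of one partition column.
struct FileStats {
    min: i64,
    max: i64,
}

/// A few pushed-down predicate shapes built from simple expressions.
enum Predicate {
    /// col <= literal
    Le(i64),
    /// col > literal
    Gt(i64),
    /// low < col <= high
    InRange(i64, i64),
}

/// Returns true if the file may contain matching rows and must be scanned;
/// false means the file can be pruned without reading its data.
fn may_match(stats: &FileStats, pred: &Predicate) -> bool {
    match *pred {
        Predicate::Le(lit) => stats.min <= lit,
        Predicate::Gt(lit) => stats.max > lit,
        Predicate::InRange(low, high) => stats.max > low && stats.min <= high,
    }
}

fn main() {
    // A file whose partition column spans [25, 90] cannot satisfy `col <= 20`.
    let file = FileStats { min: 25, max: 90 };
    assert!(!may_match(&file, &Predicate::Le(20)));
    assert!(may_match(&file, &Predicate::Gt(20)));
    assert!(may_match(&file, &Predicate::InRange(10, 30)));
}
```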
# Drawbacks
This is a breaking change.

Binary file not shown (before: 71 KiB image).

View File

@@ -66,7 +66,7 @@
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"graphTooltip": 0,
"id": null,
"links": [],
"liveNow": false,
@@ -2116,7 +2116,7 @@
}
]
},
"unit": "none"
"unit": "bytes"
},
"overrides": []
},
@@ -2126,7 +2126,7 @@
"x": 0,
"y": 61
},
"id": 17,
"id": 12,
"interval": "1s",
"options": {
"legend": {
@@ -2147,8 +2147,8 @@
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "rate(raft_engine_sync_log_duration_seconds_count[2s])",
"editorMode": "builder",
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
@@ -2158,7 +2158,7 @@
"useBackend": false
}
],
"title": "raft engine sync count",
"title": "wal write size",
"type": "timeseries"
},
{
@@ -2378,120 +2378,6 @@
],
"title": "raft engine write duration seconds",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 68
},
"id": 12,
"interval": "1s",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
"legendFormat": "req-size-p95",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
"expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
"hide": false,
"instant": false,
"legendFormat": "throughput",
"range": true,
"refId": "B"
}
],
"title": "wal write size",
"type": "timeseries"
}
],
"refresh": "10s",
@@ -2501,13 +2387,13 @@
"list": []
},
"time": {
"from": "now-30m",
"from": "now-3h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "GreptimeDB",
"uid": "e7097237-669b-4f8d-b751-13067afbfb68",
"version": 12,
"version": 9,
"weekStart": ""
}

View File

@@ -19,12 +19,6 @@ includes = [
"*.py",
]
excludes = [
# copied sources
"src/common/base/src/readable_size.rs",
"src/servers/src/repeated_field.rs",
]
[properties]
inceptionYear = 2023
copyrightOwner = "Greptime Team"

View File

@@ -1,7 +1,8 @@
#!/usr/bin/env bash
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
set -ex
set -e -x
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
@@ -12,34 +13,13 @@ RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR
if [[ -z "$GITHUB_PROXY_URL" ]]; then
GITHUB_URL="https://github.com"
else
GITHUB_URL="${GITHUB_PROXY_URL%/}"
fi
function retry_fetch() {
local url=$1
local filename=$2
curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
echo "Failed to download $url"
echo "You may try to set http_proxy and https_proxy environment variables."
if [[ -z "$GITHUB_PROXY_URL" ]]; then
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
fi
exit 1
}
}
# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets.
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
# Download the tar file containing the built dashboard assets.
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
# Verify the checksums match; exit if they don't.
case "$(uname -s)" in

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
common-base.workspace = true
common-decimal.workspace = true
@@ -18,6 +15,7 @@ greptime-proto.workspace = true
paste = "1.0"
prost.workspace = true
snafu.workspace = true
tonic.workspace = true
[build-dependencies]
tonic-build = "0.9"

View File

@@ -707,6 +707,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
}
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
// TODO(fys): use macros to optimize code
match data_type {
ConcreteDataType::Int64(_) => values
.i64_values

View File

@@ -8,17 +8,13 @@ license.workspace = true
default = []
testing = []
[lints]
workspace = true
[dependencies]
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true

View File

@@ -22,9 +22,6 @@ use snafu::{ensure, OptionExt};
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
use crate::user_provider::watch_file_user_provider::{
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
};
use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
@@ -43,12 +40,9 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
match name {
STATIC_USER_PROVIDER => {
let provider =
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?;
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
Ok(provider)
}
WATCH_FILE_USER_PROVIDER => {
WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
}
_ => InvalidConfigSnafu {
value: name.to_string(),
msg: "Invalid UserProviderOption",

View File

@@ -64,13 +64,6 @@ pub enum Error {
username: String,
},
#[snafu(display("Failed to initialize a watcher for file {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
},
#[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { location: Location },
}
@@ -80,7 +73,6 @@ impl ErrorExt for Error {
match self {
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { .. } => StatusCode::Internal,

View File

@@ -13,24 +13,10 @@
// limitations under the License.
pub(crate) mod static_user_provider;
pub(crate) mod watch_file_user_provider;
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password};
use crate::error::{
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, UserInfoRef};
use crate::error::Result;
use crate::UserInfoRef;
#[async_trait::async_trait]
pub trait UserProvider: Send + Sync {
@@ -58,88 +44,3 @@ pub trait UserProvider: Send + Sync {
Ok(user_info)
}
}
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
// check valid path
let path = Path::new(filepath);
if !path.exists() {
return Ok(None);
}
ensure!(
path.is_file(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider file must be a file",
}
);
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(
!credential.is_empty(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider's file must contains at least one valid credential",
}
);
Ok(Some(credential))
}
fn authenticate_with_credential(
users: &HashMap<String, Vec<u8>>,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
}
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
}

View File

@@ -13,34 +13,60 @@
// limitations under the License.
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use async_trait::async_trait;
use snafu::OptionExt;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{InvalidConfigSnafu, Result};
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
use crate::error::{
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
pub(crate) struct StaticUserProvider {
users: HashMap<String, Vec<u8>>,
}
impl TryFrom<&str> for StaticUserProvider {
type Error = Error;
impl StaticUserProvider {
pub(crate) fn new(value: &str) -> Result<Self> {
fn try_from(value: &str) -> Result<Self> {
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
value: value.to_string(),
msg: "StaticUserProviderOption must be in format `<option>:<value>`",
})?;
return match mode {
"file" => {
let users = load_credential_from_file(content)?
.context(InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticFileUserProvider must be a valid file path",
})?;
Ok(StaticUserProvider { users })
// check valid path
let path = Path::new(content);
ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticUserProviderOption file must be a valid file path",
});
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(!credential.is_empty(), InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticUserProviderOption file must contains at least one valid credential",
});
Ok(StaticUserProvider { users: credential, })
}
"cmd" => content
.split(',')
@@ -57,19 +83,66 @@ impl StaticUserProvider {
value: mode.to_string(),
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
}
.fail(),
.fail(),
};
}
}
pub(crate) struct StaticUserProvider {
users: HashMap<String, Vec<u8>>,
}
#[async_trait]
impl UserProvider for StaticUserProvider {
fn name(&self) -> &str {
STATIC_USER_PROVIDER
}
async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> {
authenticate_with_credential(&self.users, id, pwd)
async fn authenticate(
&self,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
return if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
};
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
}
async fn authorize(
@@ -108,7 +181,7 @@ pub mod test {
#[tokio::test]
async fn test_authorize() {
let user_info = DefaultUserInfo::with_name("root");
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
provider
.authorize("catalog", "schema", &user_info)
.await
@@ -117,7 +190,7 @@ pub mod test {
#[tokio::test]
async fn test_inline_provider() {
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await;
}
@@ -141,7 +214,7 @@ admin=654321",
}
let param = format!("file:{file_path}");
let provider = StaticUserProvider::new(param.as_str()).unwrap();
let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await;
}

View File

@@ -1,215 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_telemetry::{info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::{ensure, ResultExt};
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
/// A user provider that reads user credential from a file and watches the file for changes.
///
/// An empty file is invalid, but a missing file means every user can be authenticated.
pub(crate) struct WatchFileUserProvider {
users: WatchedCredentialRef,
}
impl WatchFileUserProvider {
pub fn new(filepath: &str) -> Result<Self> {
let credential = load_credential_from_file(filepath)?;
let users = Arc::new(Mutex::new(credential));
let this = WatchFileUserProvider {
users: users.clone(),
};
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut debouncer =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
let mut dir = Path::new(filepath).to_path_buf();
ensure!(
dir.pop(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider path must be a file path",
}
);
debouncer
.watch(&dir, RecursiveMode::NonRecursive)
.context(FileWatchSnafu { path: filepath })?;
let filepath = filepath.to_string();
std::thread::spawn(move || {
let filename = Path::new(&filepath).file_name();
let _hold = debouncer;
while let Ok(res) = rx.recv() {
if let Ok(event) = res {
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
let is_relevant_event = matches!(
event.kind,
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
);
if is_this_file && is_relevant_event {
info!(?event.kind, "User provider file {} changed", &filepath);
match load_credential_from_file(&filepath) {
Ok(credential) => {
let mut users =
users.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {filepath} reloaded");
#[cfg(test)]
info!("User provider file {filepath} reloaded: {credential:?}");
*users = credential;
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {filepath}; keep the old one",
)
}
}
}
}
}
});
Ok(this)
}
}
#[async_trait]
impl UserProvider for WatchFileUserProvider {
fn name(&self) -> &str {
WATCH_FILE_USER_PROVIDER
}
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
let users = self.users.lock().expect("users credential must be valid");
if let Some(users) = users.as_ref() {
authenticate_with_credential(users, id, password)
} else {
match id {
Identity::UserId(id, _) => {
warn!(id, "User provider file not exist, allow all users");
Ok(DefaultUserInfo::with_name(id))
}
}
}
}
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
// default allow all
Ok(())
}
}
#[cfg(test)]
pub mod test {
use std::time::{Duration, Instant};
use common_test_util::temp_dir::create_temp_dir;
use tokio::time::sleep;
use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
use crate::user_provider::{Identity, Password};
use crate::UserProvider;
async fn test_authenticate(
provider: &dyn UserProvider,
username: &str,
password: &str,
ok: bool,
timeout: Option<Duration>,
) {
if let Some(timeout) = timeout {
let deadline = Instant::now().checked_add(timeout).unwrap();
loop {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
if re.is_ok() == ok {
break;
} else if Instant::now() < deadline {
sleep(Duration::from_millis(100)).await;
} else {
panic!("timeout (username: {username}, password: {password}, expected: {ok})");
}
}
} else {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
assert_eq!(
re.is_ok(),
ok,
"username: {}, password: {}",
username,
password
);
}
}
#[tokio::test]
async fn test_file_provider() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_provider");
let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());
// write a tmp file
assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
let timeout = Duration::from_secs(60);
test_authenticate(&provider, "root", "123456", true, None).await;
test_authenticate(&provider, "admin", "654321", true, None).await;
test_authenticate(&provider, "root", "654321", false, None).await;
// update the tmp file
assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
// remove the tmp file
assert!(std::fs::remove_file(&file_path).is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
// recreate the tmp file
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
}
}

View File

@@ -7,21 +7,21 @@ license.workspace = true
[features]
testing = []
[lints]
workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.0"
arrow.workspace = true
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-version.workspace = true
@@ -34,13 +34,15 @@ itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] }
parking_lot = "0.12"
partition.workspace = true
paste = "1.0"
prometheus.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
store-api.workspace = true
table.workspace = true
tokio.workspace = true

View File

@@ -164,8 +164,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error },
#[snafu(display("Failed to find table partitions: #{table}"))]
FindPartitions {
source: partition::error::Error,
table: String,
},
#[snafu(display("Failed to find region routes"))]
FindRegionRoutes { source: partition::error::Error },
@@ -251,12 +254,6 @@ pub enum Error {
source: common_meta::error::Error,
location: Location,
},
#[snafu(display("Get null from table cache, key: {}", key))]
TableCacheNotGet { key: String, location: Location },
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
GetTableCache { err_msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -317,7 +314,6 @@ impl ErrorExt for Error {
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
}
}

View File

@@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod columns;
pub mod key_column_usage;
mod columns;
mod key_column_usage;
mod memory_table;
mod partitions;
mod predicate;
mod region_peers;
mod runtime_metrics;
pub mod schemata;
mod schemata;
mod table_names;
pub mod tables;
mod tables;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
@@ -41,7 +41,8 @@ use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::{Table, TableRef};
use table::thin_table::{ThinTable, ThinTableAdapter};
use table::TableRef;
pub use table_names::*;
use self::columns::InformationSchemaColumns;
@@ -186,9 +187,10 @@ impl InformationSchemaProvider {
self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Inexact;
let thin_table = ThinTable::new(table_info, filter_pushdown);
let data_source = Arc::new(InformationTableDataSource::new(table));
let table = Table::new(table_info, filter_pushdown, data_source);
Arc::new(table)
Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
})
}

View File

@@ -26,16 +26,13 @@ use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
ConstantVector, Int64Vector, Int64VectorBuilder, StringVector, StringVectorBuilder, VectorRef,
};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use sql::statements;
use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, COLUMNS};
@@ -51,42 +48,18 @@ pub(super) struct InformationSchemaColumns {
catalog_manager: Weak<dyn CatalogManager>,
}
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name";
const ORDINAL_POSITION: &str = "ordinal_position";
const CHARACTER_MAXIMUM_LENGTH: &str = "character_maximum_length";
const CHARACTER_OCTET_LENGTH: &str = "character_octet_length";
const NUMERIC_PRECISION: &str = "numeric_precision";
const NUMERIC_SCALE: &str = "numeric_scale";
const DATETIME_PRECISION: &str = "datetime_precision";
const CHARACTER_SET_NAME: &str = "character_set_name";
pub const COLLATION_NAME: &str = "collation_name";
pub const COLUMN_KEY: &str = "column_key";
pub const EXTRA: &str = "extra";
pub const PRIVILEGES: &str = "privileges";
const GENERATION_EXPRESSION: &str = "generation_expression";
// Extension field to keep greptime data type name
pub const GREPTIME_DATA_TYPE: &str = "greptime_data_type";
pub const DATA_TYPE: &str = "data_type";
pub const SEMANTIC_TYPE: &str = "semantic_type";
pub const COLUMN_DEFAULT: &str = "column_default";
pub const IS_NULLABLE: &str = "is_nullable";
const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const COLUMN_NAME: &str = "column_name";
const DATA_TYPE: &str = "data_type";
const SEMANTIC_TYPE: &str = "semantic_type";
const COLUMN_DEFAULT: &str = "column_default";
const IS_NULLABLE: &str = "is_nullable";
const COLUMN_TYPE: &str = "column_type";
pub const COLUMN_COMMENT: &str = "column_comment";
const SRS_ID: &str = "srs_id";
const COLUMN_COMMENT: &str = "column_comment";
const INIT_CAPACITY: usize = 42;
// The maximum length of string type
const MAX_STRING_LENGTH: i64 = 2147483647;
const UTF8_CHARSET_NAME: &str = "utf8";
const UTF8_COLLATE_NAME: &str = "utf8_bin";
const PRI_COLUMN_KEY: &str = "PRI";
const TIME_INDEX_COLUMN_KEY: &str = "TIME INDEX";
const DEFAULT_PRIVILEGES: &str = "select,insert";
const EMPTY_STR: &str = "";
impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
@@ -102,46 +75,12 @@ impl InformationSchemaColumns {
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(ORDINAL_POSITION, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(
CHARACTER_MAXIMUM_LENGTH,
ConcreteDataType::int64_datatype(),
true,
),
ColumnSchema::new(
CHARACTER_OCTET_LENGTH,
ConcreteDataType::int64_datatype(),
true,
),
ColumnSchema::new(NUMERIC_PRECISION, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(NUMERIC_SCALE, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(DATETIME_PRECISION, ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(
CHARACTER_SET_NAME,
ConcreteDataType::string_datatype(),
true,
),
ColumnSchema::new(COLLATION_NAME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(COLUMN_KEY, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(EXTRA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PRIVILEGES, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
GENERATION_EXPRESSION,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(
GREPTIME_DATA_TYPE,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_DEFAULT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(IS_NULLABLE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_COMMENT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SRS_ID, ConcreteDataType::int64_datatype(), true),
]))
}
@@ -197,18 +136,9 @@ struct InformationSchemaColumnsBuilder {
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
column_names: StringVectorBuilder,
ordinal_positions: Int64VectorBuilder,
character_maximum_lengths: Int64VectorBuilder,
character_octet_lengths: Int64VectorBuilder,
numeric_precisions: Int64VectorBuilder,
numeric_scales: Int64VectorBuilder,
datetime_precisions: Int64VectorBuilder,
character_set_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
column_keys: StringVectorBuilder,
greptime_data_types: StringVectorBuilder,
data_types: StringVectorBuilder,
semantic_types: StringVectorBuilder,
column_defaults: StringVectorBuilder,
is_nullables: StringVectorBuilder,
column_types: StringVectorBuilder,
@@ -229,16 +159,6 @@ impl InformationSchemaColumnsBuilder {
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_maximum_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_octet_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
numeric_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
numeric_scales: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
datetime_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
character_set_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_keys: StringVectorBuilder::with_capacity(INIT_CAPACITY),
greptime_data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
semantic_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_defaults: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -274,7 +194,6 @@ impl InformationSchemaColumnsBuilder {
};
self.add_column(
idx,
&predicates,
&catalog_name,
&schema_name,
@@ -289,10 +208,8 @@ impl InformationSchemaColumnsBuilder {
self.finish()
}
#[allow(clippy::too_many_arguments)]
fn add_column(
&mut self,
index: usize,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
@@ -300,16 +217,7 @@ impl InformationSchemaColumnsBuilder {
semantic_type: &str,
column_schema: &ColumnSchema,
) {
// Use sql data type name
let data_type = statements::concrete_data_type_to_sql_data_type(&column_schema.data_type)
.map(|dt| dt.to_string().to_lowercase())
.unwrap_or_else(|_| column_schema.data_type.name());
let column_key = match semantic_type {
SEMANTIC_TYPE_PRIMARY_KEY => PRI_COLUMN_KEY,
SEMANTIC_TYPE_TIME_INDEX => TIME_INDEX_COLUMN_KEY,
_ => EMPTY_STR,
};
let data_type = &column_schema.data_type.name();
let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
@@ -318,8 +226,6 @@ impl InformationSchemaColumnsBuilder {
(COLUMN_NAME, &Value::from(column_schema.name.as_str())),
(DATA_TYPE, &Value::from(data_type.as_str())),
(SEMANTIC_TYPE, &Value::from(semantic_type)),
(ORDINAL_POSITION, &Value::from((index + 1) as i64)),
(COLUMN_KEY, &Value::from(column_key)),
];
if !predicates.eval(&row) {
@@ -330,63 +236,7 @@ impl InformationSchemaColumnsBuilder {
self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name));
self.column_names.push(Some(&column_schema.name));
// Starts from 1
self.ordinal_positions.push(Some((index + 1) as i64));
if column_schema.data_type.is_string() {
self.character_maximum_lengths.push(Some(MAX_STRING_LENGTH));
self.character_octet_lengths.push(Some(MAX_STRING_LENGTH));
self.numeric_precisions.push(None);
self.numeric_scales.push(None);
self.datetime_precisions.push(None);
self.character_set_names.push(Some(UTF8_CHARSET_NAME));
self.collation_names.push(Some(UTF8_COLLATE_NAME));
} else if column_schema.data_type.is_numeric() || column_schema.data_type.is_decimal() {
self.character_maximum_lengths.push(None);
self.character_octet_lengths.push(None);
self.numeric_precisions.push(
column_schema
.data_type
.numeric_precision()
.map(|x| x as i64),
);
self.numeric_scales
.push(column_schema.data_type.numeric_scale().map(|x| x as i64));
self.datetime_precisions.push(None);
self.character_set_names.push(None);
self.collation_names.push(None);
} else {
self.character_maximum_lengths.push(None);
self.character_octet_lengths.push(None);
self.numeric_precisions.push(None);
self.numeric_scales.push(None);
match &column_schema.data_type {
ConcreteDataType::DateTime(datetime_type) => {
self.datetime_precisions
.push(Some(datetime_type.precision() as i64));
}
ConcreteDataType::Timestamp(ts_type) => {
self.datetime_precisions
.push(Some(ts_type.precision() as i64));
}
ConcreteDataType::Time(time_type) => {
self.datetime_precisions
.push(Some(time_type.precision() as i64));
}
_ => self.datetime_precisions.push(None),
}
self.character_set_names.push(None);
self.collation_names.push(None);
}
self.column_keys.push(Some(column_key));
self.greptime_data_types
.push(Some(&column_schema.data_type.name()));
self.data_types.push(Some(&data_type));
self.data_types.push(Some(data_type));
self.semantic_types.push(Some(semantic_type));
self.column_defaults.push(
column_schema
@@ -399,52 +249,23 @@ impl InformationSchemaColumnsBuilder {
} else {
self.is_nullables.push(Some("No"));
}
self.column_types.push(Some(&data_type));
self.column_types.push(Some(data_type));
self.column_comments
.push(column_schema.column_comment().map(|x| x.as_ref()));
}
fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.collation_names.len();
let privileges = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec![DEFAULT_PRIVILEGES])),
rows_num,
));
let empty_string = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec![EMPTY_STR])),
rows_num,
));
let srs_ids = Arc::new(ConstantVector::new(
Arc::new(Int64Vector::from(vec![None])),
rows_num,
));
let columns: Vec<VectorRef> = vec![
Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.column_names.finish()),
Arc::new(self.ordinal_positions.finish()),
Arc::new(self.character_maximum_lengths.finish()),
Arc::new(self.character_octet_lengths.finish()),
Arc::new(self.numeric_precisions.finish()),
Arc::new(self.numeric_scales.finish()),
Arc::new(self.datetime_precisions.finish()),
Arc::new(self.character_set_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.column_keys.finish()),
empty_string.clone(),
privileges,
empty_string,
Arc::new(self.greptime_data_types.finish()),
Arc::new(self.data_types.finish()),
Arc::new(self.semantic_types.finish()),
Arc::new(self.column_defaults.finish()),
Arc::new(self.is_nullables.finish()),
Arc::new(self.column_types.finish()),
Arc::new(self.column_comments.finish()),
srs_ids,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)

View File

@@ -37,16 +37,13 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
pub const CONSTRAINT_NAME: &str = "constraint_name";
// It's always `def` in MySQL
pub const TABLE_CATALOG: &str = "table_catalog";
// The real catalog name for this key column.
pub const REAL_TABLE_CATALOG: &str = "real_table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name";
pub const ORDINAL_POSITION: &str = "ordinal_position";
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
const CONSTRAINT_NAME: &str = "constraint_name";
const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const COLUMN_NAME: &str = "column_name";
const ORDINAL_POSITION: &str = "ordinal_position";
const INIT_CAPACITY: usize = 42;
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
@@ -79,11 +76,6 @@ impl InformationSchemaKeyColumnUsage {
),
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
REAL_TABLE_CATALOG,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
@@ -166,7 +158,6 @@ struct InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder,
constraint_name: StringVectorBuilder,
table_catalog: StringVectorBuilder,
real_table_catalog: StringVectorBuilder,
table_schema: StringVectorBuilder,
table_name: StringVectorBuilder,
column_name: StringVectorBuilder,
@@ -188,7 +179,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
real_table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -233,7 +223,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
&predicates,
&schema_name,
"TIME INDEX",
&catalog_name,
&schema_name,
&table_name,
&column.name,
@@ -242,7 +231,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
}
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.clone(),
column.name.clone(),
@@ -256,14 +244,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
}
}
for (i, (catalog_name, schema_name, table_name, column_name)) in
for (i, (schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
"PRIMARY",
&catalog_name,
&schema_name,
&table_name,
&column_name,
@@ -282,7 +269,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
predicates: &Predicates,
constraint_schema: &str,
constraint_name: &str,
table_catalog: &str,
table_schema: &str,
table_name: &str,
column_name: &str,
@@ -291,7 +277,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
let row = [
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
(CONSTRAINT_NAME, &Value::from(constraint_name)),
(REAL_TABLE_CATALOG, &Value::from(table_catalog)),
(TABLE_SCHEMA, &Value::from(table_schema)),
(TABLE_NAME, &Value::from(table_name)),
(COLUMN_NAME, &Value::from(column_name)),
@@ -306,7 +291,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.constraint_schema.push(Some(constraint_schema));
self.constraint_name.push(Some(constraint_name));
self.table_catalog.push(Some("def"));
self.real_table_catalog.push(Some(table_catalog));
self.table_schema.push(Some(table_schema));
self.table_name.push(Some(table_name));
self.column_name.push(Some(column_name));
@@ -326,7 +310,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
Arc::new(self.constraint_schema.finish()),
Arc::new(self.constraint_name.finish()),
Arc::new(self.table_catalog.finish()),
Arc::new(self.real_table_catalog.finish()),
Arc::new(self.table_schema.finish()),
Arc::new(self.table_name.finish()),
Arc::new(self.column_name.finish()),
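
Earlier in this hunk, the builder collects the primary-key columns as tuples (one side also carries the catalog name) and then enumerates them to emit the `PRIMARY` rows. A stripped-down sketch of that accumulate-then-enumerate step in plain Rust; the row struct, the 1-based ordinal, and all names are illustrative, not the real builder's:

```rust
/// One emitted KEY_COLUMN_USAGE row (an illustrative subset of the real columns).
#[derive(Debug)]
struct KeyColumnUsageRow {
    constraint_name: &'static str,
    schema_name: String,
    table_name: String,
    column_name: String,
    ordinal_position: u32,
}

fn primary_key_rows(constraints: Vec<(String, String, String)>) -> Vec<KeyColumnUsageRow> {
    constraints
        .into_iter()
        .enumerate()
        .map(|(i, (schema_name, table_name, column_name))| KeyColumnUsageRow {
            constraint_name: "PRIMARY",
            schema_name,
            table_name,
            column_name,
            // Ordinal positions are assumed to be 1-based, hence the `+ 1`.
            ordinal_position: i as u32 + 1,
        })
        .collect()
}

fn main() {
    let rows = primary_key_rows(vec![
        ("public".into(), "metrics".into(), "host".into()),
        ("public".into(), "metrics".into(), "idc".into()),
    ]);
    assert_eq!(rows[1].ordinal_position, 2);
    println!("{rows:?}");
}
```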

View File

@@ -14,15 +14,13 @@
use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use common_catalog::consts::MITO_ENGINE;
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use crate::information_schema::table_names::*;
const NO_VALUE: &str = "NO";
/// Finds the schema and columns by `table_name`; only valid for memory tables.
/// Safety: the caller MUST ensure the table schema exists, otherwise this panics.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
@@ -61,15 +59,14 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"SAVEPOINTS",
]),
vec![
Arc::new(StringVector::from(vec![MITO_ENGINE, METRIC_ENGINE])),
Arc::new(StringVector::from(vec!["DEFAULT", "YES"])),
Arc::new(StringVector::from(vec![MITO_ENGINE])),
Arc::new(StringVector::from(vec!["DEFAULT"])),
Arc::new(StringVector::from(vec![
"Storage engine for time-series data",
"Storage engine for observability scenarios, which is adept at handling a large number of small tables, making it particularly suitable for cloud-native monitoring",
])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec!["NO"])),
],
),
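
As its doc comment says, `get_schema_columns` is a static lookup keyed by table name that panics on an unknown name. A reduced sketch of that shape; the engine constants normally come from `common_catalog::consts`, so their string values here, the column layout, and the function name are placeholders:

```rust
// Placeholder values; the real constants live in `common_catalog::consts`.
const MITO_ENGINE: &str = "mito";
const METRIC_ENGINE: &str = "metric";

/// Returns the column values backing a memory table.
/// Panics if `table_name` is not a known memory table, mirroring the safety
/// contract noted in the diff above.
fn get_memory_table_columns(table_name: &str) -> Vec<Vec<&'static str>> {
    match table_name.to_uppercase().as_str() {
        "ENGINES" => vec![
            vec![MITO_ENGINE, METRIC_ENGINE], // ENGINE
            vec!["DEFAULT", "YES"],           // SUPPORT
            vec!["NO", "NO"],                 // SAVEPOINTS
        ],
        _ => unreachable!("unknown memory table: {table_name}"),
    }
}

fn main() {
    let columns = get_memory_table_columns("engines");
    assert_eq!(columns[0], vec![MITO_ENGINE, METRIC_ENGINE]);
}
```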

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use core::pin::pin;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -32,7 +31,7 @@ use datatypes::vectors::{
ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
};
use futures::{StreamExt, TryStreamExt};
use futures::TryStreamExt;
use partition::manager::PartitionInfo;
use partition::partition::PartitionDef;
use snafu::{OptionExt, ResultExt};
@@ -241,64 +240,40 @@ impl InformationSchemaPartitionsBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let table_info_stream = catalog_manager
.tables(&catalog_name, &schema_name)
.await
.try_filter_map(|t| async move {
let table_info = t.table_info();
if table_info.table_type == TableType::Temporary {
Ok(None)
} else {
Ok(Some(table_info))
}
});
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
const BATCH_SIZE: usize = 128;
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
// Split table infos into chunks
let mut table_info_chunks = pin!(table_info_stream.ready_chunks(BATCH_SIZE));
if table_info.table_type == TableType::Temporary {
continue;
}
while let Some(table_infos) = table_info_chunks.next().await {
let table_infos = table_infos.into_iter().collect::<Result<Vec<_>>>()?;
let table_ids: Vec<TableId> =
table_infos.iter().map(|info| info.ident.table_id).collect();
let mut table_partitions = if let Some(partition_manager) = &partition_manager {
let table_id = table_info.ident.table_id;
let partitions = if let Some(partition_manager) = &partition_manager {
partition_manager
.batch_find_table_partitions(&table_ids)
.find_table_partitions(table_id)
.await
.context(FindPartitionsSnafu)?
.context(FindPartitionsSnafu {
table: &table_info.name,
})?
} else {
// The current node must be a standalone instance, which contains only one partition by default.
// TODO(dennis): change it when we support multi-regions for standalone.
table_ids
.into_iter()
.map(|table_id| {
(
table_id,
vec![PartitionInfo {
id: RegionId::new(table_id, 0),
partition: PartitionDef::new(vec![], vec![]),
}],
)
})
.collect()
vec![PartitionInfo {
id: RegionId::new(table_id, 0),
partition: PartitionDef::new(vec![], vec![]),
}]
};
for table_info in table_infos {
let partitions = table_partitions
.remove(&table_info.ident.table_id)
.unwrap_or(vec![]);
self.add_partitions(
&predicates,
&table_info,
&catalog_name,
&schema_name,
&table_info.name,
&partitions,
);
}
self.add_partitions(
&predicates,
&table_info,
&catalog_name,
&schema_name,
&table_info.name,
&partitions,
);
}
}
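
One side of this hunk batches the table-info stream with `ready_chunks(BATCH_SIZE)` so partitions can be fetched for up to 128 tables per call instead of one request per table. A self-contained sketch of that chunking pattern with `futures` and `tokio` (the batch lookup is a stand-in, not the real partition manager):

```rust
use std::pin::pin;

use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    const BATCH_SIZE: usize = 128;

    // Stand-in for the stream of table ids produced by the catalog manager.
    let table_ids = stream::iter(0u32..300);

    // Group whatever is already ready into chunks of at most BATCH_SIZE items.
    let mut chunks = pin!(table_ids.ready_chunks(BATCH_SIZE));

    while let Some(batch) = chunks.next().await {
        // Stand-in for `batch_find_table_partitions(&table_ids)`.
        let looked_up = batch.len();
        println!("looked up partitions for {looked_up} tables in one call");
    }
}
```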

View File

@@ -199,7 +199,7 @@ impl InformationSchemaRegionPeersBuilder {
let table_routes = if let Some(partition_manager) = &partition_manager {
partition_manager
.batch_find_region_routes(&table_ids)
.find_region_routes_batch(&table_ids)
.await
.context(FindRegionRoutesSnafu)?
} else {

View File

@@ -37,8 +37,8 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const CATALOG_NAME: &str = "catalog_name";
const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
const INIT_CAPACITY: usize = 42;

View File

@@ -39,10 +39,10 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;

View File

@@ -82,10 +82,12 @@ impl CachedMetaKvBackendBuilder {
let cache_ttl = self.cache_ttl.unwrap_or(DEFAULT_CACHE_TTL);
let cache_tti = self.cache_tti.unwrap_or(DEFAULT_CACHE_TTI);
let cache = CacheBuilder::new(cache_max_capacity)
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build();
let cache = Arc::new(
CacheBuilder::new(cache_max_capacity)
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build(),
);
let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client,
@@ -102,7 +104,7 @@ impl CachedMetaKvBackendBuilder {
}
}
pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
/// A wrapper of `MetaKvBackend` with cache support.
///
@@ -115,7 +117,7 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
/// TTL and TTI for cache.
pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef,
cache: CacheBackend,
cache: CacheBackendRef,
name: String,
version: AtomicUsize,
}
@@ -315,10 +317,12 @@ impl CachedMetaKvBackend {
// only for test
#[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self {
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let cache = Arc::new(
CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build(),
);
let name = format!("CachedKvBackend({})", kv_backend.name());
Self {
@@ -329,7 +333,7 @@ impl CachedMetaKvBackend {
}
}
pub fn cache(&self) -> &CacheBackend {
pub fn cache(&self) -> &CacheBackendRef {
&self.cache
}
@@ -364,10 +368,6 @@ impl KvBackend for MetaKvBackend {
"MetaKvBackend"
}
fn as_any(&self) -> &dyn Any {
self
}
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
self.client
.range(req)
@@ -376,6 +376,27 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu)
}
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
key: kv.take_key(),
value: kv.take_value(),
}))
}
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
self.client
.batch_put(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
self.client
.put(req)
@@ -384,9 +405,17 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu)
}
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
self.client
.batch_put(req)
.delete_range(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
self.client
.batch_delete(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
@@ -411,33 +440,8 @@ impl KvBackend for MetaKvBackend {
.context(ExternalSnafu)
}
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
self.client
.delete_range(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
self.client
.batch_delete(req)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
key: kv.take_key(),
value: kv.take_value(),
}))
fn as_any(&self) -> &dyn Any {
self
}
}
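
One side of this hunk wraps the freshly built moka cache in an `Arc` (the `CacheBackendRef` alias) so the same cache can be shared, while keeping the capacity/TTL/TTI builder chain. A minimal sketch of that construction using moka's sync cache for brevity (assuming moka with its `sync` feature); the capacity, durations, key format, and value type are illustrative:

```rust
use std::sync::Arc;
use std::time::Duration;

use moka::sync::CacheBuilder;

fn main() {
    // Same builder chain as in the diff: capacity, time-to-live, time-to-idle.
    let cache = Arc::new(
        CacheBuilder::new(10_000) // max capacity (illustrative)
            .time_to_live(Duration::from_secs(10 * 60))
            .time_to_idle(Duration::from_secs(5 * 60))
            .build(),
    );

    // The Arc lets several owners (backend, invalidator, tests) share one cache.
    let shared = Arc::clone(&cache);
    shared.insert("table:greptime.public.metrics".to_string(), 42u32);
    assert_eq!(cache.get(&"table:greptime.public.metrics".to_string()), Some(42));
}
```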

View File

@@ -15,36 +15,32 @@
use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use std::time::Duration;
use async_stream::try_stream;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use moka::future::{Cache as AsyncCache, CacheBuilder};
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;
use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu,
Result as CatalogResult, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager;
@@ -56,74 +52,56 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
table_cache: AsyncCache<String, TableRef>,
}
struct TableCacheInvalidator {
table_cache: AsyncCache<String, TableRef>,
}
impl TableCacheInvalidator {
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
Self { table_cache }
}
fn make_table(table_info_value: TableInfoValue) -> CatalogResult<TableRef> {
let table_info = table_info_value
.table_info
.try_into()
.context(catalog_err::InvalidTableInfoInCatalogSnafu)?;
Ok(DistTable::table(Arc::new(table_info)))
}
#[async_trait::async_trait]
impl CacheInvalidator for TableCacheInvalidator {
async fn invalidate(
&self,
_ctx: &Context,
caches: Vec<CacheIdent>,
) -> common_meta::error::Result<()> {
for cache in caches {
if let CacheIdent::TableName(table_name) = cache {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.table_cache.invalidate(&table_cache_key).await;
}
}
Ok(())
impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_id(ctx, table_id)
.await
}
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
}
}
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
const DEFAULT_CACHED_CATALOG: u64 = 128;
impl KvBackendCatalogManager {
pub async fn new(
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
) -> Arc<Self> {
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build();
multi_cache_invalidator
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
.await;
pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
catalog_cache: Cache::new(DEFAULT_CACHED_CATALOG),
information_schema_provider: Arc::new(InformationSchemaProvider::new(
DEFAULT_CATALOG_NAME.to_string(),
// The catalog name is not used in system_catalog, so let it empty
String::default(),
me.clone(),
)),
},
table_cache,
})
}
@@ -142,11 +120,12 @@ impl CatalogManager for KvBackendCatalogManager {
self
}
async fn catalog_names(&self) -> Result<Vec<String>> {
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.catalog_manager()
.catalog_names();
.catalog_names()
.await;
let keys = stream
.try_collect::<Vec<_>>()
@@ -157,11 +136,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys)
}
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.schema_manager()
.schema_names(catalog);
.schema_names(catalog)
.await;
let mut keys = stream
.try_collect::<BTreeSet<_>>()
.await
@@ -173,11 +153,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys.into_iter().collect())
}
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.table_name_manager()
.tables(catalog, schema);
.tables(catalog, schema)
.await;
let mut tables = stream
.try_collect::<Vec<_>>()
.await
@@ -191,7 +172,7 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(tables.into_iter().collect())
}
async fn catalog_exists(&self, catalog: &str) -> Result<bool> {
async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
self.table_metadata_manager
.catalog_manager()
.exists(CatalogNameKey::new(catalog))
@@ -199,7 +180,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
if self.system_catalog.schema_exist(schema) {
return Ok(true);
}
@@ -211,7 +192,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
if self.system_catalog.table_exist(schema, table) {
return Ok(true);
}
@@ -230,64 +211,41 @@ impl CatalogManager for KvBackendCatalogManager {
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
) -> CatalogResult<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
return Ok(Some(table));
}
let init = async {
let table_name_key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(table_name_key)
.await
.context(TableMetadataManagerSnafu)?
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
let table_id = table_name_value.table_id();
let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
build_table(table_info_value)
};
match self
.table_cache
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
let key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(key)
.await
{
Ok(table) => Ok(Some(table)),
Err(err) => match err.as_ref() {
TableCacheNotGet { .. } => Ok(None),
_ => Err(err),
},
}
.map_err(|err| GetTableCache {
err_msg: err.to_string(),
})
.context(TableMetadataManagerSnafu)?
else {
return Ok(None);
};
let table_id = table_name_value.table_id();
let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return Ok(None);
};
make_table(table_info_value).map(Some)
}
async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
) -> BoxStream<'a, CatalogResult<TableRef>> {
let sys_tables = try_stream!({
// System tables
let sys_table_names = self.system_catalog.table_names(schema);
@@ -302,6 +260,7 @@ impl CatalogManager for KvBackendCatalogManager {
.table_metadata_manager
.table_name_manager()
.tables(catalog, schema)
.await
.map_ok(|(_, v)| v.table_id());
const BATCH_SIZE: usize = 128;
let user_tables = try_stream!({
@@ -311,7 +270,7 @@ impl CatalogManager for KvBackendCatalogManager {
while let Some(table_ids) = table_id_chunks.next().await {
let table_ids = table_ids
.into_iter()
.collect::<std::result::Result<Vec<_>, _>>()
.collect::<Result<Vec<_>, _>>()
.map_err(BoxedError::new)
.context(ListTablesSnafu { catalog, schema })?;
@@ -323,7 +282,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)?;
for table_info_value in table_info_values.into_values() {
yield build_table(table_info_value)?;
yield make_table(table_info_value)?;
}
}
});
@@ -332,14 +291,6 @@ impl CatalogManager for KvBackendCatalogManager {
}
}
fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
let table_info = table_info_value
.table_info
.try_into()
.context(InvalidTableInfoInCatalogSnafu)?;
Ok(DistTable::table(Arc::new(table_info)))
}
// TODO: This struct can hold a static map of all system tables when
// the upper layer (e.g., procedure) can inform the catalog manager
// a new catalog is created.
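
One side of this hunk resolves tables with a cache-aside lookup: `try_get_with_by_ref` runs an init future on a miss, and a sentinel `TableCacheNotGet` error is translated back into `Ok(None)` so a missing table is never cached as an entry. A minimal sketch of that pattern with moka's async cache (assuming moka with its `future` feature); the error enum, key format, and fetch function are stand-ins:

```rust
use moka::future::Cache;

#[derive(Debug)]
enum LookupError {
    NotFound,
    #[allow(dead_code)]
    Internal(String),
}

/// Stand-in for the metadata fetch that backs the cache on a miss.
async fn fetch_table(name: &str) -> Result<String, LookupError> {
    if name == "greptime.public.metrics" {
        Ok(format!("TableRef({name})"))
    } else {
        Err(LookupError::NotFound)
    }
}

async fn cached_table(cache: &Cache<String, String>, name: &str) -> Option<String> {
    match cache.try_get_with(name.to_string(), fetch_table(name)).await {
        Ok(table) => Some(table),
        // The sentinel "not found" error means the table does not exist; other
        // errors would be propagated in the original code instead of swallowed.
        Err(err) if matches!(err.as_ref(), LookupError::NotFound) => None,
        Err(_) => None,
    }
}

#[tokio::main]
async fn main() {
    let cache: Cache<String, String> = Cache::new(1024);
    assert!(cached_table(&cache, "greptime.public.metrics").await.is_some());
    assert!(cached_table(&cache, "greptime.public.missing").await.is_none());
}
```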

View File

@@ -19,10 +19,10 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::CreateTableExpr;
use futures::future::BoxFuture;
use futures_util::stream::BoxStream;
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;
use crate::error::Result;
@@ -75,9 +75,9 @@ pub type OpenSystemTableHook =
/// Register system table request:
/// - When the system table is already created and registered, the hook will be called
/// with the table ref after opening the system table
/// - When the system table does not exist, create and register the table by `create_table_expr` and call `open_hook` with the created table.
/// - When the system table does not exist, create and register the table by create_table_request and call open_hook with the created table.
pub struct RegisterSystemTableRequest {
pub create_table_expr: CreateTableExpr,
pub create_table_request: CreateTableRequest,
pub open_hook: Option<OpenSystemTableHook>,
}


View File

@@ -7,15 +7,13 @@ license.workspace = true
[features]
testing = []
[lints]
workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.6"
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
@@ -24,6 +22,10 @@ common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
lazy_static.workspace = true
@@ -32,7 +34,7 @@ parking_lot = "0.12"
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }

View File

@@ -307,7 +307,7 @@ impl Database {
reason: "Expect 'AffectedRows' Flight messages to be the one and the only!"
}
);
Ok(Output::new_with_affected_rows(rows))
Ok(Output::AffectedRows(rows))
}
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
IllegalFlightMessagesSnafu {
@@ -340,7 +340,7 @@ impl Database {
output_ordering: None,
metrics: Default::default(),
};
Ok(Output::new_with_stream(Box::pin(record_batch_stream)))
Ok(Output::Stream(Box::pin(record_batch_stream)))
}
}
}

View File

@@ -134,17 +134,10 @@ impl From<Status> for Error {
impl Error {
pub fn should_retry(&self) -> bool {
// TODO(weny): figure out each case of these codes.
matches!(
!matches!(
self,
Self::RegionServer {
code: Code::Cancelled,
..
} | Self::RegionServer {
code: Code::DeadlineExceeded,
..
} | Self::RegionServer {
code: Code::Unavailable,
code: Code::InvalidArgument,
..
}
)
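
Both sides of this hunk express the retry policy as a single `matches!` over the gRPC status code: one allow-lists `Cancelled`, `DeadlineExceeded`, and `Unavailable`, the other deny-lists `InvalidArgument` via `!matches!`. A small sketch of the allow-list form against `tonic::Code`; the function name is illustrative:

```rust
use tonic::Code;

/// Returns true when a failed region call is worth retrying.
fn should_retry(code: Code) -> bool {
    matches!(
        code,
        Code::Cancelled | Code::DeadlineExceeded | Code::Unavailable
    )
}

fn main() {
    assert!(should_retry(Code::Unavailable));
    assert!(!should_retry(Code::InvalidArgument));
}
```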

View File

@@ -26,7 +26,7 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
pub use common_query::{Output, OutputData, OutputMeta};
pub use common_query::Output;
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;

View File

@@ -14,7 +14,7 @@
use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;
use arrow_flight::Ticket;
@@ -23,7 +23,7 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{Datanode, HandleResponse};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
@@ -46,7 +46,7 @@ pub struct RegionRequester {
#[async_trait]
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
self.handle_inner(request).await.map_err(|err| {
if err.should_retry() {
meta_error::Error::RetryLater {
@@ -123,8 +123,8 @@ impl RegionRequester {
.fail();
};
let metrics = Arc::new(ArcSwapOption::from(None));
let metrics_ref = metrics.clone();
let metrics_str = Arc::new(ArcSwapOption::from(None));
let ref_str = metrics_str.clone();
let tracing_context = TracingContext::from_current_span();
@@ -140,8 +140,7 @@ impl RegionRequester {
match flight_message {
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
FlightMessage::Metrics(s) => {
let m = serde_json::from_str(&s).ok().map(Arc::new);
metrics_ref.swap(m);
ref_str.swap(Some(Arc::new(s)));
break;
}
_ => {
@@ -160,12 +159,12 @@ impl RegionRequester {
schema,
stream,
output_ordering: None,
metrics,
metrics: metrics_str,
};
Ok(Box::pin(record_batch_stream))
}
async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
async fn handle_inner(&self, request: RegionRequest) -> Result<AffectedRows> {
let request_type = request
.body
.as_ref()
@@ -178,7 +177,10 @@ impl RegionRequester {
let mut client = self.client.raw_region_client()?;
let response = client
let RegionResponse {
header,
affected_rows,
} = client
.handle(request)
.await
.map_err(|e| {
@@ -192,20 +194,19 @@ impl RegionRequester {
})?
.into_inner();
check_response_header(&response.header)?;
check_response_header(header)?;
Ok(HandleResponse::from_region_response(response))
Ok(affected_rows)
}
pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
self.handle_inner(request).await
}
}
pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
let status = header
.as_ref()
.and_then(|header| header.status.as_ref())
.and_then(|header| header.status)
.context(IllegalDatabaseResponseSnafu {
err_msg: "either response header or status is missing",
})?;
@@ -219,7 +220,7 @@ pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
})?;
ServerSnafu {
code,
msg: status.err_msg.clone(),
msg: status.err_msg,
}
.fail()
}
@@ -234,19 +235,19 @@ mod test {
#[test]
fn test_check_response_header() {
let result = check_response_header(&None);
let result = check_response_header(None);
assert!(matches!(
result.unwrap_err(),
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader { status: None }));
let result = check_response_header(Some(ResponseHeader { status: None }));
assert!(matches!(
result.unwrap_err(),
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: StatusCode::Success as u32,
err_msg: String::default(),
@@ -254,7 +255,7 @@ mod test {
}));
assert!(result.is_ok());
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: u32::MAX,
err_msg: String::default(),
@@ -265,7 +266,7 @@ mod test {
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: StatusCode::Internal as u32,
err_msg: "blabla".to_string(),

View File

@@ -12,10 +12,8 @@ path = "src/bin/greptime.rs"
[features]
tokio-console = ["common-telemetry/tokio-console"]
[lints]
workspace = true
[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
auth.workspace = true
catalog.workspace = true
@@ -51,6 +49,7 @@ meta-client.workspace = true
meta-srv.workspace = true
mito2.workspace = true
nu-ansi-term = "0.46"
partition.workspace = true
plugins.workspace = true
prometheus.workspace = true
prost.workspace = true

View File

@@ -13,8 +13,5 @@
// limitations under the License.
fn main() {
// Trigger this script if the git branch/commit changes
println!("cargo:rerun-if-changed=.git/refs/heads");
common_version::setup_build_info();
}

View File

@@ -62,9 +62,7 @@ pub struct BenchTableMetadataCommand {
impl BenchTableMetadataCommand {
pub async fn build(&self) -> Result<Instance> {
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));

View File

@@ -106,15 +106,9 @@ impl TableMetadataBencher {
.await
.unwrap();
let start = Instant::now();
let table_info = table_info.unwrap();
let table_id = table_info.table_info.ident.table_id;
let _ = self
.table_metadata_manager
.delete_table_metadata(
table_id,
&table_info.table_name(),
table_route.unwrap().region_routes().unwrap(),
)
.delete_table_metadata(&table_info.unwrap(), &table_route.unwrap())
.await;
start.elapsed()
},

View File

@@ -19,7 +19,8 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector;
@@ -141,7 +142,7 @@ impl Export {
.with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(),
})?;
let OutputData::Stream(stream) = result.data else {
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
@@ -182,7 +183,7 @@ impl Export {
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else {
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let Some(record_batch) = collect(stream)
@@ -234,7 +235,7 @@ impl Export {
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let OutputData::Stream(stream) = result.data else {
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)

View File

@@ -19,10 +19,9 @@ use std::time::Instant;
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
@@ -185,15 +184,15 @@ impl Repl {
}
.context(RequestDatabaseSnafu { sql: &sql })?;
let either = match output.data {
OutputData::Stream(s) => {
let either = match output {
Output::Stream(s) => {
let x = RecordBatches::try_collect(s)
.await
.context(CollectRecordBatchesSnafu)?;
Either::Left(x)
}
OutputData::RecordBatches(x) => Either::Left(x),
OutputData::AffectedRows(rows) => Either::Right(rows),
Output::RecordBatches(x) => Either::Left(x),
Output::AffectedRows(rows) => Either::Right(rows),
};
let end = Instant::now();
@@ -253,17 +252,14 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list =
KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
None,
None,
None,
false,
plugins.clone(),
));

View File

@@ -70,7 +70,7 @@ impl UpgradeCommand {
etcd_addr: &self.etcd_addr,
})?;
let tool = MigrateTableMetadata {
etcd_store: EtcdStore::with_etcd_client(client, 128),
etcd_store: EtcdStore::with_etcd_client(client),
dryrun: self.dryrun,
skip_catalog_keys: self.skip_catalog_keys,
skip_table_global_keys: self.skip_table_global_keys,

View File

@@ -43,10 +43,6 @@ impl Instance {
pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode
}
pub fn datanode(&self) -> &Datanode {
&self.datanode
}
}
#[async_trait]
@@ -239,7 +235,6 @@ impl StartCommand {
.with_default_grpc_server(&datanode.region_server())
.enable_http_service()
.build()
.await
.context(StartDatanodeSnafu)?;
datanode.setup_services(services);

View File

@@ -16,10 +16,9 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
use catalog::kvbackend::CachedMetaKvBackendBuilder;
use clap::Parser;
use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging;
@@ -44,17 +43,13 @@ pub struct Instance {
}
impl Instance {
pub fn new(frontend: FeInstance) -> Self {
fn new(frontend: FeInstance) -> Self {
Self { frontend }
}
pub fn mut_inner(&mut self) -> &mut FeInstance {
&mut self.frontend
}
pub fn inner(&self) -> &FeInstance {
&self.frontend
}
}
#[async_trait]
@@ -248,19 +243,11 @@ impl StartCommand {
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_manager = KvBackendCatalogManager::new(
cached_meta_backend.clone(),
multi_cache_invalidator.clone(),
)
.await;
let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateTableCacheHandler::new(
multi_cache_invalidator.clone(),
cached_meta_backend.clone(),
)),
]);
@@ -272,12 +259,11 @@ impl StartCommand {
let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(),
catalog_manager,
Arc::new(DatanodeClients::default()),
meta_client,
)
.with_cache_invalidator(cached_meta_backend)
.with_plugin(plugins.clone())
.with_cache_invalidator(multi_cache_invalidator)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await
@@ -285,7 +271,6 @@ impl StartCommand {
let servers = Services::new(opts.clone(), Arc::new(instance.clone()), plugins)
.build()
.await
.context(StartFrontendSnafu)?;
instance
.build_servers(opts, servers)

View File

@@ -32,11 +32,11 @@ lazy_static::lazy_static! {
}
#[async_trait]
pub trait App: Send {
pub trait App {
fn name(&self) -> &str;
/// A hook for the implementor to do something before actual startup. Defaults to a no-op.
async fn pre_start(&mut self) -> error::Result<()> {
fn pre_start(&mut self) -> error::Result<()> {
Ok(())
}
@@ -46,21 +46,24 @@ pub trait App: Send {
}
pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
info!("Starting app: {}", app.name());
let name = app.name().to_string();
app.pre_start().await?;
app.pre_start()?;
app.start().await?;
if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to listen for ctrl-c signal: {}", e);
// It's unusual to fail to listen for the ctrl-c signal; there may be something unexpected
// in the underlying system. So we stop the app instead of carrying on regardless, to let
// people investigate the issue.
tokio::select! {
result = app.start() => {
if let Err(err) = result {
error!(err; "Failed to start app {name}!");
}
}
_ = tokio::signal::ctrl_c() => {
if let Err(err) = app.stop().await {
error!(err; "Failed to stop app {name}!");
}
info!("Goodbye!");
}
}
app.stop().await?;
info!("Goodbye!");
Ok(())
}
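
One side of `start_app` races the server future against ctrl-c inside `tokio::select!` and shuts down on whichever finishes first; the other starts the app, blocks on the signal, and then stops it. A self-contained sketch of the select-based shutdown (assuming tokio with its `signal`, `time`, and macro features; `run_server` and `shutdown` are placeholders):

```rust
use std::time::Duration;

/// Stand-in for `app.start()`: run until the work (or an error) ends.
async fn run_server() -> Result<(), String> {
    tokio::time::sleep(Duration::from_secs(3600)).await;
    Ok(())
}

/// Stand-in for `app.stop()`.
async fn shutdown() {
    println!("stopping gracefully");
}

#[tokio::main]
async fn main() {
    tokio::select! {
        result = run_server() => {
            if let Err(err) = result {
                eprintln!("server exited with error: {err}");
            }
        }
        _ = tokio::signal::ctrl_c() => {
            shutdown().await;
            println!("Goodbye!");
        }
    }
}
```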

View File

@@ -117,12 +117,10 @@ struct StartCommand {
/// The working home directory of this metasrv instance.
#[clap(long)]
data_home: Option<String>,
/// If it's not empty, the metasrv will store all data with this key prefix.
#[clap(long, default_value = "")]
store_key_prefix: String,
/// The max operations per txn
#[clap(long)]
max_txn_ops: Option<usize>,
}
impl StartCommand {
@@ -183,10 +181,6 @@ impl StartCommand {
opts.store_key_prefix = self.store_key_prefix.clone()
}
if let Some(max_txn_ops) = self.max_txn_ops {
opts.max_txn_ops = max_txn_ops;
}
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;
@@ -218,7 +212,6 @@ impl StartCommand {
mod tests {
use std::io::Write;
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use meta_srv::selector::SelectorType;
@@ -298,10 +291,6 @@ mod tests {
.first_heartbeat_estimate
.as_millis()
);
assert_eq!(
options.procedure.max_metadata_value_size,
Some(ReadableSize::kb(1500))
);
}
#[test]

View File

@@ -16,14 +16,13 @@ use std::sync::Arc;
use std::{fs, path};
use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::ddl_manager::DdlManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -400,10 +399,6 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
@@ -424,34 +419,28 @@ impl StartCommand {
let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
let table_meta_allocator = TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),
));
table_metadata_manager.clone(),
);
let ddl_task_executor = Self::create_ddl_task_executor(
table_metadata_manager,
procedure_manager.clone(),
datanode_manager.clone(),
multi_cache_invalidator,
table_meta_allocator,
)
.await?;
let mut frontend = FrontendBuilder::new(
kv_backend,
catalog_manager,
datanode_manager,
ddl_task_executor,
)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
.await
.context(StartFrontendSnafu)?;
frontend
.build_servers(fe_opts, servers)
@@ -469,23 +458,21 @@ impl StartCommand {
table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_meta_allocator: TableMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
table_meta_allocator: TableMetadataAllocator,
) -> Result<DdlTaskExecutorRef> {
let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
DdlManager::try_new(
procedure_manager,
datanode_manager,
cache_invalidator,
Arc::new(DummyCacheInvalidator),
table_metadata_manager,
table_meta_allocator,
Arc::new(MemoryRegionKeeper::default()),
true,
)
.context(InitDdlManagerSnafu)?,
);
Ok(procedure_executor)
Ok(ddl_task_executor)
}
pub async fn create_table_metadata_manager(

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
anymap = "1.0.0-beta.2"
bitvec = "1.0"

View File

@@ -21,8 +21,6 @@ pub mod readable_size;
use core::any::Any;
use std::sync::{Arc, Mutex, MutexGuard};
pub type AffectedRows = usize;
pub use bit_vec::BitVec;
/// [`Plugins`] is a wrapper of Arc contents.

View File

@@ -1,6 +1,20 @@
// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.
// This file is copied from https://github.com/tikv/raft-engine/blob/0.3.0/src/util.rs
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs
use std::fmt::{self, Debug, Display, Write};
use std::ops::{Div, Mul};

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
common-error.workspace = true
common-macro.workspace = true

View File

@@ -55,10 +55,10 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
/// `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
match parse_optional_catalog_and_schema_from_db_string(db) {
(Some(catalog), schema) => (catalog, schema),
(None, schema) => (DEFAULT_CATALOG_NAME.to_string(), schema),
(None, schema) => (DEFAULT_CATALOG_NAME, schema),
}
}
@@ -66,12 +66,12 @@ pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
///
/// Similar to [`parse_catalog_and_schema_from_db_string`] but returns an optional
/// catalog if it's not provided in the database name.
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<String>, String) {
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<&str>, &str) {
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
if parts.len() == 2 {
(Some(parts[0].to_lowercase()), parts[1].to_lowercase())
(Some(parts[0]), parts[1])
} else {
(None, db.to_lowercase())
(None, db)
}
}
@@ -88,37 +88,32 @@ mod tests {
#[test]
fn test_parse_catalog_and_schema() {
assert_eq!(
(DEFAULT_CATALOG_NAME.to_string(), "fullschema".to_string()),
(DEFAULT_CATALOG_NAME, "fullschema"),
parse_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
("catalog".to_string(), "schema".to_string()),
("catalog", "schema"),
parse_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
("catalog".to_string(), "schema1-schema2".to_string()),
("catalog", "schema1-schema2"),
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
assert_eq!(
(None, "fullschema".to_string()),
(None, "fullschema"),
parse_optional_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
(Some("catalog".to_string()), "schema".to_string()),
(Some("catalog"), "schema"),
parse_optional_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
(Some("catalog".to_string()), "schema".to_string()),
parse_optional_catalog_and_schema_from_db_string("CATALOG-SCHEMA")
);
assert_eq!(
(Some("catalog".to_string()), "schema1-schema2".to_string()),
(Some("catalog"), "schema1-schema2"),
parse_optional_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
}
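
The parser change above is mostly about ownership and normalization: one side returns borrowed `&str` slices of the input, the other lowercases and allocates `String`s so `CATALOG-SCHEMA` normalizes to `catalog`/`schema`. A standalone version of the owned, lowercasing variant, stitched together from the lines shown above (the default catalog constant is inlined here as an assumption):

```rust
// Assumed value; the real constant comes from `common_catalog::consts`.
const DEFAULT_CATALOG_NAME: &str = "greptime";

fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<String>, String) {
    // A database string is either "<schema>" or "<catalog>-<schema>".
    let parts = db.splitn(2, '-').collect::<Vec<&str>>();
    if parts.len() == 2 {
        (Some(parts[0].to_lowercase()), parts[1].to_lowercase())
    } else {
        (None, db.to_lowercase())
    }
}

fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
    match parse_optional_catalog_and_schema_from_db_string(db) {
        (Some(catalog), schema) => (catalog, schema),
        (None, schema) => (DEFAULT_CATALOG_NAME.to_string(), schema),
    }
}

fn main() {
    assert_eq!(
        parse_catalog_and_schema_from_db_string("CATALOG-SCHEMA"),
        ("catalog".to_string(), "schema".to_string())
    );
    assert_eq!(
        parse_catalog_and_schema_from_db_string("fullschema"),
        (DEFAULT_CATALOG_NAME.to_string(), "fullschema".to_string())
    );
}
```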

View File

@@ -4,11 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
common-base.workspace = true
humantime-serde.workspace = true
num_cpus.workspace = true
serde.workspace = true
sysinfo.workspace = true

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
arrow.workspace = true
arrow-schema.workspace = true

View File

@@ -28,15 +28,12 @@ const REGION: &str = "region";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
pub fn is_supported_in_s3(key: &str) -> bool {
[
ENDPOINT,
ACCESS_KEY_ID,
SECRET_ACCESS_KEY,
SESSION_TOKEN,
REGION,
ENABLE_VIRTUAL_HOST_STYLE,
]
.contains(&key)
key == ENDPOINT
|| key == ACCESS_KEY_ID
|| key == SECRET_ACCESS_KEY
|| key == SESSION_TOKEN
|| key == REGION
|| key == ENABLE_VIRTUAL_HOST_STYLE
}
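
The two spellings above are equivalent; the slice form simply keeps the supported keys in one list. A tiny standalone version; only `ENABLE_VIRTUAL_HOST_STYLE`'s value appears in the diff, so the other constant values are assumed:

```rust
// Assumed key strings except ENABLE_VIRTUAL_HOST_STYLE, which is shown in the diff.
const ENDPOINT: &str = "endpoint";
const ACCESS_KEY_ID: &str = "access_key_id";
const SECRET_ACCESS_KEY: &str = "secret_access_key";
const SESSION_TOKEN: &str = "session_token";
const REGION: &str = "region";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";

fn is_supported_in_s3(key: &str) -> bool {
    [
        ENDPOINT,
        ACCESS_KEY_ID,
        SECRET_ACCESS_KEY,
        SESSION_TOKEN,
        REGION,
        ENABLE_VIRTUAL_HOST_STYLE,
    ]
    .contains(&key)
}

fn main() {
    assert!(is_supported_in_s3("region"));
    assert!(!is_supported_in_s3("bucket"));
}
```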
pub fn build_s3_backend(

View File

@@ -4,10 +4,8 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
arrow.workspace = true
bigdecimal.workspace = true
common-error.workspace = true
common-macro.workspace = true

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
snafu.workspace = true
strum.workspace = true

View File

@@ -19,9 +19,7 @@ pub mod format;
pub mod mock;
pub mod status_code;
pub use snafu;
// HACK - these headers live here so that they can be shared by gRPC services. For common
// HTTP headers, please define them in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";
pub use snafu;

View File

@@ -4,18 +4,13 @@ edition.workspace = true
version.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
common-base.workspace = true
common-catalog.workspace = true
chrono-tz = "0.6"
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
@@ -23,16 +18,14 @@ common-time.workspace = true
common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
libc = "0.2"
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
paste = "1.0"
serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
[dev-dependencies]

View File

@@ -30,17 +30,6 @@ pub struct FunctionContext {
pub state: Arc<FunctionState>,
}
impl FunctionContext {
/// Create a mock [`FunctionContext`] for test.
#[cfg(any(test, feature = "testing"))]
pub fn mock() -> Self {
Self {
query_ctx: QueryContextBuilder::default().build(),
state: Arc::new(FunctionState::mock()),
}
}
}
impl Default for FunctionContext {
fn default() -> Self {
Self {

View File

@@ -21,7 +21,6 @@ use once_cell::sync::Lazy;
use crate::function::FunctionRef;
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
@@ -81,7 +80,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
NumpyFunction::register(&function_registry);
TimestampFunction::register(&function_registry);
DateFunction::register(&function_registry);
ExpressionFunction::register(&function_registry);
// Aggregate functions
AggregateFunctions::register(&function_registry);

View File

@@ -13,58 +13,42 @@
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::ProcedureStateResponse;
use async_trait::async_trait;
use common_base::AffectedRows;
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
use common_query::error::Result;
use common_query::Output;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
use table::requests::{DeleteRequest, InsertRequest};
pub type AffectedRows = usize;
/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
pub trait TableMutationHandler: Send + Sync {
/// Inserts rows into the table.
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<Output>;
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
/// Delete rows from the table.
async fn delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
/// Trigger a flush task for table.
async fn flush(&self, request: FlushTableRequest, ctx: QueryContextRef)
-> Result<AffectedRows>;
/// Trigger a compaction task for table.
async fn compact(
/// Migrate a region from source peer to target peer, returns the procedure id if success.
async fn migrate_region(
&self,
request: CompactTableRequest,
ctx: QueryContextRef,
) -> Result<AffectedRows>;
/// Trigger a flush task for a table region.
async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
-> Result<AffectedRows>;
/// Trigger a compaction task for a table region.
async fn compact_region(
&self,
region_id: RegionId,
ctx: QueryContextRef,
) -> Result<AffectedRows>;
region_id: u64,
from_peer: u64,
to_peer: u64,
replay_timeout: Duration,
) -> Result<String>;
}
/// A trait for handling procedure service requests in `QueryEngine`.
/// A trait for handling meta service requests in `QueryEngine`.
#[async_trait]
pub trait ProcedureServiceHandler: Send + Sync {
/// Migrate a region from source peer to target peer, returns the procedure id if success.
async fn migrate_region(&self, request: MigrateRegionRequest) -> Result<Option<String>>;
pub trait MetaServiceHandler: Send + Sync {
/// Query the procedure' state by its id
async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
}
pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;
pub type ProcedureServiceHandlerRef = Arc<dyn ProcedureServiceHandler>;
pub type MetaServiceHandlerRef = Arc<dyn MetaServiceHandler>;

View File

@@ -12,12 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_query::error::{InvalidInputTypeSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use snafu::ResultExt;
/// Create a function signature with one-of signatures built by interleaving the two argument type lists.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -31,15 +27,3 @@ pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>)
Signature::one_of(sigs, Volatility::Immutable)
}
/// Cast a [`ValueRef`] to u64, returns `None` if fails
pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
cast((*value).into(), &ConcreteDataType::uint64_datatype())
.context(InvalidInputTypeSnafu {
err_msg: format!(
"Failed to cast input into uint64, actual type: {:#?}",
value.data_type(),
),
})
.map(|v| v.as_u64())
}

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod macros;
pub mod scalars;
mod system;
mod table;

View File

@@ -1,27 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Ensure the current function is invoked under the `greptime` catalog.
#[macro_export]
macro_rules! ensure_greptime {
($func_ctx: expr) => {{
use common_catalog::consts::DEFAULT_CATALOG_NAME;
snafu::ensure!(
$func_ctx.query_ctx.current_catalog() == DEFAULT_CATALOG_NAME,
common_query::error::PermissionDeniedSnafu {
err_msg: format!("current catalog is not {DEFAULT_CATALOG_NAME}")
}
);
}};
}
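
The `ensure_greptime!` macro above guards a function so it only proceeds when the session's current catalog is the default `greptime` catalog, and otherwise bails out with a permission error. A simplified standalone analogue of that guard; the context struct, error type, and default catalog value are stand-ins for the real `common_query`/`session` types:

```rust
// Assumed value; the real constant comes from `common_catalog::consts`.
const DEFAULT_CATALOG_NAME: &str = "greptime";

/// Stand-in for the real `FunctionContext` / query context.
struct FunctionContext {
    current_catalog: String,
}

/// Bail out of the calling function unless we are in the default catalog.
macro_rules! ensure_greptime {
    ($func_ctx:expr) => {{
        if $func_ctx.current_catalog != DEFAULT_CATALOG_NAME {
            return Err(format!("current catalog is not {DEFAULT_CATALOG_NAME}"));
        }
    }};
}

fn admin_only_udf(ctx: &FunctionContext) -> Result<&'static str, String> {
    ensure_greptime!(ctx);
    Ok("ok")
}

fn main() {
    let good = FunctionContext { current_catalog: "greptime".to_string() };
    let bad = FunctionContext { current_catalog: "other".to_string() };
    assert!(admin_only_udf(&good).is_ok());
    assert!(admin_only_udf(&bad).is_err());
}
```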

View File

@@ -14,22 +14,8 @@
mod binary;
mod ctx;
mod is_null;
mod unary;
use std::sync::Arc;
pub use binary::scalar_binary_op;
pub use ctx::EvalContext;
pub use unary::scalar_unary_op;
use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::is_null::IsNullFunction;
pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(IsNullFunction));
}
}

Some files were not shown because too many files have changed in this diff.