Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-07 13:52:59 +00:00)

Compare commits: feat/merge ... v0.8.0-nig (197 commits)
Commits in this compare (197, abbreviated SHA1):

4a5bb698a9, 18d676802a, 93da45f678, 7a19f66be0, 500f9f10fc, f49cd0ca18, ffbb132f27, 14267c2aed, 77cc7216af, 63681f0e4d,
06a90527a3, d5ba2fcf9d, e3b37ee2c9, 5d7ce08358, 92a8e863de, 9428cb8e7c, 5addb7d75a, 623c930736, 5fa01e7a96, 922b1a9b66,
653697f1d5, 83643eb195, d83279567b, 150454b1fd, 58c7858cd4, dd18d8c97b, 175929426a, 8f9676aad2, 74565151e9, 83c1b485ea,
c2dd1136fe, 7c1c6e8b8c, 62d8bbb10c, bf14d33962, 0f1747b80d, 992c7ec71b, 2ad0b24efa, 2b2fd80bf4, 24886b9530, 8345f1753c,
3420a010e6, 9f020aa414, c9ac72e7f8, 86fb9d8ac7, 1f0fc40287, 8b7a5aaa4a, 856a4e1e4f, 39b69f1e3b, bbcdb28b7c, 6377982501,
ddbcff68dd, 5b315c2d40, 9816d2a08b, a99d6eb3f9, 2c115bc22a, 641592644d, fa0f3555d4, 3cad844acd, cf25cf984b, 3acd5bfad0,
343525dab8, 0afac58e4d, 393ea44de0, 44731fd653, d36a5a74d3, 74862f8c3f, a52aedec5b, b6fac619a6, a29e7ebb7d, 8ca9e01455,
3a326775ee, 5ad3b7984e, 4fc27bdc75, e3c82568e5, 61f0703af8, b85d7bb575, d334d74986, 5ca8521e87, e4333969b4, b55905cf66,
fb4da05f25, 904484b525, cafb4708ce, 7c895e2605, 9afe327bca, 58bd065c6b, 9aa8f756ab, 7639c227ca, 1255c1fc9e, 06dcd0f6ed,
0a4444a43a, b7ac8d6aa8, e767f37241, da098f5568, aa953dcc34, aa125a50f9, d8939eb891, 0bb949787c, 8c37c3fc0f, 21ff3620be,
aeca0d8e8a, a309cd018a, 3ee53360ee, 352bd7b6fd, 3f3ef2e7af, a218f12bd9, c884c56151, 9ec288cab9, 1f1491e429, c52bc613e0,
a9d42f7b87, 86ce2d8713, 5d644c0b7f, 020635063c, 97cbfcfe23, 7183fa198c, 02b18fbca1, 7b1c3503d0, 6fd2ff49d5, 53f2a5846c,
49157868f9, ae2c18e1cf, e6819412c5, 2a675e0794, 0edf1bbacc, 8609977b52, 2d975e4f22, 00cbbc97ae, 7d30c2484b, 376409b857,
d4a54a085b, c1a370649e, 3cad9d989d, a50025269f, a3533c4ea0, 3413fc0781, dc205a2c5d, a0a8e8c587, c3c80b92c8, a8cbec824c,
33d894c1f0, 7942b8fae9, b97f957489, f3d69e9563, 4b36c285f1, dbb1ce1a9b, 3544c9334c, 492a00969d, 206666bff6, 7453d9779d,
8e3e0fd528, b1e290f959, d8dc93fccc, 3887d207b6, e859f0e67d, ce397ebcc6, 26011ed0b6, 8087822ab2, e481f073f5, 606309f49a,
8059b95e37, afe4633320, abbfd23d4b, 1df64f294b, a6564e72b4, 1f1d1b4f57, b144836935, 93d9f48dd7, 90e9b69035, 2035e7bf4c,
7341f23019, 41ee0cdd5a, 578dd8f87a, 1dc4fec662, f26505b625, 8289b0dec2, 53105b99e7, 564fe3beca, e9a2b0a9ee, 860b1e9d9e,
7c88d721c2, 90169c868d, 4c07606da6, a7bf458a37, fa08085119, 86a98c80f5, 085a380019, d9a96344ee, 41656c8635, cf08a3de6b,
f087a843bb, 450dfe324d, 3dfe4a2e5a, eded08897d, b1f54d8a03, bf5e1905cd, 6628c41c36
@@ -3,13 +3,3 @@ linker = "aarch64-linux-gnu-gcc"
 [alias]
 sqlness = "run --bin sqlness-runner --"
-
-[build]
-rustflags = [
-# lints
-# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
-"-Wclippy::print_stdout",
-"-Wclippy::print_stderr",
-"-Wclippy::implicit_clone",
-]
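The `[alias]` entry kept by this hunk maps `cargo sqlness` to `cargo run --bin sqlness-runner --`. A rough sketch of how it is used locally; the `-c ./tests/cases` argument is borrowed from the sqlness CI steps later in this compare, so treat the exact flags as an assumption:

```shell
# Expands to: cargo run --bin sqlness-runner -- -c ./tests/cases
# Run from the repository root so that ./tests/cases resolves.
cargo sqlness -c ./tests/cases
```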
.editorconfig (new file, 10 lines)
@@ -0,0 +1,10 @@
root = true

[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[{Makefile,**.mk}]
indent_style = tab
@@ -21,3 +21,6 @@ GT_GCS_CREDENTIAL_PATH = GCS credential path
 GT_GCS_ENDPOINT = GCS end point
 # Settings for kafka wal test
 GT_KAFKA_ENDPOINTS = localhost:9092
+
+# Setting for fuzz tests
+GT_MYSQL_ADDR = localhost:4002
@@ -34,7 +34,7 @@ runs:
 - name: Upload sqlness logs
 if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
 path: /tmp/greptime-*.log

@@ -67,7 +67,7 @@ runs:
 - name: Upload sqlness logs
 if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
 path: /tmp/greptime-*.log

@@ -62,15 +62,15 @@ runs:
 - name: Upload sqlness logs
 if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: ${{ runner.temp }}/greptime-*.log
+path: /tmp/greptime-*.log
 retention-days: 3

 - name: Build greptime binary
 shell: pwsh
-run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
+run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime

 - name: Upload artifacts
 uses: ./.github/actions/upload-artifacts
.github/actions/fuzz-test/action.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002
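The action above wraps a single `cargo fuzz run` invocation. A minimal local reproduction, assuming `cargo-fuzz` is installed and a standalone GreptimeDB is already serving MySQL on 127.0.0.1:4002 (both setup steps appear in the develop workflow later in this compare):

```shell
# One-time setup, mirroring the develop workflow in this compare.
cargo install cargo-fuzz

# Run one fuzz target for up to 120 seconds against the running instance,
# exactly as the composite action does; fuzz_create_table is one of the
# targets listed in the workflow matrix.
GT_MYSQL_ADDR=127.0.0.1:4002 \
  cargo fuzz run fuzz_create_table --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
```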
.github/pull_request_template.md (8 changed lines)
@@ -1,8 +1,10 @@
-I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
+I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
+
+## Refer to a related PR or issue link (optional)

 ## What's changed and what's your intention?

-_PLEASE DO NOT LEAVE THIS EMPTY !!!_
+__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__

 Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -16,5 +18,3 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
 - [x] This PR does not require documentation updates.
-
-## Refer to a related PR or issue link (optional)
.github/workflows/apidoc.yml (1 changed line)
@@ -40,3 +40,4 @@ jobs:
 uses: JamesIves/github-pages-deploy-action@v4
 with:
 folder: target/doc
+single-commit: true
88
.github/workflows/develop.yml
vendored
88
.github/workflows/develop.yml
vendored
@@ -1,7 +1,7 @@
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'config/**'
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Clippy` job
|
||||
shared-key: "check-lint"
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
toolchain: stable
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-toml"
|
||||
- name: Install taplo
|
||||
@@ -102,7 +102,7 @@ jobs:
|
||||
shared-key: "build-binaries"
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
run: cargo build
|
||||
run: cargo build --bin greptime --bin sqlness-runner
|
||||
- name: Pack greptime binaries
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -117,6 +117,46 @@ jobs:
|
||||
artifacts-dir: bins
|
||||
version: current
|
||||
|
||||
fuzztest:
|
||||
name: Fuzz Test
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||
cargo install cargo-fuzz
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
run: |
|
||||
./bins/greptime standalone start&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
needs: build
|
||||
@@ -136,13 +176,12 @@ jobs:
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
|
||||
# FIXME: Logs cannot found be on failure (or even success). Need to figure out the cause.
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
path: /tmp/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
sqlness-kafka-wal:
|
||||
@@ -167,13 +206,12 @@ jobs:
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
|
||||
# FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
name: sqlness-logs-with-kafka-wal
|
||||
path: /tmp/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
fmt:
|
||||
@@ -191,7 +229,7 @@ jobs:
|
||||
components: rustfmt
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-rust-fmt"
|
||||
- name: Run cargo fmt
|
||||
@@ -212,7 +250,7 @@ jobs:
|
||||
components: clippy
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Check` job
|
||||
shared-key: "check-lint"
|
||||
@@ -241,6 +279,10 @@ jobs:
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
- name: Docker Cache
|
||||
uses: ScribeMD/docker-cache@0.3.7
|
||||
with:
|
||||
key: docker-${{ runner.os }}-coverage
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
@@ -271,10 +313,28 @@ jobs:
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Codecov upload
|
||||
uses: codecov/codecov-action@v2
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./lcov.info
|
||||
flags: rust
|
||||
fail_ci_if_error: false
|
||||
verbose: true
|
||||
|
||||
compat:
|
||||
name: Compatibility Test
|
||||
needs: build
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: |
|
||||
mkdir -p ./bins/current
|
||||
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
|
||||
- run: ./tests/compat/test-compat.sh 0.6.0
|
||||
|
||||
.github/workflows/docs.yml (14 changed lines)
@@ -61,6 +61,18 @@ jobs:
 sqlness:
 name: Sqlness Test
-runs-on: ubuntu-20.04
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
 steps:
 - run: 'echo "No action required"'
+
+sqlness-kafka-wal:
+name: Sqlness Test with Kafka Wal
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
+steps:
+- run: 'echo "No action required"'
.github/workflows/nightly-ci.yml (4 changed lines)
@@ -45,10 +45,10 @@ jobs:
 {"text": "Nightly CI failed for sqlness tests"}
 - name: Upload sqlness logs
 if: always()
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: ${{ runner.temp }}/greptime-*.log
+path: /tmp/greptime-*.log
 retention-days: 3

 test-on-windows:
12
.github/workflows/release.yml
vendored
12
.github/workflows/release.yml
vendored
@@ -91,7 +91,7 @@ env:
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.7.0
|
||||
NEXT_RELEASE_VERSION: v0.8.0
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
@@ -221,6 +221,8 @@ jobs:
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
@@ -260,6 +262,8 @@ jobs:
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
@@ -284,7 +288,7 @@ jobs:
|
||||
- name: Set build windows result
|
||||
id: set-build-windows-result
|
||||
run: |
|
||||
echo "build-windows-result=success" >> $GITHUB_OUTPUT
|
||||
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
@@ -295,6 +299,8 @@ jobs:
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
outputs:
|
||||
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -310,7 +316,7 @@ jobs:
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Set build image result
|
||||
id: set-image-build-result
|
||||
id: set-build-image-result
|
||||
run: |
|
||||
echo "build-image-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
|
||||
.github/workflows/unassign.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:

permissions:
contents: read
issues: write
pull-requests: write

jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}
.gitignore (4 changed lines)
@@ -46,3 +46,7 @@ benchmarks/data
 *.code-workspace

 venv/
+
+# Fuzz tests
+tests-fuzz/artifacts/
+tests-fuzz/corpus/
@@ -1,132 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||
identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall
|
||||
community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||
any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email address,
|
||||
without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
info@greptime.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of
|
||||
actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||
community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||
[https://www.contributor-covenant.org/translations][translations].
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||
[FAQ]: https://www.contributor-covenant.org/faq
|
||||
[translations]: https://www.contributor-covenant.org/translations
|
||||
Cargo.lock (generated, 781 changed lines): file diff suppressed because it is too large.
24
Cargo.toml
24
Cargo.toml
@@ -18,6 +18,7 @@ members = [
|
||||
"src/common/grpc-expr",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/plugins",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/query",
|
||||
@@ -61,17 +62,23 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.6.0"
|
||||
version = "0.7.1"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.lints]
|
||||
clippy.print_stdout = "warn"
|
||||
clippy.print_stderr = "warn"
|
||||
clippy.implicit_clone = "warn"
|
||||
rust.unknown_lints = "deny"
|
||||
|
||||
[workspace.dependencies]
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
arrow-flight = "47.0"
|
||||
arrow-ipc = "47.0"
|
||||
arrow-ipc = { version = "47.0", features = ["lz4"] }
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
@@ -96,13 +103,14 @@ etcd-client = "0.12"
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96f1f0404f421ee560a4310c73c5071e49168168" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "06f6297ff3cab578a1589741b504342fbad70453" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
notify = "6.1"
|
||||
num_cpus = "1.16"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
@@ -118,7 +126,7 @@ prost = "0.12"
|
||||
raft-engine = { version = "0.4.1", default-features = false }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
regex-automata = { version = "0.2", features = ["transducer"] }
|
||||
regex-automata = { version = "0.4" }
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
@@ -126,8 +134,9 @@ reqwest = { version = "0.11", default-features = false, features = [
|
||||
] }
|
||||
rskafka = "0.5"
|
||||
rust_decimal = "1.33"
|
||||
schemars = "0.8"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
||||
serde_with = "3"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.7"
|
||||
@@ -164,6 +173,7 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-plugins = { path = "src/common/plugins" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-query = { path = "src/common/query" }
|
||||
@@ -201,7 +211,7 @@ table = { path = "src/table" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
|
||||
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
|
||||
|
||||
[profile.release]
|
||||
debug = 1
|
||||
|
||||
Makefile (5 changed lines)
@@ -3,6 +3,7 @@ CARGO_PROFILE ?=
 FEATURES ?=
 TARGET_DIR ?=
 TARGET ?=
+BUILD_BIN ?= greptime
 CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
@@ -45,6 +46,10 @@ ifneq ($(strip $(TARGET)),)
 CARGO_BUILD_OPTS += --target ${TARGET}
 endif

+ifneq ($(strip $(BUILD_BIN)),)
+CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
+endif
+
 ifneq ($(strip $(RELEASE)),)
 CARGO_BUILD_OPTS += --release
 endif
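The new `BUILD_BIN` variable defaults to `greptime` and, when non-empty, appends `--bin ${BUILD_BIN}` to `CARGO_BUILD_OPTS`. A hypothetical invocation, assuming the repository's default build target (written here as `make build`) consumes `CARGO_BUILD_OPTS`:

```shell
# Build only the sqlness-runner binary instead of the default greptime binary;
# the Makefile turns this into "cargo build --locked ... --bin sqlness-runner".
make build BUILD_BIN=sqlness-runner
```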
219
README.md
219
README.md
@@ -6,145 +6,154 @@
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
The next-generation hybrid time-series/analytics processing database in the cloud
|
||||
</h3>
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
<a href="https://docs.greptime.com/">User guide</a> |
|
||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
|
||||
</h4>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
|
||||
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
|
||||
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
|
||||
</p>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/greptime/greptimedb/">
|
||||
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
|
||||
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
|
||||
</a>
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
|
||||
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
|
||||
</a>
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
|
||||
</a>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
||||
|
||||
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
||||
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
<br/>
|
||||
|
||||
## What is GreptimeDB
|
||||
<a href="https://greptime.com/slack">
|
||||
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
|
||||
</a>
|
||||
<a href="https://twitter.com/greptime">
|
||||
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
|
||||
</a>
|
||||
<a href="https://www.linkedin.com/company/greptime/">
|
||||
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
GreptimeDB is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
|
||||
It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage.
|
||||
## Introduction
|
||||
|
||||
Our core developers have been building time-series data platforms for years. Based on their best-practices, GreptimeDB is born to give you:
|
||||
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
|
||||
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
|
||||
|
||||
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
|
||||
- Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
|
||||
- Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
|
||||
- Native SQL and PromQL for queries, and Python scripting to facilitate complex analytical tasks.
|
||||
- Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down.
|
||||
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc.
|
||||
## Why GreptimeDB
|
||||
|
||||
## Quick Start
|
||||
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
|
||||
|
||||
### [GreptimePlay](https://greptime.com/playground)
|
||||
* **Easy horizontal scaling**
|
||||
|
||||
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
|
||||
|
||||
* **Analyzing time-series data**
|
||||
|
||||
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
|
||||
|
||||
* **Cloud-native distributed database**
|
||||
|
||||
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
|
||||
|
||||
* **Performance and Cost-effective**
|
||||
|
||||
Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
|
||||
|
||||
* **Compatible with InfluxDB, Prometheus and more protocols**
|
||||
|
||||
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
|
||||
|
||||
## Try GreptimeDB
|
||||
|
||||
### 1. [GreptimePlay](https://greptime.com/playground)
|
||||
|
||||
Try out the features of GreptimeDB right from your browser.
|
||||
|
||||
### Build
|
||||
### 2. [GreptimeCloud](https://console.greptime.cloud/)
|
||||
|
||||
#### Build from Source
|
||||
Start instantly with a free cluster.
|
||||
|
||||
To compile GreptimeDB from source, you'll need:
|
||||
### 3. Docker Image
|
||||
|
||||
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
|
||||
available as `build-essential` on ubuntu and similar name on other platforms.
|
||||
- Rust: the easiest way to install Rust is to use
|
||||
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
|
||||
install correct Rust version for you.
|
||||
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
|
||||
available from major package manager on macos and linux distributions. You can
|
||||
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
|
||||
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
|
||||
keyword. You can check it with `protoc --version`.
|
||||
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
|
||||
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
|
||||
scripting engine(In CPython Mode). This is available as `python3-dev` on
|
||||
ubuntu, you can install it with `sudo apt install python3-dev`, or
|
||||
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
|
||||
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
|
||||
To install GreptimeDB locally, the recommended way is via Docker:
|
||||
|
||||
#### Build with Docker
|
||||
|
||||
A docker image with necessary dependencies is provided:
|
||||
|
||||
```
|
||||
docker build --network host -f docker/Dockerfile -t greptimedb .
|
||||
```shell
|
||||
docker pull greptime/greptimedb
|
||||
```
|
||||
|
||||
### Run
|
||||
|
||||
Start GreptimeDB from source code, in standalone mode:
|
||||
Start a GreptimeDB container with:
|
||||
|
||||
```shell
|
||||
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
|
||||
|
||||
## Getting Started
|
||||
|
||||
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
|
||||
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
|
||||
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
|
||||
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
|
||||
|
||||
## Build
|
||||
|
||||
Check the prerequisite:
|
||||
|
||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
||||
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
||||
|
||||
Build GreptimeDB binary:
|
||||
|
||||
```shell
|
||||
make
|
||||
```
|
||||
|
||||
Run a standalone server:
|
||||
|
||||
```shell
|
||||
cargo run -- standalone start
|
||||
```
|
||||
|
||||
Or if you built from docker:
|
||||
|
||||
```
|
||||
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
|
||||
### Get started
|
||||
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
|
||||
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
|
||||
|
||||
## Resources
|
||||
|
||||
### Installation
|
||||
|
||||
- [Pre-built Binaries](https://greptime.com/download):
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
|
||||
We recommend using virtualenv for the installation process to manage multiple Python versions.
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
|
||||
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
|
||||
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
|
||||
Kubernetes deployment
|
||||
|
||||
### Documentation
|
||||
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
|
||||
- GreptimeDB [Developer
|
||||
Guide](https://docs.greptime.com/developer-guide/overview.html)
|
||||
- GreptimeDB [internal code document](https://greptimedb.rs)
|
||||
## Extension
|
||||
|
||||
### Dashboard
|
||||
|
||||
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
|
||||
|
||||
### SDK
|
||||
|
||||
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
|
||||
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
|
||||
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
|
||||
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
|
||||
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
|
||||
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
|
||||
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
|
||||
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
|
||||
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
|
||||
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
|
||||
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptime-ingester-js)
|
||||
|
||||
### Grafana Dashboard
|
||||
|
||||
Our official Grafana dashboard is available at [grafana](./grafana/README.md) directory.
|
||||
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
|
||||
|
||||
## Project Status
|
||||
|
||||
This project is in its early stage and under heavy development. We move fast and
|
||||
break things. Benchmark on development branch may not represent its potential
|
||||
performance. We release pre-built binaries constantly for functional
|
||||
evaluation. Do not use it in production at the moment.
|
||||
|
||||
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
|
||||
The current version has not yet reached General Availability version standards.
|
||||
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
|
||||
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
||||
|
||||
## Community
|
||||
|
||||
@@ -154,12 +163,12 @@ and what went wrong. If you have any questions or if you would like to get invol
|
||||
community, please check out:
|
||||
|
||||
- GreptimeDB Community on [Slack](https://greptime.com/slack)
|
||||
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
- Greptime official [Website](https://greptime.com)
|
||||
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
- Greptime official [website](https://greptime.com)
|
||||
|
||||
In addition, you may:
|
||||
|
||||
- View our official [Blog](https://greptime.com/blogs/index)
|
||||
- View our official [Blog](https://greptime.com/blogs/)
|
||||
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
|
||||
- Follow us on [Twitter](https://twitter.com/greptime)
|
||||
|
||||
@@ -170,7 +179,7 @@ open contributions and allowing you to use the software however you want.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
|
||||
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
@@ -11,6 +14,5 @@ clap.workspace = true
|
||||
client.workspace = true
|
||||
futures-util.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -29,7 +29,7 @@ use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
@@ -502,9 +502,9 @@ async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
match res.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
|
||||
OutputData::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,5 +8,6 @@ coverage:
 ignore:
 - "**/error*.rs" # ignore all error.rs files
 - "tests/runner/*.rs" # ignore integration test runner
+- "tests-integration/**/*.rs" # ignore integration tests
 comment: # this is a top-level key
 layout: "diff"
@@ -134,10 +134,22 @@ create_on_compaction = "auto"
|
||||
apply_on_query = "auto"
|
||||
# Memory threshold for performing an external sort during index creation.
|
||||
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64MB"
|
||||
mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
intermediate_path = ""
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "partition_tree": partition tree memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "partition_tree"
|
||||
# The max number of keys in one shard.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
|
||||
@@ -31,6 +31,7 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
watch = false
|
||||
|
||||
# PostgresSQL server options, see `standalone.example.toml`.
|
||||
[postgres]
|
||||
@@ -43,6 +44,7 @@ runtime_size = 2
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
watch = false
|
||||
|
||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
||||
[opentsdb]
|
||||
|
||||
@@ -44,6 +44,8 @@ mode = "disable"
|
||||
cert_path = ""
|
||||
# Private key file path.
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# PostgresSQL server options.
|
||||
[postgres]
|
||||
@@ -62,6 +64,8 @@ mode = "disable"
|
||||
cert_path = ""
|
||||
# private key file path.
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
@@ -118,7 +122,7 @@ sync_period = "1000ms"
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# The prefix of topic name.
|
||||
@@ -240,6 +244,18 @@ mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
intermediate_path = ""
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "partition_tree": partition tree memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "partition_tree"
|
||||
# The max number of keys in one shard.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
@@ -250,10 +266,11 @@ intermediate_path = ""
|
||||
# enable_otlp_tracing = false
|
||||
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# tracing_sample_ratio = 1.0
|
||||
# Whether to append logs to stdout. Defaults to true.
|
||||
# append_stdout = true
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# [logging.tracing_sample_ratio]
|
||||
# default_ratio = 0.0
|
||||
|
||||
# Standalone export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
|
||||
50
docs/benchmarks/tsbs/v0.7.0.md
Normal file
50
docs/benchmarks/tsbs/v0.7.0.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# TSBS benchmark - v0.7.0
|
||||
|
||||
## Environment
|
||||
|
||||
### Local
|
||||
| | |
|
||||
| ------ | ---------------------------------- |
|
||||
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||
| Memory | 32GB |
|
||||
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||
| OS | Ubuntu 22.04.2 LTS |
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
| | |
|
||||
| ------- | -------------- |
|
||||
| Machine | c5d.2xlarge |
|
||||
| CPU | 8 core |
|
||||
| Memory | 16GB |
|
||||
| Disk | 50GB (GP3) |
|
||||
| OS | Ubuntu 22.04.1 |
|
||||
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| ------------------ | --------------------- |
|
||||
| Local | 3695814.64 |
|
||||
| EC2 c5d.2xlarge | 2987166.64 |
|
||||
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | ---------------------- |
|
||||
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||
| double-groupby-1 | 664.30 | 1366.63 |
|
||||
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||
| double-groupby-all | 2828.94 | 3389.59 |
|
||||
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||
| high-cpu-1 | 29.21 | 52.98 |
|
||||
| high-cpu-all | 5514.12 | 7194.91 |
|
||||
| lastpoint | 7571.40 | 9423.41 |
|
||||
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||
@@ -79,7 +79,7 @@ This RFC proposes to add a new expression node `MergeScan` to merge result from
 │ │ │ │
 └─Frontend──────┘ └─Remote-Sources──────────────┘
 ```
-This merge operation simply chains all the the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
+This merge operation simply chains all the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.

 ## Commutativity of MergeScan

New binary file (image, 65 KiB): not shown.
101
docs/rfcs/2024-02-21-multi-dimension-partition-rule/rfc.md
Normal file
101
docs/rfcs/2024-02-21-multi-dimension-partition-rule/rfc.md
Normal file
@@ -0,0 +1,101 @@
|
||||
---
|
||||
Feature Name: Multi-dimension Partition Rule
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3351
|
||||
Date: 2024-02-21
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
|
||||
A new region partition scheme that runs on multiple dimensions of the key space. The partition rule is defined by a set of simple expressions on the partition key columns.
|
||||
|
||||
# Motivation
|
||||
|
||||
The current partition rule is from MySQL's [`RANGE Partition`](https://dev.mysql.com/doc/refman/8.0/en/partitioning-range.html), which is based on a single dimension. It is sort of a [Hilbert Curve](https://en.wikipedia.org/wiki/Hilbert_curve) and pick several point on the curve to divide the space. It is neither easy to understand how the data get partitioned nor flexible enough to handle complex partitioning requirements.
|
||||
|
||||
Considering the future requirements like region repartitioning or autonomous rebalancing, where both workload and partition may change frequently. Here proposes a new region partition scheme that uses a set of simple expressions on the partition key columns to divide the key space.
|
||||
|
||||
# Details
|
||||
|
||||
## Partition rule
|
||||
|
||||
First, we define a simple expression that can be used to define the partition rule. The simple expression is a binary expression expression on the partition key columns that can be evaluated to a boolean value. The binary operator is limited to comparison operators only, like `=`, `!=`, `>`, `>=`, `<`, `<=`. And the operands are limited either literal value or partition column.
|
||||
|
||||
Example of valid simple expressions are $`col_A = 10`$, $`col_A \gt 10 \& col_B \gt 20`$ or $`col_A \ne 10`$.
|
||||
|
||||
Those expressions can be used as predicates to divide the key space into different regions. The following example have two partition columns `Col A` and `Col B`, and four partitioned regions.
|
||||
|
||||
```math
|
||||
\left\{\begin{aligned}
|
||||
|
||||
&col_A \le 10 &Region_1 \\
|
||||
&10 \lt col_A \& col_A \le 20 &Region_2 \\
|
||||
&20 \lt col_A \space \& \space col_B \lt 100 &Region_3 \\
|
||||
&20 \lt col_A \space \& \space col_B \ge 100 &Region_4
|
||||
|
||||
\end{aligned}\right\}
|
||||
```
|
||||
|
||||
An advantage of this scheme is that it is easy to understand how the data get partitioned. The above example can be visualized in a 2D space (two partition column is involved in the example).
|
||||
|
||||

|
||||
|
||||
Here each expression draws a line in the 2D space. Managing data partitioning becomes a matter of drawing lines in the key space.
|
||||
|
||||
To make it easy to use, there is a "default region" which catches all the data that doesn't match any of previous expressions. The default region exist by default and do not need to specify. It is also possible to remove this default region if the DB finds it is not necessary.

## SQL interface

The SQL interface is responsible for two parts: specifying the partition columns and specifying the partition rule. Though we are targeting an autonomous system, it is still allowed to give some bootstrap rules or hints when creating a table.

Partition columns are specified by the `PARTITION ON COLUMNS` sub-clause of `CREATE TABLE`:

```sql
CREATE TABLE t (...)
PARTITION ON COLUMNS (...) ();
```

The two following bracket groups are for the partition columns and the partition rule respectively.

Columns provided here are only used as an allow-list restricting how the partition rule can be defined, which means that (a) the order of the columns doesn't matter, and (b) the columns provided here are not necessarily all used in the partition rule.
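
For instance, under the syntax sketched above (table and column names here are hypothetical), listing two allowed columns while only using one of them is valid, and `(b, a)` is equivalent to `(a, b)`:

```sql
CREATE TABLE sensors (
    a STRING,
    b STRING,
    ts TIMESTAMP,
    TIME INDEX (ts),
    PRIMARY KEY (a, b)
)
PARTITION ON COLUMNS (b, a) (
    a < 100,
    a >= 100
)
```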

The partition rule part is a comma-separated list of simple expressions. The expressions here do not correspond one-to-one to regions, as they might be changed by the system to fit varying workloads.

A full example of `CREATE TABLE` with a partition rule is:

```sql
CREATE TABLE IF NOT EXISTS demo (
    a STRING,
    b STRING,
    c STRING,
    d STRING,
    ts TIMESTAMP,
    memory DOUBLE,
    TIME INDEX (ts),
    PRIMARY KEY (a, b, c, d)
)
PARTITION ON COLUMNS (c, b, a) (
    a < 10,
    a >= 10 AND a < 20,
    a >= 20 AND b < 100,
    a >= 20 AND b >= 100
)
```

## Combine with storage

Examining columns separately suits our columnar storage very well, in two respects.

1. A simple expression can be pushed down to the storage and file format layers, and is likely to hit an existing index, which makes pruning very efficient (see the sketch after this list).

2. Columns in columnar storage are not tightly coupled as in traditional row storage, which means we can easily add or remove columns from the partition rule without much impact on the data (such as a global reshuffle).
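
As a sketch of the first point: assuming per-column min/max statistics are available for each file (the `FileStats` type below is hypothetical, e.g. something derived from Parquet metadata), a simple comparison expression can rule a whole file out without reading it.

```rust
use std::collections::HashMap;

/// Hypothetical per-file column statistics.
struct FileStats {
    min_max: HashMap<String, (i64, i64)>,
}

/// Returns false only when no row in the file can satisfy `column < literal`,
/// so the file can be skipped entirely for a partition bounded by this expression.
fn may_contain_lt(stats: &FileStats, column: &str, literal: i64) -> bool {
    match stats.min_max.get(column) {
        Some((min, _max)) => *min < literal,
        None => true, // no statistics: cannot prune, must read the file
    }
}

fn main() {
    let stats = FileStats {
        min_max: HashMap::from([("col_a".to_string(), (25_i64, 90_i64))]),
    };
    // A region defined by `col_a < 10` can skip this file: its minimum is 25.
    assert!(!may_contain_lt(&stats, "col_a", 10));
}
```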

The data file itself can be "projected" onto the key space as a polyhedron, and it is guaranteed that each face is parallel to some coordinate plane (in a 2D scenario, this means every file can be projected onto a rectangle). Thus partitioning or repartitioning also only needs to consider the related columns.



An additional limitation is that, considering how the index works and how we organize primary keys at present, the partition columns are limited to a subset of the primary key columns for better performance.

# Drawbacks

This is a breaking change.
||||
Binary file not shown.
|
After Width: | Height: | Size: 71 KiB |
@@ -66,7 +66,7 @@
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
@@ -2116,7 +2116,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "bytes"
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
@@ -2126,7 +2126,7 @@
|
||||
"x": 0,
|
||||
"y": 61
|
||||
},
|
||||
"id": 12,
|
||||
"id": 17,
|
||||
"interval": "1s",
|
||||
"options": {
|
||||
"legend": {
|
||||
@@ -2147,8 +2147,8 @@
|
||||
"uid": "${DS_PROMETHEUS-1}"
|
||||
},
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "builder",
|
||||
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
|
||||
"editorMode": "code",
|
||||
"expr": "rate(raft_engine_sync_log_duration_seconds_count[2s])",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"instant": false,
|
||||
@@ -2158,7 +2158,7 @@
|
||||
"useBackend": false
|
||||
}
|
||||
],
|
||||
"title": "wal write size",
|
||||
"title": "raft engine sync count",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -2378,6 +2378,120 @@
|
||||
],
|
||||
"title": "raft engine write duration seconds",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS-1}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "bytes"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 68
|
||||
},
|
||||
"id": 12,
|
||||
"interval": "1s",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS-1}"
|
||||
},
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.95, sum by(le) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"instant": false,
|
||||
"legendFormat": "req-size-p95",
|
||||
"range": true,
|
||||
"refId": "A",
|
||||
"useBackend": false
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS-1}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "throughput",
|
||||
"range": true,
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "wal write size",
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"refresh": "10s",
|
||||
@@ -2387,13 +2501,13 @@
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-3h",
|
||||
"from": "now-30m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "",
|
||||
"title": "GreptimeDB",
|
||||
"uid": "e7097237-669b-4f8d-b751-13067afbfb68",
|
||||
"version": 9,
|
||||
"version": 12,
|
||||
"weekStart": ""
|
||||
}
|
||||
@@ -19,6 +19,12 @@ includes = [
|
||||
"*.py",
|
||||
]
|
||||
|
||||
excludes = [
|
||||
# copied sources
|
||||
"src/common/base/src/readable_size.rs",
|
||||
"src/servers/src/repeated_field.rs",
|
||||
]
|
||||
|
||||
[properties]
|
||||
inceptionYear = 2023
|
||||
copyrightOwner = "Greptime Team"
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
|
||||
|
||||
set -e -x
|
||||
set -ex
|
||||
|
||||
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
|
||||
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
|
||||
@@ -13,13 +12,34 @@ RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
|
||||
|
||||
echo "Downloading assets to dir: $OUT_DIR"
|
||||
cd $OUT_DIR
|
||||
|
||||
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
||||
GITHUB_URL="https://github.com"
|
||||
else
|
||||
GITHUB_URL="${GITHUB_PROXY_URL%/}"
|
||||
fi
|
||||
|
||||
function retry_fetch() {
|
||||
local url=$1
|
||||
local filename=$2
|
||||
|
||||
curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
|
||||
echo "Failed to download $url"
|
||||
echo "You may try to set http_proxy and https_proxy environment variables."
|
||||
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
||||
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
|
||||
fi
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
# Download the SHA256 checksum attached to the release. To verify the integrity
|
||||
# of the download, this checksum will be used to check the download tar file
|
||||
# containing the built dashboard assets.
|
||||
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
|
||||
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt
|
||||
|
||||
# Download the tar file containing the built dashboard assets.
|
||||
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
|
||||
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz
|
||||
|
||||
# Verify the checksums match; exit if they don't.
|
||||
case "$(uname -s)" in
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-base.workspace = true
|
||||
common-decimal.workspace = true
|
||||
@@ -15,7 +18,6 @@ greptime-proto.workspace = true
|
||||
paste = "1.0"
|
||||
prost.workspace = true
|
||||
snafu.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.9"
|
||||
|
||||
@@ -707,7 +707,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
}
|
||||
|
||||
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
|
||||
// TODO(fys): use macros to optimize code
|
||||
match data_type {
|
||||
ConcreteDataType::Int64(_) => values
|
||||
.i64_values
|
||||
|
||||
@@ -8,13 +8,17 @@ license.workspace = true
|
||||
default = []
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
digest = "0.10"
|
||||
hex = { version = "0.4" }
|
||||
notify.workspace = true
|
||||
secrecy = { version = "0.8", features = ["serde", "alloc"] }
|
||||
sha1 = "0.10"
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -22,6 +22,9 @@ use snafu::{ensure, OptionExt};
|
||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
||||
use crate::user_provider::watch_file_user_provider::{
|
||||
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
|
||||
};
|
||||
use crate::{UserInfoRef, UserProviderRef};
|
||||
|
||||
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
|
||||
@@ -40,9 +43,12 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
match name {
|
||||
STATIC_USER_PROVIDER => {
|
||||
let provider =
|
||||
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||
Ok(provider)
|
||||
}
|
||||
WATCH_FILE_USER_PROVIDER => {
|
||||
WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
|
||||
}
|
||||
_ => InvalidConfigSnafu {
|
||||
value: name.to_string(),
|
||||
msg: "Invalid UserProviderOption",
|
||||
|
||||
@@ -64,6 +64,13 @@ pub enum Error {
|
||||
username: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to initialize a watcher for file {}", path))]
|
||||
FileWatch {
|
||||
path: String,
|
||||
#[snafu(source)]
|
||||
error: notify::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("User is not authorized to perform this action"))]
|
||||
PermissionDenied { location: Location },
|
||||
}
|
||||
@@ -73,6 +80,7 @@ impl ErrorExt for Error {
|
||||
match self {
|
||||
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
||||
Error::FileWatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::InternalState { .. } => StatusCode::Unexpected,
|
||||
Error::Io { .. } => StatusCode::Internal,
|
||||
Error::AuthBackend { .. } => StatusCode::Internal,
|
||||
|
||||
@@ -13,10 +13,24 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub(crate) mod static_user_provider;
|
||||
pub(crate) mod watch_file_user_provider;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::common::{Identity, Password};
|
||||
use crate::error::Result;
|
||||
use crate::UserInfoRef;
|
||||
use crate::error::{
|
||||
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, UserInfoRef};
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait UserProvider: Send + Sync {
|
||||
@@ -44,3 +58,88 @@ pub trait UserProvider: Send + Sync {
|
||||
Ok(user_info)
|
||||
}
|
||||
}
|
||||
|
||||
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
|
||||
// check valid path
|
||||
let path = Path::new(filepath);
|
||||
if !path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
ensure!(
|
||||
path.is_file(),
|
||||
InvalidConfigSnafu {
|
||||
value: filepath,
|
||||
msg: "UserProvider file must be a file",
|
||||
}
|
||||
);
|
||||
let file = File::open(path).context(IoSnafu)?;
|
||||
let credential = io::BufReader::new(file)
|
||||
.lines()
|
||||
.map_while(std::result::Result::ok)
|
||||
.filter_map(|line| {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<String, Vec<u8>>>();
|
||||
|
||||
ensure!(
|
||||
!credential.is_empty(),
|
||||
InvalidConfigSnafu {
|
||||
value: filepath,
|
||||
msg: "UserProvider's file must contains at least one valid credential",
|
||||
}
|
||||
);
|
||||
|
||||
Ok(Some(credential))
|
||||
}
|
||||
|
||||
fn authenticate_with_credential(
|
||||
users: &HashMap<String, Vec<u8>>,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfoRef> {
|
||||
match input_id {
|
||||
Identity::UserId(username, _) => {
|
||||
ensure!(
|
||||
!username.is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank username"
|
||||
}
|
||||
);
|
||||
let save_pwd = users.get(username).context(UserNotFoundSnafu {
|
||||
username: username.to_string(),
|
||||
})?;
|
||||
|
||||
match input_pwd {
|
||||
Password::PlainText(pwd) => {
|
||||
ensure!(
|
||||
!pwd.expose_secret().is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank password"
|
||||
}
|
||||
);
|
||||
if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,60 +13,34 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use secrecy::ExposeSecret;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error::{
|
||||
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
use crate::error::{InvalidConfigSnafu, Result};
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
impl TryFrom<&str> for StaticUserProvider {
|
||||
type Error = Error;
|
||||
pub(crate) struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
fn try_from(value: &str) -> Result<Self> {
|
||||
impl StaticUserProvider {
|
||||
pub(crate) fn new(value: &str) -> Result<Self> {
|
||||
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
|
||||
value: value.to_string(),
|
||||
msg: "StaticUserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
return match mode {
|
||||
"file" => {
|
||||
// check valid path
|
||||
let path = Path::new(content);
|
||||
ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
|
||||
value: content.to_string(),
|
||||
msg: "StaticUserProviderOption file must be a valid file path",
|
||||
});
|
||||
|
||||
let file = File::open(path).context(IoSnafu)?;
|
||||
let credential = io::BufReader::new(file)
|
||||
.lines()
|
||||
.map_while(std::result::Result::ok)
|
||||
.filter_map(|line| {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<String, Vec<u8>>>();
|
||||
|
||||
ensure!(!credential.is_empty(), InvalidConfigSnafu {
|
||||
value: content.to_string(),
|
||||
msg: "StaticUserProviderOption file must contains at least one valid credential",
|
||||
});
|
||||
|
||||
Ok(StaticUserProvider { users: credential, })
|
||||
let users = load_credential_from_file(content)?
|
||||
.context(InvalidConfigSnafu {
|
||||
value: content.to_string(),
|
||||
msg: "StaticFileUserProvider must be a valid file path",
|
||||
})?;
|
||||
Ok(StaticUserProvider { users })
|
||||
}
|
||||
"cmd" => content
|
||||
.split(',')
|
||||
@@ -83,66 +57,19 @@ impl TryFrom<&str> for StaticUserProvider {
|
||||
value: mode.to_string(),
|
||||
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
|
||||
}
|
||||
.fail(),
|
||||
.fail(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserProvider for StaticUserProvider {
|
||||
fn name(&self) -> &str {
|
||||
STATIC_USER_PROVIDER
|
||||
}
|
||||
|
||||
async fn authenticate(
|
||||
&self,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfoRef> {
|
||||
match input_id {
|
||||
Identity::UserId(username, _) => {
|
||||
ensure!(
|
||||
!username.is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank username"
|
||||
}
|
||||
);
|
||||
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
|
||||
username: username.to_string(),
|
||||
})?;
|
||||
|
||||
match input_pwd {
|
||||
Password::PlainText(pwd) => {
|
||||
ensure!(
|
||||
!pwd.expose_secret().is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank password"
|
||||
}
|
||||
);
|
||||
return if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
};
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> {
|
||||
authenticate_with_credential(&self.users, id, pwd)
|
||||
}
|
||||
|
||||
async fn authorize(
|
||||
@@ -181,7 +108,7 @@ pub mod test {
|
||||
#[tokio::test]
|
||||
async fn test_authorize() {
|
||||
let user_info = DefaultUserInfo::with_name("root");
|
||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
||||
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
|
||||
provider
|
||||
.authorize("catalog", "schema", &user_info)
|
||||
.await
|
||||
@@ -190,7 +117,7 @@ pub mod test {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_inline_provider() {
|
||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
||||
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
|
||||
test_authenticate(&provider, "root", "123456").await;
|
||||
test_authenticate(&provider, "admin", "654321").await;
|
||||
}
|
||||
@@ -214,7 +141,7 @@ admin=654321",
|
||||
}
|
||||
|
||||
let param = format!("file:{file_path}");
|
||||
let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
|
||||
let provider = StaticUserProvider::new(param.as_str()).unwrap();
|
||||
test_authenticate(&provider, "root", "123456").await;
|
||||
test_authenticate(&provider, "admin", "654321").await;
|
||||
}
|
||||
|
||||
215
src/auth/src/user_provider/watch_file_user_provider.rs
Normal file
@@ -0,0 +1,215 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_telemetry::{info, warn};
|
||||
use notify::{EventKind, RecursiveMode, Watcher};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
|
||||
|
||||
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
|
||||
|
||||
/// A user provider that reads user credential from a file and watches the file for changes.
|
||||
///
|
||||
/// Empty file is invalid; but file not exist means every user can be authenticated.
|
||||
pub(crate) struct WatchFileUserProvider {
|
||||
users: WatchedCredentialRef,
|
||||
}
|
||||
|
||||
impl WatchFileUserProvider {
|
||||
pub fn new(filepath: &str) -> Result<Self> {
|
||||
let credential = load_credential_from_file(filepath)?;
|
||||
let users = Arc::new(Mutex::new(credential));
|
||||
let this = WatchFileUserProvider {
|
||||
users: users.clone(),
|
||||
};
|
||||
|
||||
let (tx, rx) = channel::<notify::Result<notify::Event>>();
|
||||
let mut debouncer =
|
||||
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
|
||||
let mut dir = Path::new(filepath).to_path_buf();
|
||||
ensure!(
|
||||
dir.pop(),
|
||||
InvalidConfigSnafu {
|
||||
value: filepath,
|
||||
msg: "UserProvider path must be a file path",
|
||||
}
|
||||
);
|
||||
debouncer
|
||||
.watch(&dir, RecursiveMode::NonRecursive)
|
||||
.context(FileWatchSnafu { path: filepath })?;
|
||||
|
||||
let filepath = filepath.to_string();
|
||||
std::thread::spawn(move || {
|
||||
let filename = Path::new(&filepath).file_name();
|
||||
let _hold = debouncer;
|
||||
while let Ok(res) = rx.recv() {
|
||||
if let Ok(event) = res {
|
||||
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
|
||||
let is_relevant_event = matches!(
|
||||
event.kind,
|
||||
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
|
||||
);
|
||||
if is_this_file && is_relevant_event {
|
||||
info!(?event.kind, "User provider file {} changed", &filepath);
|
||||
match load_credential_from_file(&filepath) {
|
||||
Ok(credential) => {
|
||||
let mut users =
|
||||
users.lock().expect("users credential must be valid");
|
||||
#[cfg(not(test))]
|
||||
info!("User provider file {filepath} reloaded");
|
||||
#[cfg(test)]
|
||||
info!("User provider file {filepath} reloaded: {credential:?}");
|
||||
*users = credential;
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
?err,
|
||||
"Fail to load credential from file {filepath}; keep the old one",
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserProvider for WatchFileUserProvider {
|
||||
fn name(&self) -> &str {
|
||||
WATCH_FILE_USER_PROVIDER
|
||||
}
|
||||
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
|
||||
let users = self.users.lock().expect("users credential must be valid");
|
||||
if let Some(users) = users.as_ref() {
|
||||
authenticate_with_credential(users, id, password)
|
||||
} else {
|
||||
match id {
|
||||
Identity::UserId(id, _) => {
|
||||
warn!(id, "User provider file not exist, allow all users");
|
||||
Ok(DefaultUserInfo::with_name(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
|
||||
// default allow all
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
|
||||
use crate::user_provider::{Identity, Password};
|
||||
use crate::UserProvider;
|
||||
|
||||
async fn test_authenticate(
|
||||
provider: &dyn UserProvider,
|
||||
username: &str,
|
||||
password: &str,
|
||||
ok: bool,
|
||||
timeout: Option<Duration>,
|
||||
) {
|
||||
if let Some(timeout) = timeout {
|
||||
let deadline = Instant::now().checked_add(timeout).unwrap();
|
||||
loop {
|
||||
let re = provider
|
||||
.authenticate(
|
||||
Identity::UserId(username, None),
|
||||
Password::PlainText(password.to_string().into()),
|
||||
)
|
||||
.await;
|
||||
if re.is_ok() == ok {
|
||||
break;
|
||||
} else if Instant::now() < deadline {
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
} else {
|
||||
panic!("timeout (username: {username}, password: {password}, expected: {ok})");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let re = provider
|
||||
.authenticate(
|
||||
Identity::UserId(username, None),
|
||||
Password::PlainText(password.to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert_eq!(
|
||||
re.is_ok(),
|
||||
ok,
|
||||
"username: {}, password: {}",
|
||||
username,
|
||||
password
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let dir = create_temp_dir("test_file_provider");
|
||||
let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());
|
||||
|
||||
// write a tmp file
|
||||
assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
|
||||
let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
|
||||
let timeout = Duration::from_secs(60);
|
||||
|
||||
test_authenticate(&provider, "root", "123456", true, None).await;
|
||||
test_authenticate(&provider, "admin", "654321", true, None).await;
|
||||
test_authenticate(&provider, "root", "654321", false, None).await;
|
||||
|
||||
// update the tmp file
|
||||
assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
|
||||
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
|
||||
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
|
||||
|
||||
// remove the tmp file
|
||||
assert!(std::fs::remove_file(&file_path).is_ok());
|
||||
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
|
||||
|
||||
// recreate the tmp file
|
||||
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
|
||||
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
|
||||
}
|
||||
}
|
||||
@@ -7,21 +7,21 @@ license.workspace = true
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
@@ -34,12 +34,9 @@ itertools.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client.workspace = true
|
||||
moka = { workspace = true, features = ["future", "sync"] }
|
||||
parking_lot = "0.12"
|
||||
partition.workspace = true
|
||||
paste = "1.0"
|
||||
prometheus.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -164,11 +164,8 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find table partitions: #{table}"))]
|
||||
FindPartitions {
|
||||
source: partition::error::Error,
|
||||
table: String,
|
||||
},
|
||||
#[snafu(display("Failed to find table partitions"))]
|
||||
FindPartitions { source: partition::error::Error },
|
||||
|
||||
#[snafu(display("Failed to find region routes"))]
|
||||
FindRegionRoutes { source: partition::error::Error },
|
||||
@@ -254,6 +251,12 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Get null from table cache, key: {}", key))]
|
||||
TableCacheNotGet { key: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
|
||||
GetTableCache { err_msg: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -314,6 +317,7 @@ impl ErrorExt for Error {
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,16 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod columns;
|
||||
mod key_column_usage;
|
||||
pub mod columns;
|
||||
pub mod key_column_usage;
|
||||
mod memory_table;
|
||||
mod partitions;
|
||||
mod predicate;
|
||||
mod region_peers;
|
||||
mod runtime_metrics;
|
||||
mod schemata;
|
||||
pub mod schemata;
|
||||
mod table_names;
|
||||
mod tables;
|
||||
pub mod tables;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
@@ -48,16 +48,16 @@ pub(super) struct InformationSchemaColumns {
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const COLUMN_NAME: &str = "column_name";
|
||||
const DATA_TYPE: &str = "data_type";
|
||||
const SEMANTIC_TYPE: &str = "semantic_type";
|
||||
const COLUMN_DEFAULT: &str = "column_default";
|
||||
const IS_NULLABLE: &str = "is_nullable";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const COLUMN_NAME: &str = "column_name";
|
||||
pub const DATA_TYPE: &str = "data_type";
|
||||
pub const SEMANTIC_TYPE: &str = "semantic_type";
|
||||
pub const COLUMN_DEFAULT: &str = "column_default";
|
||||
pub const IS_NULLABLE: &str = "is_nullable";
|
||||
const COLUMN_TYPE: &str = "column_type";
|
||||
const COLUMN_COMMENT: &str = "column_comment";
|
||||
pub const COLUMN_COMMENT: &str = "column_comment";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
impl InformationSchemaColumns {
|
||||
|
||||
@@ -37,13 +37,16 @@ use crate::error::{
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||
const CONSTRAINT_NAME: &str = "constraint_name";
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const COLUMN_NAME: &str = "column_name";
|
||||
const ORDINAL_POSITION: &str = "ordinal_position";
|
||||
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||
pub const CONSTRAINT_NAME: &str = "constraint_name";
|
||||
// It's always `def` in MySQL
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
// The real catalog name for this key column.
|
||||
pub const REAL_TABLE_CATALOG: &str = "real_table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const COLUMN_NAME: &str = "column_name";
|
||||
pub const ORDINAL_POSITION: &str = "ordinal_position";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
|
||||
@@ -76,6 +79,11 @@ impl InformationSchemaKeyColumnUsage {
|
||||
),
|
||||
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
REAL_TABLE_CATALOG,
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
|
||||
@@ -158,6 +166,7 @@ struct InformationSchemaKeyColumnUsageBuilder {
|
||||
constraint_schema: StringVectorBuilder,
|
||||
constraint_name: StringVectorBuilder,
|
||||
table_catalog: StringVectorBuilder,
|
||||
real_table_catalog: StringVectorBuilder,
|
||||
table_schema: StringVectorBuilder,
|
||||
table_name: StringVectorBuilder,
|
||||
column_name: StringVectorBuilder,
|
||||
@@ -179,6 +188,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
real_table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -223,6 +233,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
&predicates,
|
||||
&schema_name,
|
||||
"TIME INDEX",
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
@@ -231,6 +242,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
}
|
||||
if keys.contains(&idx) {
|
||||
primary_constraints.push((
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
table_name.clone(),
|
||||
column.name.clone(),
|
||||
@@ -244,13 +256,14 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
for (i, (schema_name, table_name, column_name)) in
|
||||
for (i, (catalog_name, schema_name, table_name, column_name)) in
|
||||
primary_constraints.into_iter().enumerate()
|
||||
{
|
||||
self.add_key_column_usage(
|
||||
&predicates,
|
||||
&schema_name,
|
||||
"PRIMARY",
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column_name,
|
||||
@@ -269,6 +282,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
predicates: &Predicates,
|
||||
constraint_schema: &str,
|
||||
constraint_name: &str,
|
||||
table_catalog: &str,
|
||||
table_schema: &str,
|
||||
table_name: &str,
|
||||
column_name: &str,
|
||||
@@ -277,6 +291,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
let row = [
|
||||
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
|
||||
(CONSTRAINT_NAME, &Value::from(constraint_name)),
|
||||
(REAL_TABLE_CATALOG, &Value::from(table_catalog)),
|
||||
(TABLE_SCHEMA, &Value::from(table_schema)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
(COLUMN_NAME, &Value::from(column_name)),
|
||||
@@ -291,6 +306,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
self.constraint_schema.push(Some(constraint_schema));
|
||||
self.constraint_name.push(Some(constraint_name));
|
||||
self.table_catalog.push(Some("def"));
|
||||
self.real_table_catalog.push(Some(table_catalog));
|
||||
self.table_schema.push(Some(table_schema));
|
||||
self.table_name.push(Some(table_name));
|
||||
self.column_name.push(Some(column_name));
|
||||
@@ -310,6 +326,7 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
Arc::new(self.constraint_schema.finish()),
|
||||
Arc::new(self.constraint_name.finish()),
|
||||
Arc::new(self.table_catalog.finish()),
|
||||
Arc::new(self.real_table_catalog.finish()),
|
||||
Arc::new(self.table_schema.finish()),
|
||||
Arc::new(self.table_name.finish()),
|
||||
Arc::new(self.column_name.finish()),
|
||||
|
||||
@@ -14,13 +14,15 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::prelude::{ConcreteDataType, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use crate::information_schema::table_names::*;
|
||||
|
||||
const NO_VALUE: &str = "NO";
|
||||
|
||||
/// Find the schema and columns by the table_name, only valid for memory tables.
|
||||
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
|
||||
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
@@ -59,14 +61,15 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
"SAVEPOINTS",
|
||||
]),
|
||||
vec![
|
||||
Arc::new(StringVector::from(vec![MITO_ENGINE])),
|
||||
Arc::new(StringVector::from(vec!["DEFAULT"])),
|
||||
Arc::new(StringVector::from(vec![MITO_ENGINE, METRIC_ENGINE])),
|
||||
Arc::new(StringVector::from(vec!["DEFAULT", "YES"])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
"Storage engine for time-series data",
|
||||
"Storage engine for observability scenarios, which is adept at handling a large number of small tables, making it particularly suitable for cloud-native monitoring",
|
||||
])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
|
||||
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
|
||||
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
|
||||
],
|
||||
),
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use core::pin::pin;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
@@ -31,7 +32,7 @@ use datatypes::vectors::{
|
||||
ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
|
||||
MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use partition::manager::PartitionInfo;
|
||||
use partition::partition::PartitionDef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -240,40 +241,64 @@ impl InformationSchemaPartitionsBuilder {
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
|
||||
let table_info_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name)
|
||||
.await
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(table_info))
|
||||
}
|
||||
});
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
const BATCH_SIZE: usize = 128;
|
||||
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
continue;
|
||||
}
|
||||
// Split table infos into chunks
|
||||
let mut table_info_chunks = pin!(table_info_stream.ready_chunks(BATCH_SIZE));
|
||||
|
||||
let table_id = table_info.ident.table_id;
|
||||
let partitions = if let Some(partition_manager) = &partition_manager {
|
||||
while let Some(table_infos) = table_info_chunks.next().await {
|
||||
let table_infos = table_infos.into_iter().collect::<Result<Vec<_>>>()?;
|
||||
let table_ids: Vec<TableId> =
|
||||
table_infos.iter().map(|info| info.ident.table_id).collect();
|
||||
|
||||
let mut table_partitions = if let Some(partition_manager) = &partition_manager {
|
||||
partition_manager
|
||||
.find_table_partitions(table_id)
|
||||
.batch_find_table_partitions(&table_ids)
|
||||
.await
|
||||
.context(FindPartitionsSnafu {
|
||||
table: &table_info.name,
|
||||
})?
|
||||
.context(FindPartitionsSnafu)?
|
||||
} else {
|
||||
// Current node must be a standalone instance, contains only one partition by default.
|
||||
// TODO(dennis): change it when we support multi-regions for standalone.
|
||||
vec![PartitionInfo {
|
||||
id: RegionId::new(table_id, 0),
|
||||
partition: PartitionDef::new(vec![], vec![]),
|
||||
}]
|
||||
table_ids
|
||||
.into_iter()
|
||||
.map(|table_id| {
|
||||
(
|
||||
table_id,
|
||||
vec![PartitionInfo {
|
||||
id: RegionId::new(table_id, 0),
|
||||
partition: PartitionDef::new(vec![], vec![]),
|
||||
}],
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
|
||||
self.add_partitions(
|
||||
&predicates,
|
||||
&table_info,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
&partitions,
|
||||
);
|
||||
for table_info in table_infos {
|
||||
let partitions = table_partitions
|
||||
.remove(&table_info.ident.table_id)
|
||||
.unwrap_or(vec![]);
|
||||
|
||||
self.add_partitions(
|
||||
&predicates,
|
||||
&table_info,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
&partitions,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -199,7 +199,7 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
|
||||
let table_routes = if let Some(partition_manager) = &partition_manager {
|
||||
partition_manager
|
||||
.find_region_routes_batch(&table_ids)
|
||||
.batch_find_region_routes(&table_ids)
|
||||
.await
|
||||
.context(FindRegionRoutesSnafu)?
|
||||
} else {
|
||||
|
||||
@@ -37,8 +37,8 @@ use crate::error::{
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const CATALOG_NAME: &str = "catalog_name";
|
||||
const SCHEMA_NAME: &str = "schema_name";
|
||||
pub const CATALOG_NAME: &str = "catalog_name";
|
||||
pub const SCHEMA_NAME: &str = "schema_name";
|
||||
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
|
||||
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
@@ -39,10 +39,10 @@ use crate::error::{
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const TABLE_TYPE: &str = "table_type";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const TABLE_TYPE: &str = "table_type";
|
||||
const TABLE_ID: &str = "table_id";
|
||||
const ENGINE: &str = "engine";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
@@ -82,12 +82,10 @@ impl CachedMetaKvBackendBuilder {
|
||||
let cache_ttl = self.cache_ttl.unwrap_or(DEFAULT_CACHE_TTL);
|
||||
let cache_tti = self.cache_tti.unwrap_or(DEFAULT_CACHE_TTI);
|
||||
|
||||
let cache = Arc::new(
|
||||
CacheBuilder::new(cache_max_capacity)
|
||||
.time_to_live(cache_ttl)
|
||||
.time_to_idle(cache_tti)
|
||||
.build(),
|
||||
);
|
||||
let cache = CacheBuilder::new(cache_max_capacity)
|
||||
.time_to_live(cache_ttl)
|
||||
.time_to_idle(cache_tti)
|
||||
.build();
|
||||
|
||||
let kv_backend = Arc::new(MetaKvBackend {
|
||||
client: self.meta_client,
|
||||
@@ -104,7 +102,7 @@ impl CachedMetaKvBackendBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
|
||||
pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
|
||||
|
||||
/// A wrapper of `MetaKvBackend` with cache support.
|
||||
///
|
||||
@@ -117,7 +115,7 @@ pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
|
||||
/// TTL and TTI for cache.
|
||||
pub struct CachedMetaKvBackend {
|
||||
kv_backend: KvBackendRef,
|
||||
cache: CacheBackendRef,
|
||||
cache: CacheBackend,
|
||||
name: String,
|
||||
version: AtomicUsize,
|
||||
}
|
||||
@@ -317,12 +315,10 @@ impl CachedMetaKvBackend {
|
||||
// only for test
|
||||
#[cfg(test)]
|
||||
fn wrap(kv_backend: KvBackendRef) -> Self {
|
||||
let cache = Arc::new(
|
||||
CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build(),
|
||||
);
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
|
||||
let name = format!("CachedKvBackend({})", kv_backend.name());
|
||||
Self {
|
||||
@@ -333,7 +329,7 @@ impl CachedMetaKvBackend {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cache(&self) -> &CacheBackendRef {
|
||||
pub fn cache(&self) -> &CacheBackend {
|
||||
&self.cache
|
||||
}
|
||||
|
||||
|
||||
@@ -15,32 +15,36 @@
|
||||
use std::any::Any;
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::Duration;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
|
||||
use common_meta::error::Result as MetaResult;
|
||||
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::table_name::TableName;
|
||||
use futures_util::stream::BoxStream;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use moka::future::{Cache as AsyncCache, CacheBuilder};
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Error::{GetTableCache, TableCacheNotGet};
|
||||
use crate::error::{
|
||||
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu,
|
||||
Result as CatalogResult, TableMetadataManagerSnafu,
|
||||
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
|
||||
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
@@ -52,56 +56,74 @@ use crate::CatalogManager;
|
||||
/// comes from `SystemCatalog`, which is static and read-only.
|
||||
#[derive(Clone)]
|
||||
pub struct KvBackendCatalogManager {
|
||||
// TODO(LFC): Maybe use a real implementation for Standalone mode.
|
||||
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
|
||||
// is implemented by RaftEngine. Maybe we need a cache for it?
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
table_cache: AsyncCache<String, TableRef>,
|
||||
}
|
||||
|
||||
fn make_table(table_info_value: TableInfoValue) -> CatalogResult<TableRef> {
|
||||
let table_info = table_info_value
|
||||
.table_info
|
||||
.try_into()
|
||||
.context(catalog_err::InvalidTableInfoInCatalogSnafu)?;
|
||||
Ok(DistTable::table(Arc::new(table_info)))
|
||||
struct TableCacheInvalidator {
|
||||
table_cache: AsyncCache<String, TableRef>,
|
||||
}
|
||||
|
||||
impl TableCacheInvalidator {
|
||||
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
|
||||
Self { table_cache }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CacheInvalidator for KvBackendCatalogManager {
|
||||
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
|
||||
self.cache_invalidator
|
||||
.invalidate_table_id(ctx, table_id)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
|
||||
self.cache_invalidator
|
||||
.invalidate_table_name(ctx, table_name)
|
||||
.await
|
||||
impl CacheInvalidator for TableCacheInvalidator {
|
||||
async fn invalidate(
|
||||
&self,
|
||||
_ctx: &Context,
|
||||
caches: Vec<CacheIdent>,
|
||||
) -> common_meta::error::Result<()> {
|
||||
for cache in caches {
|
||||
if let CacheIdent::TableName(table_name) = cache {
|
||||
let table_cache_key = format_full_table_name(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
);
|
||||
self.table_cache.invalidate(&table_cache_key).await;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
const DEFAULT_CACHED_CATALOG: u64 = 128;
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
|
||||
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
|
||||
pub async fn new(
|
||||
backend: KvBackendRef,
|
||||
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
|
||||
) -> Arc<Self> {
|
||||
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(TABLE_CACHE_TTL)
|
||||
.time_to_idle(TABLE_CACHE_TTI)
|
||||
.build();
|
||||
multi_cache_invalidator
|
||||
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
|
||||
.await;
|
||||
|
||||
Arc::new_cyclic(|me| Self {
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
cache_invalidator,
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(DEFAULT_CACHED_CATALOG),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
// The catalog name is not used in system_catalog, so let it empty
|
||||
String::default(),
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
},
|
||||
table_cache,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -120,12 +142,11 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
self
|
||||
}
|
||||
|
||||
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
|
||||
async fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.catalog_manager()
|
||||
.catalog_names()
|
||||
.await;
|
||||
.catalog_names();
|
||||
|
||||
let keys = stream
|
||||
.try_collect::<Vec<_>>()
|
||||
@@ -136,12 +157,11 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
|
||||
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
.schema_names(catalog)
|
||||
.await;
|
||||
.schema_names(catalog);
|
||||
let mut keys = stream
|
||||
.try_collect::<BTreeSet<_>>()
|
||||
.await
|
||||
@@ -153,12 +173,11 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(keys.into_iter().collect())
|
||||
}
|
||||
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(catalog, schema)
|
||||
.await;
|
||||
.tables(catalog, schema);
|
||||
let mut tables = stream
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
@@ -172,7 +191,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
Ok(tables.into_iter().collect())
|
||||
}
|
||||
|
||||
async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
|
||||
async fn catalog_exists(&self, catalog: &str) -> Result<bool> {
|
||||
self.table_metadata_manager
|
||||
.catalog_manager()
|
||||
.exists(CatalogNameKey::new(catalog))
|
||||
@@ -180,7 +199,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
if self.system_catalog.schema_exist(schema) {
|
||||
return Ok(true);
|
||||
}
|
||||
@@ -192,7 +211,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
if self.system_catalog.table_exist(schema, table) {
|
||||
return Ok(true);
|
||||
}
|
||||
@@ -211,41 +230,64 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> CatalogResult<Option<TableRef>> {
|
||||
) -> Result<Option<TableRef>> {
|
||||
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
let key = TableNameKey::new(catalog, schema, table_name);
|
||||
let Some(table_name_value) = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
let table_id = table_name_value.table_id();
|
||||
let init = async {
|
||||
let table_name_key = TableNameKey::new(catalog, schema, table_name);
|
||||
let Some(table_name_value) = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(table_name_key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let table_id = table_name_value.table_id();
|
||||
|
||||
let Some(table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.map(|v| v.into_inner())
|
||||
else {
|
||||
return Ok(None);
|
||||
let Some(table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.map(|v| v.into_inner())
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
build_table(table_info_value)
|
||||
};
|
||||
make_table(table_info_value).map(Some)
|
||||
|
||||
match self
|
||||
.table_cache
|
||||
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
|
||||
.await
|
||||
{
|
||||
Ok(table) => Ok(Some(table)),
|
||||
Err(err) => match err.as_ref() {
|
||||
TableCacheNotGet { .. } => Ok(None),
|
||||
_ => Err(err),
|
||||
},
|
||||
}
|
||||
.map_err(|err| GetTableCache {
|
||||
err_msg: err.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn tables<'a>(
|
||||
&'a self,
|
||||
catalog: &'a str,
|
||||
schema: &'a str,
|
||||
) -> BoxStream<'a, CatalogResult<TableRef>> {
|
||||
) -> BoxStream<'a, Result<TableRef>> {
|
||||
let sys_tables = try_stream!({
|
||||
// System tables
|
||||
let sys_table_names = self.system_catalog.table_names(schema);
|
||||
@@ -260,7 +302,6 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(catalog, schema)
|
||||
.await
|
||||
.map_ok(|(_, v)| v.table_id());
|
||||
const BATCH_SIZE: usize = 128;
|
||||
let user_tables = try_stream!({
|
||||
@@ -270,7 +311,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
while let Some(table_ids) = table_id_chunks.next().await {
|
||||
let table_ids = table_ids
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListTablesSnafu { catalog, schema })?;
|
||||
|
||||
@@ -282,7 +323,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.context(TableMetadataManagerSnafu)?;
|
||||
|
||||
for table_info_value in table_info_values.into_values() {
|
||||
yield make_table(table_info_value)?;
|
||||
yield build_table(table_info_value)?;
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -291,6 +332,14 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
}
|
||||
|
||||
fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
let table_info = table_info_value
|
||||
.table_info
|
||||
.try_into()
|
||||
.context(InvalidTableInfoInCatalogSnafu)?;
|
||||
Ok(DistTable::table(Arc::new(table_info)))
|
||||
}
|
||||
|
||||
// TODO: This struct can hold a static map of all system tables when
|
||||
// the upper layer (e.g., procedure) can inform the catalog manager
|
||||
// a new catalog is created.
|
||||
|
||||
@@ -19,10 +19,10 @@ use std::any::Any;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::CreateTableExpr;
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use table::metadata::TableId;
|
||||
use table::requests::CreateTableRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -75,9 +75,9 @@ pub type OpenSystemTableHook =
|
||||
/// Register system table request:
|
||||
/// - When system table is already created and registered, the hook will be called
|
||||
/// with table ref after opening the system table
|
||||
/// - When system table is not exists, create and register the table by create_table_request and calls open_hook with the created table.
|
||||
/// - When system table is not exists, create and register the table by `create_table_expr` and calls `open_hook` with the created table.
|
||||
pub struct RegisterSystemTableRequest {
|
||||
pub create_table_request: CreateTableRequest,
|
||||
pub create_table_expr: CreateTableExpr,
|
||||
pub open_hook: Option<OpenSystemTableHook>,
|
||||
}
|
||||
|
||||
|
||||
@@ -7,13 +7,15 @@ license.workspace = true
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.6"
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
@@ -22,10 +24,6 @@ common-meta.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
derive_builder.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
@@ -34,7 +32,7 @@ parking_lot = "0.12"
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
session.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-stream = { workspace = true, features = ["net"] }
|
||||
|
||||
@@ -307,7 +307,7 @@ impl Database {
|
||||
reason: "Expect 'AffectedRows' Flight messages to be the one and the only!"
|
||||
}
|
||||
);
|
||||
Ok(Output::AffectedRows(rows))
|
||||
Ok(Output::new_with_affected_rows(rows))
|
||||
}
|
||||
FlightMessage::Recordbatch(_) | FlightMessage::Metrics(_) => {
|
||||
IllegalFlightMessagesSnafu {
|
||||
@@ -340,7 +340,7 @@ impl Database {
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
Ok(Output::Stream(Box::pin(record_batch_stream)))
|
||||
Ok(Output::new_with_stream(Box::pin(record_batch_stream)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,10 +134,17 @@ impl From<Status> for Error {
|
||||
|
||||
impl Error {
|
||||
pub fn should_retry(&self) -> bool {
|
||||
!matches!(
|
||||
// TODO(weny): figure out each case of these codes.
|
||||
matches!(
|
||||
self,
|
||||
Self::RegionServer {
|
||||
code: Code::InvalidArgument,
|
||||
code: Code::Cancelled,
|
||||
..
|
||||
} | Self::RegionServer {
|
||||
code: Code::DeadlineExceeded,
|
||||
..
|
||||
} | Self::RegionServer {
|
||||
code: Code::Unavailable,
|
||||
..
|
||||
}
|
||||
)
|
||||
|
||||
@@ -26,7 +26,7 @@ use api::v1::greptime_response::Response;
|
||||
use api::v1::{AffectedRows, GreptimeResponse};
|
||||
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::status_code::StatusCode;
|
||||
pub use common_query::Output;
|
||||
pub use common_query::{Output, OutputData, OutputMeta};
|
||||
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use snafu::OptionExt;
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
use api::v1::ResponseHeader;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use arrow_flight::Ticket;
|
||||
@@ -23,7 +23,7 @@ use async_trait::async_trait;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_meta::datanode_manager::{AffectedRows, Datanode};
|
||||
use common_meta::datanode_manager::{Datanode, HandleResponse};
|
||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
@@ -46,7 +46,7 @@ pub struct RegionRequester {
|
||||
|
||||
#[async_trait]
|
||||
impl Datanode for RegionRequester {
|
||||
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
|
||||
async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
|
||||
self.handle_inner(request).await.map_err(|err| {
|
||||
if err.should_retry() {
|
||||
meta_error::Error::RetryLater {
|
||||
@@ -123,8 +123,8 @@ impl RegionRequester {
|
||||
.fail();
|
||||
};
|
||||
|
||||
let metrics_str = Arc::new(ArcSwapOption::from(None));
|
||||
let ref_str = metrics_str.clone();
|
||||
let metrics = Arc::new(ArcSwapOption::from(None));
|
||||
let metrics_ref = metrics.clone();
|
||||
|
||||
let tracing_context = TracingContext::from_current_span();
|
||||
|
||||
@@ -140,7 +140,8 @@ impl RegionRequester {
|
||||
match flight_message {
|
||||
FlightMessage::Recordbatch(record_batch) => yield Ok(record_batch),
|
||||
FlightMessage::Metrics(s) => {
|
||||
ref_str.swap(Some(Arc::new(s)));
|
||||
let m = serde_json::from_str(&s).ok().map(Arc::new);
|
||||
metrics_ref.swap(m);
|
||||
break;
|
||||
}
|
||||
_ => {
|
||||
@@ -159,12 +160,12 @@ impl RegionRequester {
|
||||
schema,
|
||||
stream,
|
||||
output_ordering: None,
|
||||
metrics: metrics_str,
|
||||
metrics,
|
||||
};
|
||||
Ok(Box::pin(record_batch_stream))
|
||||
}
|
||||
|
||||
async fn handle_inner(&self, request: RegionRequest) -> Result<AffectedRows> {
|
||||
async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
|
||||
let request_type = request
|
||||
.body
|
||||
.as_ref()
|
||||
@@ -177,10 +178,7 @@ impl RegionRequester {
|
||||
|
||||
let mut client = self.client.raw_region_client()?;
|
||||
|
||||
let RegionResponse {
|
||||
header,
|
||||
affected_rows,
|
||||
} = client
|
||||
let response = client
|
||||
.handle(request)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
@@ -194,19 +192,20 @@ impl RegionRequester {
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
check_response_header(header)?;
|
||||
check_response_header(&response.header)?;
|
||||
|
||||
Ok(affected_rows)
|
||||
Ok(HandleResponse::from_region_response(response))
|
||||
}
|
||||
|
||||
pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
|
||||
pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
|
||||
self.handle_inner(request).await
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
|
||||
pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
|
||||
let status = header
|
||||
.and_then(|header| header.status)
|
||||
.as_ref()
|
||||
.and_then(|header| header.status.as_ref())
|
||||
.context(IllegalDatabaseResponseSnafu {
|
||||
err_msg: "either response header or status is missing",
|
||||
})?;
|
||||
@@ -220,7 +219,7 @@ pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
|
||||
})?;
|
||||
ServerSnafu {
|
||||
code,
|
||||
msg: status.err_msg,
|
||||
msg: status.err_msg.clone(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
@@ -235,19 +234,19 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_check_response_header() {
|
||||
let result = check_response_header(None);
|
||||
let result = check_response_header(&None);
|
||||
assert!(matches!(
|
||||
result.unwrap_err(),
|
||||
IllegalDatabaseResponse { .. }
|
||||
));
|
||||
|
||||
let result = check_response_header(Some(ResponseHeader { status: None }));
|
||||
let result = check_response_header(&Some(ResponseHeader { status: None }));
|
||||
assert!(matches!(
|
||||
result.unwrap_err(),
|
||||
IllegalDatabaseResponse { .. }
|
||||
));
|
||||
|
||||
let result = check_response_header(Some(ResponseHeader {
|
||||
let result = check_response_header(&Some(ResponseHeader {
|
||||
status: Some(PbStatus {
|
||||
status_code: StatusCode::Success as u32,
|
||||
err_msg: String::default(),
|
||||
@@ -255,7 +254,7 @@ mod test {
|
||||
}));
|
||||
assert!(result.is_ok());
|
||||
|
||||
let result = check_response_header(Some(ResponseHeader {
|
||||
let result = check_response_header(&Some(ResponseHeader {
|
||||
status: Some(PbStatus {
|
||||
status_code: u32::MAX,
|
||||
err_msg: String::default(),
|
||||
@@ -266,7 +265,7 @@ mod test {
|
||||
IllegalDatabaseResponse { .. }
|
||||
));
|
||||
|
||||
let result = check_response_header(Some(ResponseHeader {
|
||||
let result = check_response_header(&Some(ResponseHeader {
|
||||
status: Some(PbStatus {
|
||||
status_code: StatusCode::Internal as u32,
|
||||
err_msg: "blabla".to_string(),
|
||||
|
||||
@@ -12,8 +12,10 @@ path = "src/bin/greptime.rs"
|
||||
[features]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
async-trait.workspace = true
|
||||
auth.workspace = true
|
||||
catalog.workspace = true
|
||||
@@ -49,7 +51,6 @@ meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
mito2.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
partition.workspace = true
|
||||
plugins.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
|
||||
@@ -13,5 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
fn main() {
|
||||
// Trigger this script if the git branch/commit changes
|
||||
println!("cargo:rerun-if-changed=.git/refs/heads");
|
||||
|
||||
common_version::setup_build_info();
|
||||
}
|
||||
|
||||
@@ -62,7 +62,9 @@ pub struct BenchTableMetadataCommand {
|
||||
|
||||
impl BenchTableMetadataCommand {
|
||||
pub async fn build(&self) -> Result<Instance> {
|
||||
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
|
||||
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
|
||||
|
||||
|
||||
@@ -106,9 +106,15 @@ impl TableMetadataBencher {
|
||||
.await
|
||||
.unwrap();
|
||||
let start = Instant::now();
|
||||
let table_info = table_info.unwrap();
|
||||
let table_id = table_info.table_info.ident.table_id;
|
||||
let _ = self
|
||||
.table_metadata_manager
|
||||
.delete_table_metadata(&table_info.unwrap(), &table_route.unwrap())
|
||||
.delete_table_metadata(
|
||||
table_id,
|
||||
&table_info.table_name(),
|
||||
table_route.unwrap().region_routes().unwrap(),
|
||||
)
|
||||
.await;
|
||||
start.elapsed()
|
||||
},
|
||||
|
||||
@@ -19,8 +19,7 @@ use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::api::v1::auth_header::AuthScheme;
|
||||
use client::api::v1::Basic;
|
||||
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::Output;
|
||||
use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME};
|
||||
use common_recordbatch::util::collect;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
@@ -142,7 +141,7 @@ impl Export {
|
||||
.with_context(|_| RequestDatabaseSnafu {
|
||||
sql: "show databases".to_string(),
|
||||
})?;
|
||||
let Output::Stream(stream) = result else {
|
||||
let OutputData::Stream(stream) = result.data else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let record_batch = collect(stream)
|
||||
@@ -183,7 +182,7 @@ impl Export {
|
||||
.sql(&sql)
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu { sql })?;
|
||||
let Output::Stream(stream) = result else {
|
||||
let OutputData::Stream(stream) = result.data else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let Some(record_batch) = collect(stream)
|
||||
@@ -235,7 +234,7 @@ impl Export {
|
||||
.sql(&sql)
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu { sql })?;
|
||||
let Output::Stream(stream) = result else {
|
||||
let OutputData::Stream(stream) = result.data else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let record_batch = collect(stream)
|
||||
|
||||
@@ -19,9 +19,10 @@ use std::time::Instant;
|
||||
use catalog::kvbackend::{
|
||||
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_base::Plugins;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_meta::cache_invalidator::MultiCacheInvalidator;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::logging;
|
||||
@@ -184,15 +185,15 @@ impl Repl {
|
||||
}
|
||||
.context(RequestDatabaseSnafu { sql: &sql })?;
|
||||
|
||||
let either = match output {
|
||||
Output::Stream(s) => {
|
||||
let either = match output.data {
|
||||
OutputData::Stream(s) => {
|
||||
let x = RecordBatches::try_collect(s)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?;
|
||||
Either::Left(x)
|
||||
}
|
||||
Output::RecordBatches(x) => Either::Left(x),
|
||||
Output::AffectedRows(rows) => Either::Right(rows),
|
||||
OutputData::RecordBatches(x) => Either::Left(x),
|
||||
OutputData::AffectedRows(rows) => Either::Right(rows),
|
||||
};
|
||||
|
||||
let end = Instant::now();
|
||||
@@ -252,14 +253,17 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
|
||||
let cached_meta_backend =
|
||||
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
|
||||
|
||||
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
|
||||
cached_meta_backend.clone(),
|
||||
]));
|
||||
let catalog_list =
|
||||
KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
|
||||
KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
|
||||
let plugins: Plugins = Default::default();
|
||||
let state = Arc::new(QueryEngineState::new(
|
||||
catalog_list,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
plugins.clone(),
|
||||
));
|
||||
|
||||
@@ -70,7 +70,7 @@ impl UpgradeCommand {
|
||||
etcd_addr: &self.etcd_addr,
|
||||
})?;
|
||||
let tool = MigrateTableMetadata {
|
||||
etcd_store: EtcdStore::with_etcd_client(client),
|
||||
etcd_store: EtcdStore::with_etcd_client(client, 128),
|
||||
dryrun: self.dryrun,
|
||||
skip_catalog_keys: self.skip_catalog_keys,
|
||||
skip_table_global_keys: self.skip_table_global_keys,
|
||||
|
||||
@@ -43,6 +43,10 @@ impl Instance {
|
||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||
&mut self.datanode
|
||||
}
|
||||
|
||||
pub fn datanode(&self) -> &Datanode {
|
||||
&self.datanode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -235,6 +239,7 @@ impl StartCommand {
|
||||
.with_default_grpc_server(&datanode.region_server())
|
||||
.enable_http_service()
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
datanode.setup_services(services);
|
||||
|
||||
|
||||
@@ -16,9 +16,10 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use catalog::kvbackend::CachedMetaKvBackendBuilder;
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
|
||||
use clap::Parser;
|
||||
use client::client_manager::DatanodeClients;
|
||||
use common_meta::cache_invalidator::MultiCacheInvalidator;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::logging;
|
||||
@@ -43,13 +44,17 @@ pub struct Instance {
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
fn new(frontend: FeInstance) -> Self {
|
||||
pub fn new(frontend: FeInstance) -> Self {
|
||||
Self { frontend }
|
||||
}
|
||||
|
||||
pub fn mut_inner(&mut self) -> &mut FeInstance {
|
||||
&mut self.frontend
|
||||
}
|
||||
|
||||
pub fn inner(&self) -> &FeInstance {
|
||||
&self.frontend
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -243,11 +248,19 @@ impl StartCommand {
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend = Arc::new(cached_meta_backend);
|
||||
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
|
||||
cached_meta_backend.clone(),
|
||||
]));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
cached_meta_backend.clone(),
|
||||
multi_cache_invalidator.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateTableCacheHandler::new(
|
||||
cached_meta_backend.clone(),
|
||||
multi_cache_invalidator.clone(),
|
||||
)),
|
||||
]);
|
||||
|
||||
@@ -259,11 +272,12 @@ impl StartCommand {
|
||||
|
||||
let mut instance = FrontendBuilder::new(
|
||||
cached_meta_backend.clone(),
|
||||
catalog_manager,
|
||||
Arc::new(DatanodeClients::default()),
|
||||
meta_client,
|
||||
)
|
||||
.with_cache_invalidator(cached_meta_backend)
|
||||
.with_plugin(plugins.clone())
|
||||
.with_cache_invalidator(multi_cache_invalidator)
|
||||
.with_heartbeat_task(heartbeat_task)
|
||||
.try_build()
|
||||
.await
|
||||
@@ -271,6 +285,7 @@ impl StartCommand {
|
||||
|
||||
let servers = Services::new(opts.clone(), Arc::new(instance.clone()), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
instance
|
||||
.build_servers(opts, servers)
|
||||
|
||||
@@ -32,11 +32,11 @@ lazy_static::lazy_static! {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait App {
|
||||
pub trait App: Send {
|
||||
fn name(&self) -> &str;
|
||||
|
||||
/// A hook for implementor to make something happened before actual startup. Defaults to no-op.
|
||||
fn pre_start(&mut self) -> error::Result<()> {
|
||||
async fn pre_start(&mut self) -> error::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -46,24 +46,21 @@ pub trait App {
|
||||
}
|
||||
|
||||
pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
|
||||
let name = app.name().to_string();
|
||||
info!("Starting app: {}", app.name());
|
||||
|
||||
app.pre_start()?;
|
||||
app.pre_start().await?;
|
||||
|
||||
tokio::select! {
|
||||
result = app.start() => {
|
||||
if let Err(err) = result {
|
||||
error!(err; "Failed to start app {name}!");
|
||||
}
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
if let Err(err) = app.stop().await {
|
||||
error!(err; "Failed to stop app {name}!");
|
||||
}
|
||||
info!("Goodbye!");
|
||||
}
|
||||
app.start().await?;
|
||||
|
||||
if let Err(e) = tokio::signal::ctrl_c().await {
|
||||
error!("Failed to listen for ctrl-c signal: {}", e);
|
||||
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
|
||||
// the underlying system. So we stop the app instead of running nonetheless to let people
|
||||
// investigate the issue.
|
||||
}
|
||||
|
||||
app.stop().await?;
|
||||
info!("Goodbye!");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -117,10 +117,12 @@ struct StartCommand {
|
||||
/// The working home directory of this metasrv instance.
|
||||
#[clap(long)]
|
||||
data_home: Option<String>,
|
||||
|
||||
/// If it's not empty, the metasrv will store all data with this key prefix.
|
||||
#[clap(long, default_value = "")]
|
||||
store_key_prefix: String,
|
||||
/// The max operations per txn
|
||||
#[clap(long)]
|
||||
max_txn_ops: Option<usize>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -181,6 +183,10 @@ impl StartCommand {
|
||||
opts.store_key_prefix = self.store_key_prefix.clone()
|
||||
}
|
||||
|
||||
if let Some(max_txn_ops) = self.max_txn_ops {
|
||||
opts.max_txn_ops = max_txn_ops;
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
opts.http.disable_dashboard = true;
|
||||
|
||||
|
||||
@@ -16,13 +16,14 @@ use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_config::{metadata_store_dir, KvBackendConfig};
|
||||
use common_meta::cache_invalidator::DummyCacheInvalidator;
|
||||
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
|
||||
use common_meta::datanode_manager::DatanodeManagerRef;
|
||||
use common_meta::ddl::table_meta::TableMetadataAllocator;
|
||||
use common_meta::ddl::DdlTaskExecutorRef;
|
||||
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
|
||||
use common_meta::ddl::ProcedureExecutorRef;
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
@@ -399,6 +400,10 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
|
||||
let catalog_manager =
|
||||
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
|
||||
|
||||
let builder =
|
||||
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
|
||||
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
|
||||
@@ -419,28 +424,34 @@ impl StartCommand {
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
let table_meta_allocator = TableMetadataAllocator::new(
|
||||
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
table_id_sequence,
|
||||
wal_options_allocator.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
);
|
||||
));
|
||||
|
||||
let ddl_task_executor = Self::create_ddl_task_executor(
|
||||
table_metadata_manager,
|
||||
procedure_manager.clone(),
|
||||
datanode_manager.clone(),
|
||||
multi_cache_invalidator,
|
||||
table_meta_allocator,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
|
||||
.with_plugin(fe_plugins.clone())
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
let mut frontend = FrontendBuilder::new(
|
||||
kv_backend,
|
||||
catalog_manager,
|
||||
datanode_manager,
|
||||
ddl_task_executor,
|
||||
)
|
||||
.with_plugin(fe_plugins.clone())
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
frontend
|
||||
.build_servers(fe_opts, servers)
|
||||
@@ -458,21 +469,23 @@ impl StartCommand {
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
table_meta_allocator: TableMetadataAllocator,
|
||||
) -> Result<DdlTaskExecutorRef> {
|
||||
let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
table_meta_allocator: TableMetadataAllocatorRef,
|
||||
) -> Result<ProcedureExecutorRef> {
|
||||
let procedure_executor: ProcedureExecutorRef = Arc::new(
|
||||
DdlManager::try_new(
|
||||
procedure_manager,
|
||||
datanode_manager,
|
||||
Arc::new(DummyCacheInvalidator),
|
||||
cache_invalidator,
|
||||
table_metadata_manager,
|
||||
table_meta_allocator,
|
||||
Arc::new(MemoryRegionKeeper::default()),
|
||||
true,
|
||||
)
|
||||
.context(InitDdlManagerSnafu)?,
|
||||
);
|
||||
|
||||
Ok(ddl_task_executor)
|
||||
Ok(procedure_executor)
|
||||
}
|
||||
|
||||
pub async fn create_table_metadata_manager(
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
bitvec = "1.0"
|
||||
|
||||
@@ -21,6 +21,8 @@ pub mod readable_size;
|
||||
use core::any::Any;
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
|
||||
pub type AffectedRows = usize;
|
||||
|
||||
pub use bit_vec::BitVec;
|
||||
|
||||
/// [`Plugins`] is a wrapper of Arc contents.
|
||||
|
||||
@@ -1,20 +1,6 @@
|
||||
// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.
|
||||
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs
|
||||
// This file is copied from https://github.com/tikv/raft-engine/blob/0.3.0/src/util.rs
|
||||
|
||||
use std::fmt::{self, Debug, Display, Write};
|
||||
use std::ops::{Div, Mul};
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -55,10 +55,10 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
|
||||
/// schema name
|
||||
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
|
||||
/// `<catalog>` and `<schema>`.
|
||||
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
|
||||
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
|
||||
match parse_optional_catalog_and_schema_from_db_string(db) {
|
||||
(Some(catalog), schema) => (catalog, schema),
|
||||
(None, schema) => (DEFAULT_CATALOG_NAME, schema),
|
||||
(None, schema) => (DEFAULT_CATALOG_NAME.to_string(), schema),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,12 +66,12 @@ pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
|
||||
///
|
||||
/// Similar to [`parse_catalog_and_schema_from_db_string`] but returns an optional
|
||||
/// catalog if it's not provided in the database name.
|
||||
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<&str>, &str) {
|
||||
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<String>, String) {
|
||||
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
|
||||
if parts.len() == 2 {
|
||||
(Some(parts[0]), parts[1])
|
||||
(Some(parts[0].to_lowercase()), parts[1].to_lowercase())
|
||||
} else {
|
||||
(None, db)
|
||||
(None, db.to_lowercase())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,32 +88,37 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_catalog_and_schema() {
|
||||
assert_eq!(
|
||||
(DEFAULT_CATALOG_NAME, "fullschema"),
|
||||
(DEFAULT_CATALOG_NAME.to_string(), "fullschema".to_string()),
|
||||
parse_catalog_and_schema_from_db_string("fullschema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
("catalog", "schema"),
|
||||
("catalog".to_string(), "schema".to_string()),
|
||||
parse_catalog_and_schema_from_db_string("catalog-schema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
("catalog", "schema1-schema2"),
|
||||
("catalog".to_string(), "schema1-schema2".to_string()),
|
||||
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
(None, "fullschema"),
|
||||
(None, "fullschema".to_string()),
|
||||
parse_optional_catalog_and_schema_from_db_string("fullschema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
(Some("catalog"), "schema"),
|
||||
(Some("catalog".to_string()), "schema".to_string()),
|
||||
parse_optional_catalog_and_schema_from_db_string("catalog-schema")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
(Some("catalog"), "schema1-schema2"),
|
||||
(Some("catalog".to_string()), "schema".to_string()),
|
||||
parse_optional_catalog_and_schema_from_db_string("CATALOG-SCHEMA")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
(Some("catalog".to_string()), "schema1-schema2".to_string()),
|
||||
parse_optional_catalog_and_schema_from_db_string("catalog-schema1-schema2")
|
||||
);
|
||||
}
|
||||
|
||||
@@ -4,9 +4,11 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-base.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
serde.workspace = true
|
||||
sysinfo.workspace = true
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
|
||||
@@ -28,12 +28,15 @@ const REGION: &str = "region";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
|
||||
|
||||
pub fn is_supported_in_s3(key: &str) -> bool {
|
||||
key == ENDPOINT
|
||||
|| key == ACCESS_KEY_ID
|
||||
|| key == SECRET_ACCESS_KEY
|
||||
|| key == SESSION_TOKEN
|
||||
|| key == REGION
|
||||
|| key == ENABLE_VIRTUAL_HOST_STYLE
|
||||
[
|
||||
ENDPOINT,
|
||||
ACCESS_KEY_ID,
|
||||
SECRET_ACCESS_KEY,
|
||||
SESSION_TOKEN,
|
||||
REGION,
|
||||
ENABLE_VIRTUAL_HOST_STYLE,
|
||||
]
|
||||
.contains(&key)
|
||||
}
|
||||
|
||||
pub fn build_s3_backend(
|
||||
|
||||
@@ -4,8 +4,10 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
bigdecimal.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
snafu.workspace = true
|
||||
strum.workspace = true
|
||||
|
||||
@@ -19,7 +19,9 @@ pub mod format;
|
||||
pub mod mock;
|
||||
pub mod status_code;
|
||||
|
||||
pub use snafu;
|
||||
|
||||
// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
|
||||
// please define in `src/servers/src/http/header.rs`.
|
||||
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
|
||||
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";
|
||||
|
||||
pub use snafu;
|
||||
|
||||
@@ -4,13 +4,18 @@ edition.workspace = true
|
||||
version.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
async-trait.workspace = true
|
||||
chrono-tz = "0.6"
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-query.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
@@ -18,14 +23,16 @@ common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
libc = "0.2"
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
paste = "1.0"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
statrs = "0.16"
|
||||
store-api.workspace = true
|
||||
table.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -30,6 +30,17 @@ pub struct FunctionContext {
|
||||
pub state: Arc<FunctionState>,
|
||||
}
|
||||
|
||||
impl FunctionContext {
|
||||
/// Create a mock [`FunctionContext`] for test.
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn mock() -> Self {
|
||||
Self {
|
||||
query_ctx: QueryContextBuilder::default().build(),
|
||||
state: Arc::new(FunctionState::mock()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for FunctionContext {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
|
||||
@@ -21,6 +21,7 @@ use once_cell::sync::Lazy;
|
||||
use crate::function::FunctionRef;
|
||||
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
|
||||
use crate::scalars::date::DateFunction;
|
||||
use crate::scalars::expression::ExpressionFunction;
|
||||
use crate::scalars::math::MathFunction;
|
||||
use crate::scalars::numpy::NumpyFunction;
|
||||
use crate::scalars::timestamp::TimestampFunction;
|
||||
@@ -80,6 +81,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
NumpyFunction::register(&function_registry);
|
||||
TimestampFunction::register(&function_registry);
|
||||
DateFunction::register(&function_registry);
|
||||
ExpressionFunction::register(&function_registry);
|
||||
|
||||
// Aggregate functions
|
||||
AggregateFunctions::register(&function_registry);
|
||||
|
||||
@@ -13,42 +13,58 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::ProcedureStateResponse;
|
||||
use async_trait::async_trait;
|
||||
use common_base::AffectedRows;
|
||||
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
|
||||
use common_query::error::Result;
|
||||
use common_query::Output;
|
||||
use session::context::QueryContextRef;
|
||||
use table::requests::{DeleteRequest, InsertRequest};
|
||||
|
||||
pub type AffectedRows = usize;
|
||||
use store_api::storage::RegionId;
|
||||
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
|
||||
|
||||
/// A trait for handling table mutations in `QueryEngine`.
|
||||
#[async_trait]
|
||||
pub trait TableMutationHandler: Send + Sync {
|
||||
/// Inserts rows into the table.
|
||||
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
|
||||
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<Output>;
|
||||
|
||||
/// Delete rows from the table.
|
||||
async fn delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
|
||||
|
||||
/// Migrate a region from source peer to target peer, returns the procedure id if success.
|
||||
async fn migrate_region(
|
||||
/// Trigger a flush task for table.
|
||||
async fn flush(&self, request: FlushTableRequest, ctx: QueryContextRef)
|
||||
-> Result<AffectedRows>;
|
||||
|
||||
/// Trigger a compaction task for table.
|
||||
async fn compact(
|
||||
&self,
|
||||
region_id: u64,
|
||||
from_peer: u64,
|
||||
to_peer: u64,
|
||||
replay_timeout: Duration,
|
||||
) -> Result<String>;
|
||||
request: CompactTableRequest,
|
||||
ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows>;
|
||||
|
||||
/// Trigger a flush task for a table region.
|
||||
async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
|
||||
-> Result<AffectedRows>;
|
||||
|
||||
/// Trigger a compaction task for a table region.
|
||||
async fn compact_region(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
ctx: QueryContextRef,
|
||||
) -> Result<AffectedRows>;
|
||||
}
|
||||
|
||||
/// A trait for handling meta service requests in `QueryEngine`.
|
||||
/// A trait for handling procedure service requests in `QueryEngine`.
|
||||
#[async_trait]
|
||||
pub trait MetaServiceHandler: Send + Sync {
|
||||
pub trait ProcedureServiceHandler: Send + Sync {
|
||||
/// Migrate a region from source peer to target peer, returns the procedure id if success.
|
||||
async fn migrate_region(&self, request: MigrateRegionRequest) -> Result<Option<String>>;
|
||||
|
||||
/// Query the procedure' state by its id
|
||||
async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
|
||||
}
|
||||
|
||||
pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;
|
||||
|
||||
pub type MetaServiceHandlerRef = Arc<dyn MetaServiceHandler>;
|
||||
pub type ProcedureServiceHandlerRef = Arc<dyn ProcedureServiceHandler>;
|
||||
|
||||
@@ -12,8 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_query::error::{InvalidInputTypeSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::cast::cast;
|
||||
use datatypes::value::ValueRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
/// Create a function signature with oneof signatures of interleaving two arguments.
|
||||
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
|
||||
@@ -27,3 +31,15 @@ pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>)
|
||||
|
||||
Signature::one_of(sigs, Volatility::Immutable)
|
||||
}
|
||||
|
||||
/// Cast a [`ValueRef`] to u64, returns `None` if fails
|
||||
pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
|
||||
cast((*value).into(), &ConcreteDataType::uint64_datatype())
|
||||
.context(InvalidInputTypeSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to cast input into uint64, actual type: {:#?}",
|
||||
value.data_type(),
|
||||
),
|
||||
})
|
||||
.map(|v| v.as_u64())
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod macros;
|
||||
pub mod scalars;
|
||||
mod system;
|
||||
mod table;
|
||||
|
||||
27
src/common/function/src/macros.rs
Normal file
27
src/common/function/src/macros.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Ensure current function is invokded under `greptime` catalog.
|
||||
#[macro_export]
|
||||
macro_rules! ensure_greptime {
|
||||
($func_ctx: expr) => {{
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
snafu::ensure!(
|
||||
$func_ctx.query_ctx.current_catalog() == DEFAULT_CATALOG_NAME,
|
||||
common_query::error::PermissionDeniedSnafu {
|
||||
err_msg: format!("current catalog is not {DEFAULT_CATALOG_NAME}")
|
||||
}
|
||||
);
|
||||
}};
|
||||
}
|
||||
@@ -14,8 +14,22 @@
|
||||
|
||||
mod binary;
|
||||
mod ctx;
|
||||
mod is_null;
|
||||
mod unary;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use binary::scalar_binary_op;
|
||||
pub use ctx::EvalContext;
|
||||
pub use unary::scalar_unary_op;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::expression::is_null::IsNullFunction;
|
||||
|
||||
pub(crate) struct ExpressionFunction;
|
||||
|
||||
impl ExpressionFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(IsNullFunction));
|
||||
}
|
||||
}
|
||||
|
||||
109
src/common/function/src/scalars/expression/is_null.rs
Normal file
109
src/common/function/src/scalars/expression/is_null.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use common_query::error::{ArrowComputeSnafu, InvalidFuncArgsSnafu};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datafusion::arrow::array::ArrayRef;
|
||||
use datafusion::arrow::compute::is_null;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::vectors::Helper;
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
const NAME: &str = "isnull";
|
||||
|
||||
/// The function to check whether an expression is NULL
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct IsNullFunction;
|
||||
|
||||
impl Display for IsNullFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for IsNullFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _: &[ConcreteDataType]) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::boolean_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::any(1, Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let values = &columns[0];
|
||||
let arrow_array = &values.to_arrow_array();
|
||||
let result = is_null(arrow_array).context(ArrowComputeSnafu)?;
|
||||
|
||||
Helper::try_into_vector(Arc::new(result) as ArrayRef).context(error::FromArrowArraySnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::vectors::{BooleanVector, Float32Vector};
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_is_null_function() {
|
||||
let is_null = IsNullFunction;
|
||||
assert_eq!("isnull", is_null.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::boolean_datatype(),
|
||||
is_null.return_type(&[]).unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
is_null.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Any(1),
|
||||
volatility: Volatility::Immutable
|
||||
}
|
||||
);
|
||||
let values = vec![None, Some(3.0), None];
|
||||
|
||||
let args: Vec<VectorRef> = vec![Arc::new(Float32Vector::from(values))];
|
||||
let vector = is_null.eval(FunctionContext::default(), &args).unwrap();
|
||||
let expect: VectorRef = Arc::new(BooleanVector::from_vec(vec![true, false, true]));
|
||||
assert_eq!(expect, vector);
|
||||
}
|
||||
}
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod clamp;
|
||||
mod modulo;
|
||||
mod pow;
|
||||
mod rate;
|
||||
@@ -19,6 +20,7 @@ mod rate;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use clamp::ClampFunction;
|
||||
use common_query::error::{GeneralDataFusionSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::error::DataFusionError;
|
||||
@@ -40,7 +42,8 @@ impl MathFunction {
|
||||
registry.register(Arc::new(ModuloFunction));
|
||||
registry.register(Arc::new(PowFunction));
|
||||
registry.register(Arc::new(RateFunction));
|
||||
registry.register(Arc::new(RangeFunction))
|
||||
registry.register(Arc::new(RangeFunction));
|
||||
registry.register(Arc::new(ClampFunction));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
403
src/common/function/src/scalars/math/clamp.rs
Normal file
403
src/common/function/src/scalars/math/clamp.rs
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self, Display};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datafusion::arrow::array::{ArrayIter, PrimitiveArray};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::data_type::{ConcreteDataType, DataType};
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::value::TryAsPrimitive;
|
||||
use datatypes::vectors::PrimitiveVector;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::function::Function;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ClampFunction;
|
||||
|
||||
const CLAMP_NAME: &str = "clamp";
|
||||
|
||||
impl Function for ClampFunction {
|
||||
fn name(&self) -> &str {
|
||||
CLAMP_NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
// Type check is done by `signature`
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
// input, min, max
|
||||
Signature::uniform(3, ConcreteDataType::numerics(), Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: crate::function::FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 3, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[0].data_type().is_numeric(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The first arg's type is not numeric, have: {}",
|
||||
columns[0].data_type()
|
||||
),
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[0].data_type() == columns[1].data_type()
|
||||
&& columns[1].data_type() == columns[2].data_type(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Arguments don't have identical types: {}, {}, {}",
|
||||
columns[0].data_type(),
|
||||
columns[1].data_type(),
|
||||
columns[2].data_type()
|
||||
),
|
||||
}
|
||||
);
|
||||
ensure!(
|
||||
columns[1].len() == 1 && columns[2].len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The second and third args should be scalar, have: {:?}, {:?}",
|
||||
columns[1], columns[2]
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
|
||||
let input_array = columns[0].to_arrow_array();
|
||||
let input = input_array
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<<$S as LogicalPrimitiveType>::ArrowPrimitive>>()
|
||||
.unwrap();
|
||||
|
||||
let min = TryAsPrimitive::<$S>::try_as_primitive(&columns[1].get(0))
|
||||
.with_context(|| {
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: "The second arg should not be none",
|
||||
}
|
||||
})?;
|
||||
let max = TryAsPrimitive::<$S>::try_as_primitive(&columns[2].get(0))
|
||||
.with_context(|| {
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: "The third arg should not be none",
|
||||
}
|
||||
})?;
|
||||
|
||||
// ensure min <= max
|
||||
ensure!(
|
||||
min <= max,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The second arg should be less than or equal to the third arg, have: {:?}, {:?}",
|
||||
columns[1], columns[2]
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
clamp_impl::<$S, true, true>(input, min, max)
|
||||
},{
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ClampFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", CLAMP_NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: bool>(
|
||||
input: &PrimitiveArray<T::ArrowPrimitive>,
|
||||
min: T::Native,
|
||||
max: T::Native,
|
||||
) -> Result<VectorRef> {
|
||||
common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");
|
||||
|
||||
let iter = ArrayIter::new(input);
|
||||
let result = iter.map(|x| {
|
||||
x.map(|x| {
|
||||
if CLAMP_MIN && x < min {
|
||||
min
|
||||
} else if CLAMP_MAX && x > max {
|
||||
max
|
||||
} else {
|
||||
x
|
||||
}
|
||||
})
|
||||
});
|
||||
let result = PrimitiveArray::<T::ArrowPrimitive>::from_iter(result);
|
||||
Ok(Arc::new(PrimitiveVector::<T>::from(result)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {

    use std::sync::Arc;

    use datatypes::prelude::ScalarVector;
    use datatypes::vectors::{
        ConstantVector, Float64Vector, Int64Vector, StringVector, UInt64Vector,
    };

    use super::*;
    use crate::function::FunctionContext;

    #[test]
    fn clamp_i64() {
        let inputs = [
            (
                vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
                -1,
                10,
                vec![Some(-1), Some(-1), Some(-1), Some(0), Some(1), Some(2)],
            ),
            (
                vec![Some(-3), Some(-2), Some(-1), Some(0), Some(1), Some(2)],
                0,
                0,
                vec![Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)],
            ),
            (
                vec![Some(-3), None, Some(-1), None, None, Some(2)],
                -2,
                1,
                vec![Some(-2), None, Some(-1), None, None, Some(1)],
            ),
            (
                vec![None, None, None, None, None],
                0,
                1,
                vec![None, None, None, None, None],
            ),
        ];

        let func = ClampFunction;
        for (in_data, min, max, expected) in inputs {
            let args = [
                Arc::new(Int64Vector::from(in_data)) as _,
                Arc::new(Int64Vector::from_vec(vec![min])) as _,
                Arc::new(Int64Vector::from_vec(vec![max])) as _,
            ];
            let result = func
                .eval(FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Int64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_u64() {
        let inputs = [
            (
                vec![Some(0), Some(1), Some(2), Some(3), Some(4), Some(5)],
                1,
                3,
                vec![Some(1), Some(1), Some(2), Some(3), Some(3), Some(3)],
            ),
            (
                vec![Some(0), Some(1), Some(2), Some(3), Some(4), Some(5)],
                0,
                0,
                vec![Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)],
            ),
            (
                vec![Some(0), None, Some(2), None, None, Some(5)],
                1,
                3,
                vec![Some(1), None, Some(2), None, None, Some(3)],
            ),
            (
                vec![None, None, None, None, None],
                0,
                1,
                vec![None, None, None, None, None],
            ),
        ];

        let func = ClampFunction;
        for (in_data, min, max, expected) in inputs {
            let args = [
                Arc::new(UInt64Vector::from(in_data)) as _,
                Arc::new(UInt64Vector::from_vec(vec![min])) as _,
                Arc::new(UInt64Vector::from_vec(vec![max])) as _,
            ];
            let result = func
                .eval(FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(UInt64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_f64() {
        let inputs = [
            (
                vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
                -1.0,
                10.0,
                vec![Some(-1.0), Some(-1.0), Some(-1.0), Some(0.0), Some(1.0)],
            ),
            (
                vec![Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)],
                0.0,
                0.0,
                vec![Some(0.0), Some(0.0), Some(0.0), Some(0.0)],
            ),
            (
                vec![Some(-3.0), None, Some(-1.0), None, None, Some(2.0)],
                -2.0,
                1.0,
                vec![Some(-2.0), None, Some(-1.0), None, None, Some(1.0)],
            ),
            (
                vec![None, None, None, None, None],
                0.0,
                1.0,
                vec![None, None, None, None, None],
            ),
        ];

        let func = ClampFunction;
        for (in_data, min, max, expected) in inputs {
            let args = [
                Arc::new(Float64Vector::from(in_data)) as _,
                Arc::new(Float64Vector::from_vec(vec![min])) as _,
                Arc::new(Float64Vector::from_vec(vec![max])) as _,
            ];
            let result = func
                .eval(FunctionContext::default(), args.as_slice())
                .unwrap();
            let expected: VectorRef = Arc::new(Float64Vector::from(expected));
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn clamp_const_i32() {
        let input = vec![Some(5)];
        let min = 2;
        let max = 4;

        let func = ClampFunction;
        let args = [
            Arc::new(ConstantVector::new(Arc::new(Int64Vector::from(input)), 1)) as _,
            Arc::new(Int64Vector::from_vec(vec![min])) as _,
            Arc::new(Int64Vector::from_vec(vec![max])) as _,
        ];
        let result = func
            .eval(FunctionContext::default(), args.as_slice())
            .unwrap();
        let expected: VectorRef = Arc::new(Int64Vector::from(vec![Some(4)]));
        assert_eq!(expected, result);
    }

    #[test]
    fn clamp_invalid_min_max() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let min = 10.0;
        let max = -1.0;

        let func = ClampFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Float64Vector::from_vec(vec![min])) as _,
            Arc::new(Float64Vector::from_vec(vec![max])) as _,
        ];
        let result = func.eval(FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_type_not_match() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let min = -1;
        let max = 10;

        let func = ClampFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Int64Vector::from_vec(vec![min])) as _,
            Arc::new(UInt64Vector::from_vec(vec![max])) as _,
        ];
        let result = func.eval(FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_min_is_not_scalar() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let min = -10.0;
        let max = 1.0;

        let func = ClampFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Float64Vector::from_vec(vec![min, min])) as _,
            Arc::new(Float64Vector::from_vec(vec![max])) as _,
        ];
        let result = func.eval(FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_no_max() {
        let input = vec![Some(-3.0), Some(-2.0), Some(-1.0), Some(0.0), Some(1.0)];
        let min = -10.0;

        let func = ClampFunction;
        let args = [
            Arc::new(Float64Vector::from(input)) as _,
            Arc::new(Float64Vector::from_vec(vec![min])) as _,
        ];
        let result = func.eval(FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }

    #[test]
    fn clamp_on_string() {
        let input = vec![Some("foo"), Some("foo"), Some("foo"), Some("foo")];

        let func = ClampFunction;
        let args = [
            Arc::new(StringVector::from(input)) as _,
            Arc::new(StringVector::from_vec(vec!["bar"])) as _,
            Arc::new(StringVector::from_vec(vec!["baz"])) as _,
        ];
        let result = func.eval(FunctionContext::default(), args.as_slice());
        assert!(result.is_err());
    }
}
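
Read together, the cases above pin down the scalar rule the vectorised ClampFunction implements: values below the second argument are raised to it, values above the third are lowered to it, and nulls pass through. A tiny reference check against std's own clamp, for illustration only (not part of the diff):

// Illustration: the scalar behaviour the vectorised tests above encode.
#[test]
fn scalar_clamp_reference() {
    assert_eq!((-3i64).clamp(-1, 10), -1); // below min -> raised to min
    assert_eq!(2i64.clamp(-1, 10), 2);     // inside the range -> unchanged
    assert_eq!(5i64.clamp(2, 4), 4);       // the clamp_const_i32 case -> lowered to max
}
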
@@ -128,7 +128,7 @@ mod tests {
        ];
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 4);
        for i in 0..3 {
        for i in 0..4 {
            let p: i64 = (nums[i] % divs[i]) as i64;
            assert!(matches!(result.get(i), Value::Int64(v) if v == p));
        }
@@ -160,7 +160,7 @@ mod tests {
        ];
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 4);
        for i in 0..3 {
        for i in 0..4 {
            let p: u64 = (nums[i] % divs[i]) as u64;
            assert!(matches!(result.get(i), Value::UInt64(v) if v == p));
        }
@@ -192,7 +192,7 @@ mod tests {
        ];
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 4);
        for i in 0..3 {
        for i in 0..4 {
            let p: f64 = nums[i] % divs[i];
            assert!(matches!(result.get(i), Value::Float64(v) if v == p));
        }
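
All three hunks fix the same off-by-one: each test asserts four result rows but only looped over the first three, so the last row was never checked. An index-free way to keep the bound tied to the inputs, sketched under the assumption that `nums`, `divs` and `result` are the vectors set up earlier in these tests:

// Sketch only: zip the inputs so the loop bound cannot drift from the data.
for (i, (n, d)) in nums.iter().zip(divs.iter()).enumerate() {
    let p: i64 = (n % d) as i64;
    assert!(matches!(result.get(i), Value::Int64(v) if v == p));
}
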
@@ -14,9 +14,11 @@

use std::sync::Arc;
mod greatest;
mod to_timezone;
mod to_unixtime;

use greatest::GreatestFunction;
use to_timezone::ToTimezoneFunction;
use to_unixtime::ToUnixtimeFunction;

use crate::function_registry::FunctionRegistry;
@@ -25,6 +27,7 @@ pub(crate) struct TimestampFunction;

impl TimestampFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(ToTimezoneFunction));
        registry.register(Arc::new(ToUnixtimeFunction));
        registry.register(Arc::new(GreatestFunction));
    }

313  src/common/function/src/scalars/timestamp/to_timezone.rs  Normal file
@@ -0,0 +1,313 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::sync::Arc;

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use common_time::{Timestamp, Timezone};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::{
    Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
    TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use snafu::{ensure, OptionExt};

use crate::function::{Function, FunctionContext};
use crate::helper;

#[derive(Clone, Debug, Default)]
pub struct ToTimezoneFunction;

const NAME: &str = "to_timezone";

fn convert_to_timezone(arg: &str) -> Option<Timezone> {
    Timezone::from_tz_string(arg).ok()
}

fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
    match arg {
        Value::Timestamp(ts) => Some(*ts),
        Value::Int64(i) => Some(Timestamp::new_millisecond(*i)),
        _ => None,
    }
}
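
The two helpers above are the whole coercion story: timezone strings go through Timezone::from_tz_string, and bare integers are taken as milliseconds since the Unix epoch. A hypothetical check of that behaviour, for illustration only (not part of the new file) and assuming from_tz_string rejects names it does not recognise:

// Hypothetical, not part of the new file: spell out what the helpers accept.
#[test]
fn conversion_helper_examples() {
    // Known IANA names parse; unknown ones become None.
    assert!(convert_to_timezone("Asia/Shanghai").is_some());
    assert!(convert_to_timezone("not/a_zone").is_none());

    // Plain Int64 values are read as milliseconds since the epoch.
    assert_eq!(
        convert_to_timestamp(&Value::Int64(1_000)),
        Some(Timestamp::new_millisecond(1_000))
    );

    // Anything else is rejected.
    assert!(convert_to_timestamp(&Value::Null).is_none());
}
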
impl fmt::Display for ToTimezoneFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "TO_TIMEZONE")
    }
}

impl Function for ToTimezoneFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        // type checked by signature - MUST BE timestamp
        Ok(input_types[0].clone())
    }

    fn signature(&self) -> Signature {
        helper::one_of_sigs2(
            vec![
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::timestamp_second_datatype(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                ConcreteDataType::timestamp_microsecond_datatype(),
                ConcreteDataType::timestamp_nanosecond_datatype(),
            ],
            vec![ConcreteDataType::string_datatype()],
        )
    }

    fn eval(&self, _ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly 2, have: {}",
                    columns.len()
                ),
            }
        );

        let array = columns[0].to_arrow_array();
        let times = match columns[0].data_type() {
            ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
                let vector = Int64Vector::try_from_arrow_array(array).unwrap();
                (0..vector.len())
                    .map(|i| convert_to_timestamp(&vector.get(i)))
                    .collect::<Vec<_>>()
            }
            ConcreteDataType::Timestamp(ts) => match ts {
                TimestampType::Second(_) => {
                    let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
                    (0..vector.len())
                        .map(|i| convert_to_timestamp(&vector.get(i)))
                        .collect::<Vec<_>>()
                }
                TimestampType::Millisecond(_) => {
                    let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
                    (0..vector.len())
                        .map(|i| convert_to_timestamp(&vector.get(i)))
                        .collect::<Vec<_>>()
                }
                TimestampType::Microsecond(_) => {
                    let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
                    (0..vector.len())
                        .map(|i| convert_to_timestamp(&vector.get(i)))
                        .collect::<Vec<_>>()
                }
                TimestampType::Nanosecond(_) => {
                    let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
                    (0..vector.len())
                        .map(|i| convert_to_timestamp(&vector.get(i)))
                        .collect::<Vec<_>>()
                }
            },
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
            }
            .fail()?,
        };

        let tzs = {
            let array = columns[1].to_arrow_array();
            let vector = StringVector::try_from_arrow_array(&array)
                .ok()
                .with_context(|| UnsupportedInputDataTypeSnafu {
                    function: NAME,
                    datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                })?;
            (0..vector.len())
                .map(|i| convert_to_timezone(&vector.get(i).to_string()))
                .collect::<Vec<_>>()
        };

        let result = times
            .iter()
            .zip(tzs.iter())
            .map(|(time, tz)| match (time, tz) {
                (Some(time), _) => Some(time.to_timezone_aware_string(tz.as_ref())),
                _ => None,
            })
            .collect::<Vec<Option<String>>>();
        Ok(Arc::new(StringVector::from(result)))
    }
}
#[cfg(test)]
mod tests {

    use datatypes::scalars::ScalarVector;
    use datatypes::timestamp::{
        TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
    };
    use datatypes::vectors::{Int64Vector, StringVector};

    use super::*;

    #[test]
    fn test_timestamp_to_timezone() {
        let f = ToTimezoneFunction;
        assert_eq!("to_timezone", f.name());

        let results = vec![
            Some("1969-12-31 19:00:01"),
            None,
            Some("1970-01-01 03:00:01"),
            None,
        ];
        let times: Vec<Option<TimestampSecond>> = vec![
            Some(TimestampSecond::new(1)),
            None,
            Some(TimestampSecond::new(1)),
            None,
        ];
        let ts_vector: TimestampSecondVector =
            TimestampSecondVector::from_owned_iterator(times.into_iter());
        let tzs = vec![Some("America/New_York"), None, Some("Europe/Moscow"), None];
        let args: Vec<VectorRef> = vec![
            Arc::new(ts_vector),
            Arc::new(StringVector::from(tzs.clone())),
        ];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        let expect_times: VectorRef = Arc::new(StringVector::from(results));
        assert_eq!(expect_times, vector);

        let results = vec![
            Some("1969-12-31 19:00:00.001"),
            None,
            Some("1970-01-01 03:00:00.001"),
            None,
        ];
        let times: Vec<Option<TimestampMillisecond>> = vec![
            Some(TimestampMillisecond::new(1)),
            None,
            Some(TimestampMillisecond::new(1)),
            None,
        ];
        let ts_vector: TimestampMillisecondVector =
            TimestampMillisecondVector::from_owned_iterator(times.into_iter());
        let args: Vec<VectorRef> = vec![
            Arc::new(ts_vector),
            Arc::new(StringVector::from(tzs.clone())),
        ];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        let expect_times: VectorRef = Arc::new(StringVector::from(results));
        assert_eq!(expect_times, vector);

        let results = vec![
            Some("1969-12-31 19:00:00.000001"),
            None,
            Some("1970-01-01 03:00:00.000001"),
            None,
        ];
        let times: Vec<Option<TimestampMicrosecond>> = vec![
            Some(TimestampMicrosecond::new(1)),
            None,
            Some(TimestampMicrosecond::new(1)),
            None,
        ];
        let ts_vector: TimestampMicrosecondVector =
            TimestampMicrosecondVector::from_owned_iterator(times.into_iter());

        let args: Vec<VectorRef> = vec![
            Arc::new(ts_vector),
            Arc::new(StringVector::from(tzs.clone())),
        ];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        let expect_times: VectorRef = Arc::new(StringVector::from(results));
        assert_eq!(expect_times, vector);

        let results = vec![
            Some("1969-12-31 19:00:00.000000001"),
            None,
            Some("1970-01-01 03:00:00.000000001"),
            None,
        ];
        let times: Vec<Option<TimestampNanosecond>> = vec![
            Some(TimestampNanosecond::new(1)),
            None,
            Some(TimestampNanosecond::new(1)),
            None,
        ];
        let ts_vector: TimestampNanosecondVector =
            TimestampNanosecondVector::from_owned_iterator(times.into_iter());

        let args: Vec<VectorRef> = vec![
            Arc::new(ts_vector),
            Arc::new(StringVector::from(tzs.clone())),
        ];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        let expect_times: VectorRef = Arc::new(StringVector::from(results));
        assert_eq!(expect_times, vector);
    }

    #[test]
    fn test_numerical_to_timezone() {
        let f = ToTimezoneFunction;
        let results = vec![
            Some("1969-12-31 19:00:00.001"),
            None,
            Some("1970-01-01 03:00:00.001"),
            None,
            Some("2024-03-26 23:01:50"),
            None,
            Some("2024-03-27 06:02:00"),
            None,
        ];
        let times: Vec<Option<i64>> = vec![
            Some(1),
            None,
            Some(1),
            None,
            Some(1711508510000),
            None,
            Some(1711508520000),
            None,
        ];
        let ts_vector: Int64Vector = Int64Vector::from_owned_iterator(times.into_iter());
        let tzs = vec![
            Some("America/New_York"),
            None,
            Some("Europe/Moscow"),
            None,
            Some("America/New_York"),
            None,
            Some("Europe/Moscow"),
            None,
        ];
        let args: Vec<VectorRef> = vec![
            Arc::new(ts_vector),
            Arc::new(StringVector::from(tzs.clone())),
        ];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(8, vector.len());
        let expect_times: VectorRef = Arc::new(StringVector::from(results));
        assert_eq!(expect_times, vector);
    }
}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::handlers::{MetaServiceHandlerRef, TableMutationHandlerRef};
use crate::handlers::{ProcedureServiceHandlerRef, TableMutationHandlerRef};

/// Shared state for SQL functions.
/// The handlers in state may be `None` in cli command-line or test cases.
@@ -20,6 +20,105 @@ use crate::handlers::{MetaServiceHandlerRef, TableMutationHandlerRef};
pub struct FunctionState {
    // The table mutation handler
    pub table_mutation_handler: Option<TableMutationHandlerRef>,
    // The meta service handler
    pub meta_service_handler: Option<MetaServiceHandlerRef>,
    // The procedure service handler
    pub procedure_service_handler: Option<ProcedureServiceHandlerRef>,
}

impl FunctionState {
    /// Create a mock [`FunctionState`] for test.
    #[cfg(any(test, feature = "testing"))]
    pub fn mock() -> Self {
        use std::sync::Arc;

        use api::v1::meta::ProcedureStatus;
        use async_trait::async_trait;
        use common_base::AffectedRows;
        use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
        use common_query::error::Result;
        use common_query::Output;
        use session::context::QueryContextRef;
        use store_api::storage::RegionId;
        use table::requests::{
            CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
        };

        use crate::handlers::{ProcedureServiceHandler, TableMutationHandler};
        struct MockProcedureServiceHandler;
        struct MockTableMutationHandler;
        const ROWS: usize = 42;

        #[async_trait]
        impl ProcedureServiceHandler for MockProcedureServiceHandler {
            async fn migrate_region(
                &self,
                _request: MigrateRegionRequest,
            ) -> Result<Option<String>> {
                Ok(Some("test_pid".to_string()))
            }

            async fn query_procedure_state(&self, _pid: &str) -> Result<ProcedureStateResponse> {
                Ok(ProcedureStateResponse {
                    status: ProcedureStatus::Done.into(),
                    error: "OK".to_string(),
                    ..Default::default()
                })
            }
        }

        #[async_trait]
        impl TableMutationHandler for MockTableMutationHandler {
            async fn insert(
                &self,
                _request: InsertRequest,
                _ctx: QueryContextRef,
            ) -> Result<Output> {
                Ok(Output::new_with_affected_rows(ROWS))
            }

            async fn delete(
                &self,
                _request: DeleteRequest,
                _ctx: QueryContextRef,
            ) -> Result<AffectedRows> {
                Ok(ROWS)
            }

            async fn flush(
                &self,
                _request: FlushTableRequest,
                _ctx: QueryContextRef,
            ) -> Result<AffectedRows> {
                Ok(ROWS)
            }

            async fn compact(
                &self,
                _request: CompactTableRequest,
                _ctx: QueryContextRef,
            ) -> Result<AffectedRows> {
                Ok(ROWS)
            }

            async fn flush_region(
                &self,
                _region_id: RegionId,
                _ctx: QueryContextRef,
            ) -> Result<AffectedRows> {
                Ok(ROWS)
            }

            async fn compact_region(
                &self,
                _region_id: RegionId,
                _ctx: QueryContextRef,
            ) -> Result<AffectedRows> {
                Ok(ROWS)
            }
        }

        Self {
            table_mutation_handler: Some(Arc::new(MockTableMutationHandler)),
            procedure_service_handler: Some(Arc::new(MockProcedureServiceHandler)),
        }
    }
}
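
With the mock in place, function-level tests can obtain a fully wired FunctionState without a running metasrv or datanode. A minimal sketch of what such a test can rely on, illustration only (not part of the diff) and assuming the `testing` feature or `cfg(test)` is active:

// Sketch: the mock hands back live handlers for both services.
#[test]
fn mock_state_provides_handlers() {
    let state = FunctionState::mock();
    assert!(state.table_mutation_handler.is_some());
    assert!(state.procedure_service_handler.is_some());
}
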
@@ -14,6 +14,7 @@

mod build;
mod database;
mod procedure_state;
mod timezone;
mod version;

@@ -21,6 +22,7 @@ use std::sync::Arc;

use build::BuildFunction;
use database::DatabaseFunction;
use procedure_state::ProcedureStateFunction;
use timezone::TimezoneFunction;
use version::VersionFunction;

@@ -34,5 +36,6 @@ impl SystemFunction {
        registry.register(Arc::new(VersionFunction));
        registry.register(Arc::new(DatabaseFunction));
        registry.register(Arc::new(TimezoneFunction));
        registry.register(Arc::new(ProcedureStateFunction));
    }
}
@@ -22,7 +22,7 @@ use datatypes::vectors::{StringVector, VectorRef};

use crate::function::{Function, FunctionContext};

/// Generates build information
#[derive(Clone, Debug, Default)]
pub struct BuildFunction;

@@ -42,11 +42,7 @@ impl Function for BuildFunction {
    }

    fn signature(&self) -> Signature {
        Signature::uniform(
            0,
            vec![ConcreteDataType::string_datatype()],
            Volatility::Immutable,
        )
        Signature::uniform(0, vec![], Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
@@ -75,7 +71,7 @@ mod tests {
            Signature {
                type_signature: TypeSignature::Uniform(0, valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::string_datatype()]
            } if valid_types.is_empty()
        ));
        let build_info = common_version::build_info().to_string();
        let vector = build.eval(FunctionContext::default(), &[]).unwrap();
Some files were not shown because too many files have changed in this diff.