Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-25 07:30:02 +00:00)

Compare commits: `windows_pd` ... `v0.11.2` (115 commits)
Commit SHAs in this range (author and date columns did not survive in the capture):

```
7fe735009c  f0298afaf0  5175dea6b3  7caa88abc7  eafb01dfff
b0de816d3d  5c6161a95e  5e3c5945c4  f6feac26f5  4b2c59e626
cf605ecccc  ab3f9c42f1  258fc6f31b  e2dccc1d1a  78c5707642
204b5e474f  e9f1fa0b7d  a988ff5acf  ef0fca9388  b704e7f703
3a4c636e29  a22e8b421c  5b42546204  0678a31ab1  589cc84048
ed8c072a5e  9d172f1cae  236888313d  0b97ef0e4f  316e6a83eb
6dc57b7a6c  1f5c2b32e5  01e907be40  e4dc5ea243  3ff5754b5a
c22ca3ebd5  327d165ad9  fe63a620ef  be81f0db5a  6ca7a305ae
1111a8bd57  66b21b29b5  31cfab81ad  dd3a509607  d4cae6af1e
3fec71b5c0  9e31a6478b  bce291a8e1  c788eb67e2  0c32dcf46c
68a05b38bd  ee72ae8bd0  556bd796d8  1327e8809f  17d75c767c
a1ed450c0c  ea4ce9d1e3  1f7d9666b7  9f1a0d78b2  ed8e418716
9e7121c1bb  94a49ed4f0  f5e743379f  6735e5867e  925525726b
6427682a9a  55b0022676  2d84cc8d87  c030705b17  443c600bd0
39cadfe10b  9b5e4e80f7  041a276b66  614a25ddc5  4337e20010
65c52cc698  50f31fd681  b5af5aaf8d  27693c7f1e  a59fef9ffb
bcecd8ce52  ffdcb8c1ac  554121ad79  43c12b4f2c  7aa8c28fe4
34fbe7739e  06d7bd99dd  b71d842615  7f71693b8e  615ea1a171
4e725d259d  dc2252eb6d  6d4cc2e070  6066ce2c4a  b90d8f7dbd
fdccf4ff84  8b1484c064  576e20ac78  10b3e3da0f  4a3ef2d718
65eabb2a05  bc5a57f51f  f24b9d8814  dd4d0a88ce  3d2096fe9d
35715bb710  08a3befa67  ca1758d4e7  42bf818167  2c9b117224
3edf2317e1  85d72a3cd0  928172bd82  e9f5bddeff  486755d795
```

The per-file diff excerpts below are reproduced as rendered; the added/removed markers were lost in the capture, so changed pairs appear as consecutive old/new lines.
`.github/actions/build-images/action.yml` (vendored, 4 lines changed)

```
@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}
```
`.github/actions/build-linux-artifacts/action.yml` (vendored, 14 lines changed)

```
@@ -48,19 +48,7 @@ runs:
path: /tmp/greptime-*.log
retention-days: 3

- name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime without pyo3
- name: Build greptime
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
```
```
@@ -33,15 +33,6 @@ runs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2

- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy

- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
```
`.github/workflows/dev-build.yml` (vendored, 2 lines changed)

```
@@ -29,7 +29,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
default: ec2-c6g.8xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
```
`.github/workflows/develop.yml` (vendored, 34 lines changed)

```
@@ -10,17 +10,6 @@ on:
- 'docker/**'
- '.gitignore'
- 'grafana/**'
push:
branches:
- main
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch:

name: CI

@@ -84,7 +73,7 @@ jobs:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo
run: taplo format --check

@@ -107,7 +96,7 @@ jobs:
shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
run: cargo install cargo-gc-bin --force
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args

@@ -163,7 +152,7 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:

@@ -220,7 +209,7 @@ jobs:
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin
cargo install cargo-fuzz cargo-gc-bin --force
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:

@@ -268,7 +257,7 @@ jobs:
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
run: cargo install cargo-gc-bin --force
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args

@@ -338,7 +327,7 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4

@@ -487,7 +476,7 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz cargo-gc-bin --force
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4

@@ -653,6 +642,7 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
needs: [clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3

@@ -678,12 +668,6 @@ jobs:
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait

@@ -697,7 +681,7 @@ jobs:
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
```
`.github/workflows/nightly-build.yml` (vendored, 2 lines changed)

```
@@ -27,7 +27,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
default: ec2-c6g.8xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
```
`.github/workflows/nightly-ci.yml` (vendored, 10 lines changed)

```
@@ -1,6 +1,6 @@
on:
schedule:
- cron: "0 23 * * 1-5"
- cron: "0 23 * * 1-4"
workflow_dispatch:

name: Nightly CI

@@ -91,18 +91,12 @@
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
run: cargo nextest run -F dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
```
`.github/workflows/release.yml` (vendored, 14 lines changed)

```
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.12.0
NEXT_RELEASE_VERSION: v0.11.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:

@@ -222,18 +222,10 @@ jobs:
arch: aarch64-apple-darwin
features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}

@@ -271,10 +263,6 @@ jobs:
arch: x86_64-pc-windows-msvc
features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
```
`Cargo.lock` (generated, 181 lines changed)

The captured lockfile hunks reduce to the following:

- Every workspace crate version changes from `0.12.0` to `0.11.2`: api, auth, cache, catalog, cli, client, cmd, common-base, common-catalog, common-config, common-datasource, common-decimal, common-error, common-frontend, common-function, common-greptimedb-telemetry, common-grpc, common-grpc-expr, common-macro, common-mem-prof, common-meta, common-options, common-plugins, common-pprof, common-procedure, common-procedure-test, common-query, common-recordbatch, common-runtime, common-telemetry, common-test-util, common-time, common-version, common-wal, datanode, datatypes, file-engine, flow, frontend, index, log-query, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator, partition, pipeline, plugins, promql, puffin, query, script, servers, session, sql, sqlness-runner, store-api, substrait, table, tests-fuzz, tests-integration.
- Dependency entries on `"substrait 0.12.0"` become `"substrait 0.11.2"` in cli, client, cmd, datanode, flow, operator, query and tests-integration.
- The dependency lists of common-error, common-runtime, flow, index and query each gain new entries (for example `http 0.2.12`, `tokio-metrics`, `tokio-metrics-collector` and `nalgebra 0.33.2`), matching the Cargo.toml changes below.
- Other entries:

```
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a875e976441188028353f7274a46a7e6e065c5d4#a875e976441188028353f7274a46a7e6e065c5d4"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908#43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908"

[[package]]
name = "tokio"
version = "1.40.0"
version = "1.42.0"
checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"

[[package]]
name = "tokio-metrics"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112"
dependencies = ["futures-util", "pin-project-lite", "tokio", "tokio-stream"]

[[package]]
name = "tokio-metrics-collector"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8092b7a97ed5dac2f44892db190eca8f476ede0fa585bc87664de4151cd0b64"
dependencies = ["lazy_static", "parking_lot 0.12.3", "prometheus", "tokio", "tokio-metrics"]
```
`Cargo.toml` (workspace)

```
@@ -68,7 +68,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.12.0"
version = "0.11.2"
edition = "2021"
license = "Apache-2.0"

@@ -124,8 +124,9 @@ etcd-client = "0.13"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908" }
hex = "0.4"
http = "0.2"
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"

@@ -134,6 +135,7 @@ lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
nalgebra = "0.33"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
```
Configuration options reference (table excerpts):

```
@@ -18,6 +18,7 @@
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |

@@ -150,12 +151,17 @@
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |

@@ -195,6 +201,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |

@@ -421,7 +428,7 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |

@@ -460,7 +467,7 @@
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |

@@ -478,12 +485,17 @@
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
```
Example TOML configuration changes:

```
@@ -294,7 +294,7 @@ data_home = "/tmp/greptimedb/"
type = "File"

## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""

@@ -478,7 +478,7 @@ auto_flush_interval = "1h"
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false

## File system path for write cache, defaults to `{data_home}/object_cache/write`.
## File system path for write cache, defaults to `{data_home}`.
experimental_write_cache_path = ""

## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.

@@ -550,7 +550,7 @@ metadata_cache_size = "64MiB"
content_cache_size = "128MiB"

## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
content_cache_page_size = "64KiB"

## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]

@@ -576,6 +576,30 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"

## The options for bloom filter index in Mito engine.
[region_engine.mito.bloom_filter_index]

## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"

## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"

## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"

## Memory threshold for the index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"

[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
```
```
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"

## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"

## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.

@@ -18,6 +18,10 @@ max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true

## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"

## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.

@@ -589,7 +593,7 @@ metadata_cache_size = "64MiB"
content_cache_size = "128MiB"

## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
content_cache_page_size = "64KiB"

## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]

@@ -615,6 +619,30 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"

## The options for bloom filter in Mito engine.
[region_engine.mito.bloom_filter_index]

## Whether to create the bloom filter on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"

## Whether to create the bloom filter on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"

## Whether to apply the bloom filter on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"

## Memory threshold for bloom filter creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"

[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
```
Dockerfile changes:

```
@@ -13,8 +13,6 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which

# Install protoc

@@ -43,8 +41,6 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which

WORKDIR /greptime

@@ -20,10 +20,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
curl \
git \
build-essential \
pkg-config \
python3.10 \
python3.10-dev \
python3-pip
pkg-config

# Install Rust.
SHELL ["/bin/bash", "-c"]

@@ -46,15 +43,8 @@ ARG OUTPUT_DIR

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl

COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

@@ -7,9 +7,7 @@ RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel
centos-release-scl

ARG TARGETARCH

@@ -8,15 +8,8 @@ ARG TARGET_BIN=greptime

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl

COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

ARG TARGETARCH

ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
```
A Python script that reports unused error variants (indentation restored from context):

```
@@ -14,6 +14,7 @@

import os
import re
from multiprocessing import Pool


def find_rust_files(directory):

@@ -33,13 +34,11 @@ def extract_branch_names(file_content):
    return pattern.findall(file_content)


def check_snafu_in_files(branch_name, rust_files):
def check_snafu_in_files(branch_name, rust_files_content):
    branch_name_snafu = f"{branch_name}Snafu"
    for rust_file in rust_files:
        with open(rust_file, "r") as file:
            content = file.read()
            if branch_name_snafu in content:
                return True
    for content in rust_files_content.values():
        if branch_name_snafu in content:
            return True
    return False

@@ -49,21 +48,24 @@ def main():

    for error_file in error_files:
        with open(error_file, "r") as file:
            content = file.read()
            branch_names.extend(extract_branch_names(content))
            branch_names.extend(extract_branch_names(file.read()))

    unused_snafu = [
        branch_name
        for branch_name in branch_names
        if not check_snafu_in_files(branch_name, other_rust_files)
    ]
    # Read all rust files into memory once
    rust_files_content = {}
    for rust_file in other_rust_files:
        with open(rust_file, "r") as file:
            rust_files_content[rust_file] = file.read()

    with Pool() as pool:
        results = pool.starmap(
            check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
        )
    unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]

    if unused_snafu:
        print("Unused error variants:")
        for name in unused_snafu:
            print(name)

    if unused_snafu:
        raise SystemExit(1)
```
Nix development shell:

```
@@ -1,5 +1,5 @@
let
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11";
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
pkgs = import nixpkgs { config = {}; overlays = []; };
in

@@ -17,10 +17,12 @@ pkgs.mkShell rec {
})
cargo-nextest
taplo
curl
];

buildInputs = with pkgs; [
libgit2
libz
];

LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
```
Crate manifest changes:

```
@@ -15,7 +15,7 @@ cache.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true

@@ -56,7 +56,6 @@ tokio.workspace = true
tracing-appender.workspace = true

[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
```
Standalone options (Rust source, indentation restored from context):

```
@@ -22,6 +22,7 @@ use catalog::information_schema::InformationExtension;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use client::api::v1::meta::RegionRole;
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};

@@ -152,6 +153,7 @@ pub struct StandaloneOptions {
    pub tracing: TracingOptions,
    pub init_regions_in_background: bool,
    pub init_regions_parallelism: usize,
    pub max_in_flight_write_bytes: Option<ReadableSize>,
}

impl Default for StandaloneOptions {

@@ -181,6 +183,7 @@ impl Default for StandaloneOptions {
            tracing: TracingOptions::default(),
            init_regions_in_background: false,
            init_regions_parallelism: 16,
            max_in_flight_write_bytes: None,
        }
    }
}

@@ -218,6 +221,7 @@ impl StandaloneOptions {
            user_provider: cloned_opts.user_provider,
            // Handle the export metrics task run by standalone to frontend for execution
            export_metrics: cloned_opts.export_metrics,
            max_in_flight_write_bytes: cloned_opts.max_in_flight_write_bytes,
            ..Default::default()
        }
    }
```
Test data generation script (Python, indentation restored from context):

```
@@ -35,10 +35,23 @@ data = {
    "bigint_other": [5, -5, 1, 5, 5],
    "utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
    "utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
    "timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
    "date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
    "timestamp_simple": [
        datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
        datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
        datetime.datetime(2023, 1, 1),
        datetime.datetime(2023, 2, 1),
        datetime.datetime(2023, 3, 1),
    ],
    "date_simple": [
        datetime.date(2023, 4, 1),
        datetime.date(2023, 3, 1),
        datetime.date(2023, 1, 1),
        datetime.date(2023, 2, 1),
        datetime.date(2023, 3, 1),
    ],
}


def infer_schema(data):
    schema = "struct<"
    for key, value in data.items():

@@ -56,7 +69,7 @@ def infer_schema(data):
        elif key.startswith("date"):
            dt = "date"
        else:
            print(key,value,dt)
            print(key, value, dt)
            raise NotImplementedError
        if key.startswith("double"):
            dt = "double"

@@ -68,7 +81,6 @@ def infer_schema(data):
    return schema



def _write(
    schema: str,
    data,
```
Error crate manifest:

```
@@ -8,6 +8,7 @@ license.workspace = true
workspace = true

[dependencies]
http.workspace = true
snafu.workspace = true
strum.workspace = true
tonic.workspace = true
```
@@ -18,9 +18,30 @@ pub mod ext;
pub mod mock;
pub mod status_code;

use http::{HeaderMap, HeaderValue};
pub use snafu;

// HACK - these headers are defined here so they can be shared with gRPC services. For common
// HTTP headers, please define them in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";

/// Creates an HTTP header map from an error code and message,
/// using `GREPTIME_DB_HEADER_ERROR_CODE` and `GREPTIME_DB_HEADER_ERROR_MSG` as keys.
pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
let mut header = HeaderMap::new();

let msg = HeaderValue::from_str(msg).unwrap_or_else(|_| {
HeaderValue::from_bytes(
&msg.as_bytes()
.iter()
.flat_map(|b| std::ascii::escape_default(*b))
.collect::<Vec<u8>>(),
)
.expect("Already escaped string should be valid ascii")
});

header.insert(GREPTIME_DB_HEADER_ERROR_CODE, code.into());
header.insert(GREPTIME_DB_HEADER_ERROR_MSG, msg);
header
}

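A brief usage sketch, assuming this function lives at the root of the `common_error` crate as the hunk suggests; it shows that a message containing non-ASCII bytes is escaped rather than rejected:

```rust
use common_error::{
    from_err_code_msg_to_header, GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG,
};

fn main() {
    // The UTF-8 message is not a valid ASCII header value, so it gets escaped.
    let headers = from_err_code_msg_to_header(4001, "table `métrics` not found");
    assert_eq!(headers[GREPTIME_DB_HEADER_ERROR_CODE], "4001");
    assert!(headers.contains_key(GREPTIME_DB_HEADER_ERROR_MSG));
}
```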
@@ -33,7 +33,7 @@ geo-types = { version = "0.7", optional = true }
|
||||
geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
jsonb.workspace = true
|
||||
nalgebra = "0.33"
|
||||
nalgebra.workspace = true
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
|
||||
@@ -32,6 +32,7 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
|
||||
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::vector::sum::VectorSumCreator;
|
||||
|
||||
/// A function creates `AggregateFunctionCreator`.
|
||||
/// "Aggregator" *is* AggregatorFunction. Since the later one is long, we named an short alias for it.
|
||||
@@ -91,6 +92,7 @@ impl AggregateFunctions {
|
||||
register_aggr_func!("argmin", 1, ArgminAccumulatorCreator);
|
||||
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
|
||||
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
|
||||
register_aggr_func!("vec_sum", 1, VectorSumCreator);
|
||||
|
||||
#[cfg(feature = "geo")]
|
||||
register_aggr_func!(
|
||||
|
||||
@@ -14,9 +14,14 @@
|
||||
|
||||
mod convert;
|
||||
mod distance;
|
||||
pub(crate) mod impl_conv;
|
||||
mod elem_sum;
|
||||
pub mod impl_conv;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
mod sub;
|
||||
pub(crate) mod sum;
|
||||
mod vector_div;
|
||||
mod vector_mul;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -38,5 +43,11 @@ impl VectorFunction {
|
||||
// scalar calculation
|
||||
registry.register(Arc::new(scalar_add::ScalarAddFunction));
|
||||
registry.register(Arc::new(scalar_mul::ScalarMulFunction));
|
||||
|
||||
// vector calculation
|
||||
registry.register(Arc::new(vector_mul::VectorMulFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(sub::SubFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
}
|
||||
}
|
||||
|
||||
src/common/function/src/scalars/vector/elem_sum.rs (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_elem_sum";
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ElemSumFunction;
|
||||
|
||||
impl Function for ElemSumFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).sum()));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ElemSumFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
|
||||
#[test]
|
||||
fn test_elem_sum() {
|
||||
let func = ElemSumFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(15.0));
|
||||
assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
|
||||
}
|
||||
}
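A hedged SQL sketch of how the new function might be invoked once registered (the name is taken from `NAME` above, and the call style mirrors the `vec_sub`/`vec_mul` doc examples later in this diff):

```sql
-- Sums the elements of a single vector literal; expected result: 6.
SELECT vec_elem_sum('[1.0, 2.0, 3.0]');
```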
src/common/function/src/scalars/vector/sub.rs (new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_sub";
|
||||
|
||||
/// Subtracts corresponding elements of two vectors, returns a vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_sub("[1.0, 1.0]", "[1.0, 2.0]")) as result;
|
||||
///
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_sub(Utf8("[1.0, 1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | [0,-1] |
|
||||
/// +---------------------------------------------------------------+
|
||||
///
|
||||
/// -- Negative scalar to simulate subtraction
|
||||
/// SELECT vec_to_string(vec_sub('[-1.0, -1.0]', '[1.0, 2.0]'));
|
||||
///
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_sub(Utf8("[-1.0, -1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | [-2,-3] |
|
||||
/// +-----------------------------------------------------------------+
|
||||
///
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct SubFunction;
|
||||
|
||||
impl Function for SubFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
let (Some(arg0), Some(arg1)) = (arg0, arg1) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
|
||||
let vec_res = vec0 - vec1;
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for SubFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sub() {
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.0, 1.0, 2.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[-2.0, 0.0, 2.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sub_error() {
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0, input1]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 4, args 1: 3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/sum.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VectorSum {
|
||||
sum: Option<OVector<f32, Dyn>>,
|
||||
has_null: bool,
|
||||
}
|
||||
|
||||
#[as_aggr_func_creator]
|
||||
#[derive(Debug, Default, AggrFuncTypeStore)]
|
||||
pub struct VectorSumCreator {}
|
||||
|
||||
impl AggregateFunctionCreator for VectorSumCreator {
|
||||
fn creator(&self) -> AccumulatorCreatorFunction {
|
||||
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
|
||||
ensure!(
|
||||
types.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let input_type = &types[0];
|
||||
match input_type {
|
||||
ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
|
||||
Ok(Box::new(VectorSum::default()))
|
||||
}
|
||||
_ => {
|
||||
let err_msg = format!(
|
||||
"\"VEC_SUM\" aggregate function not support data type {:?}",
|
||||
input_type.logical_type_id(),
|
||||
);
|
||||
CreateAccumulatorSnafu { err_msg }.fail()?
|
||||
}
|
||||
}
|
||||
});
|
||||
creator
|
||||
}
|
||||
|
||||
fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
|
||||
Ok(vec![self.output_type()?])
|
||||
}
|
||||
}
|
||||
|
||||
impl VectorSum {
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.sum
|
||||
.get_or_insert_with(|| OVector::zeros_generic(Dyn(len), Const::<1>))
|
||||
}
|
||||
|
||||
fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
|
||||
if values.is_empty() || self.has_null {
|
||||
return Ok(());
|
||||
};
|
||||
let column = &values[0];
|
||||
let len = column.len();
|
||||
|
||||
match as_veclit_if_const(column)? {
|
||||
Some(column) => {
|
||||
let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
|
||||
*self.inner(vec_column.len()) += vec_column;
|
||||
}
|
||||
None => {
|
||||
for i in 0..len {
|
||||
let Some(arg0) = as_veclit(column.get_ref(i))? else {
|
||||
if is_update {
|
||||
self.has_null = true;
|
||||
self.sum = None;
|
||||
}
|
||||
return Ok(());
|
||||
};
|
||||
let vec_column = DVectorView::from_slice(&arg0, arg0.len());
|
||||
*self.inner(vec_column.len()) += vec_column;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for VectorSum {
|
||||
fn state(&self) -> common_query::error::Result<Vec<Value>> {
|
||||
self.evaluate().map(|v| vec![v])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(values, true)
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(states, false)
|
||||
}
|
||||
|
||||
fn evaluate(&self) -> common_query::error::Result<Value> {
|
||||
match &self.sum {
|
||||
None => Ok(Value::Null),
|
||||
Some(vector) => Ok(Value::from(veclit_to_binlit(vector.as_slice()))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{ConstantVector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_update_batch() {
|
||||
// test update empty batch, expect not updating anything
|
||||
let mut vec_sum = VectorSum::default();
|
||||
vec_sum.update_batch(&[]).unwrap();
|
||||
assert!(vec_sum.sum.is_none());
|
||||
assert!(!vec_sum.has_null);
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update one not-null value
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
|
||||
"[1.0,2.0,3.0]".to_string(),
|
||||
)]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update one null value
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update no null-value batch
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[12.0, 15.0, 18.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update null-value batch
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
None,
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update with constant vector
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
|
||||
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
|
||||
4,
|
||||
))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
}
|
||||
}
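A hedged SQL sketch of the new aggregate (the table `t` and column `v` are assumptions for illustration; `vec_to_string` is the conversion helper referenced elsewhere in this diff):

```sql
-- Element-wise sum over all vectors in the column; a NULL row makes the result NULL,
-- matching the accumulator's has_null handling above.
SELECT vec_to_string(vec_sum(v)) FROM t;
```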
src/common/function/src/scalars/vector/vector_div.rs (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_div";
|
||||
|
||||
/// Divides corresponding elements of two vectors.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_div("[2, 4, 6]", "[2, 2, 2]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [1,2,3] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorDivFunction;
|
||||
|
||||
impl Function for VectorDivFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(arg0), Some(arg1)) = (arg0, arg1) {
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the vectors must match for division, have: {} vs {}",
|
||||
arg0.len(),
|
||||
arg1.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec0.component_div(&vec1);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorDivFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vector_div() {
|
||||
let func = VectorDivFunction;
|
||||
|
||||
let vec0 = vec![1.0, 2.0, 3.0];
|
||||
let vec1 = vec![1.0, 1.0];
|
||||
let (len0, len1) = (vec0.len(), vec1.len());
|
||||
let input0 = Arc::new(StringVector::from(vec![Some(format!("{vec0:?}"))]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some(format!("{vec1:?}"))]));
|
||||
|
||||
let err = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!(
|
||||
"The length of the vectors must match for division, have: {} vs {}",
|
||||
len0, len1
|
||||
)
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[8.0,10.0,12.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[2.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
Some("[3.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[1.0, 2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[4.0, 5.0, 6.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,-2.0]".to_string())]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some("[0.0,0.0]".to_string())]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[f64::INFINITY as f32, f64::NEG_INFINITY as f32]).as_slice())
|
||||
);
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/vector_mul.rs (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_mul";
|
||||
|
||||
/// Multiplies corresponding elements of two vectors.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_mul("[1, 2, 3]", "[1, 2, 3]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [1,4,9] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorMulFunction;
|
||||
|
||||
impl Function for VectorMulFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(arg0), Some(arg1)) = (arg0, arg1) {
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the vectors must match for multiplying, have: {} vs {}",
|
||||
arg0.len(),
|
||||
arg1.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec1.component_mul(&vec0);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorMulFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vector_mul() {
|
||||
let func = VectorMulFunction;
|
||||
|
||||
let vec0 = vec![1.0, 2.0, 3.0];
|
||||
let vec1 = vec![1.0, 1.0];
|
||||
let (len0, len1) = (vec0.len(), vec1.len());
|
||||
let input0 = Arc::new(StringVector::from(vec![Some(format!("{vec0:?}"))]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some(format!("{vec1:?}"))]));
|
||||
|
||||
let err = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!(
|
||||
"The length of the vectors must match for multiplying, have: {} vs {}",
|
||||
len0, len1
|
||||
)
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[8.0,10.0,12.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[2.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
Some("[3.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[1.0, 2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[16.0, 20.0, 24.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
}
|
||||
@@ -60,6 +60,7 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
column_schema: schema,
|
||||
is_key: column_def.semantic_type == SemanticType::Tag as i32,
|
||||
location: parse_location(ac.location)?,
|
||||
add_if_not_exists: ac.add_if_not_exists,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
@@ -220,6 +221,7 @@ mod tests {
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
add_if_not_exists: true,
|
||||
}],
|
||||
})),
|
||||
};
|
||||
@@ -240,6 +242,7 @@ mod tests {
|
||||
add_column.column_schema.data_type
|
||||
);
|
||||
assert_eq!(None, add_column.location);
|
||||
assert!(add_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -265,6 +268,7 @@ mod tests {
|
||||
location_type: LocationType::First.into(),
|
||||
after_column_name: String::default(),
|
||||
}),
|
||||
add_if_not_exists: false,
|
||||
},
|
||||
AddColumn {
|
||||
column_def: Some(ColumnDef {
|
||||
@@ -280,6 +284,7 @@ mod tests {
|
||||
location_type: LocationType::After.into(),
|
||||
after_column_name: "ts".to_string(),
|
||||
}),
|
||||
add_if_not_exists: true,
|
||||
},
|
||||
],
|
||||
})),
|
||||
@@ -308,6 +313,7 @@ mod tests {
|
||||
}),
|
||||
add_column.location
|
||||
);
|
||||
assert!(add_column.add_if_not_exists);
|
||||
|
||||
let add_column = add_columns.pop().unwrap();
|
||||
assert!(!add_column.is_key);
|
||||
@@ -317,6 +323,7 @@ mod tests {
|
||||
add_column.column_schema.data_type
|
||||
);
|
||||
assert_eq!(Some(AddColumnLocation::First), add_column.location);
|
||||
assert!(!add_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -299,6 +299,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let memory_column = &add_columns.add_columns[1];
|
||||
assert_eq!(
|
||||
@@ -311,6 +312,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let time_column = &add_columns.add_columns[2];
|
||||
assert_eq!(
|
||||
@@ -323,6 +325,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let interval_column = &add_columns.add_columns[3];
|
||||
assert_eq!(
|
||||
@@ -335,6 +338,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let decimal_column = &add_columns.add_columns[4];
|
||||
assert_eq!(
|
||||
@@ -352,6 +356,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -192,6 +192,9 @@ pub fn build_create_table_expr(
Ok(expr)
}

/// Finds columns that are not present in the schema and returns them as `AddColumns`
/// for adding columns automatically.
/// It always sets `add_if_not_exists` to `true` for now.
pub fn extract_new_columns(
schema: &Schema,
column_exprs: Vec<ColumnExpr>,
@@ -213,6 +216,7 @@ pub fn extract_new_columns(
AddColumn {
column_def,
location: None,
add_if_not_exists: true,
}
})
.collect::<Vec<_>>();

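A hypothetical call-site sketch for the helper documented above (the concrete return type is not shown in this hunk, so only the shape of the result is described):

```rust
// `schema` is the table's current schema; `column_exprs` comes from the write request.
// Columns missing from the schema come back as AddColumn entries, each with
// `location: None` and `add_if_not_exists: true`.
let new_columns = extract_new_columns(&schema, column_exprs);
```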
@@ -105,7 +105,7 @@ impl AlterLogicalTablesProcedure {
|
||||
.context(ConvertAlterTableRequestSnafu)?;
|
||||
let new_meta = table_info
|
||||
.meta
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind, true)
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
|
||||
.context(error::TableSnafu)?
|
||||
.build()
|
||||
.with_context(|_| error::BuildTableMetaSnafu {
|
||||
|
||||
@@ -28,13 +28,13 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status, StringKey,
|
||||
};
|
||||
use common_telemetry::{debug, info};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use futures::future;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::metadata::{RawTableInfo, TableId, TableInfo};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
@@ -51,10 +51,14 @@ use crate::{metrics, ClusterId};
|
||||
|
||||
/// The alter table procedure
|
||||
pub struct AlterTableProcedure {
|
||||
// The runtime context.
|
||||
/// The runtime context.
|
||||
context: DdlContext,
|
||||
// The serialized data.
|
||||
/// The serialized data.
|
||||
data: AlterTableData,
|
||||
/// Cached new table metadata built in the prepare step.
/// If the procedure is recovered from JSON, this value is not cached,
/// but it was already validated in the prepare step.
|
||||
new_table_info: Option<TableInfo>,
|
||||
}
|
||||
|
||||
impl AlterTableProcedure {
|
||||
@@ -70,18 +74,31 @@ impl AlterTableProcedure {
|
||||
Ok(Self {
|
||||
context,
|
||||
data: AlterTableData::new(task, table_id, cluster_id),
|
||||
new_table_info: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data: AlterTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
Ok(AlterTableProcedure { context, data })
|
||||
Ok(AlterTableProcedure {
|
||||
context,
|
||||
data,
|
||||
new_table_info: None,
|
||||
})
|
||||
}
|
||||
|
||||
// Checks whether the table exists.
|
||||
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
|
||||
self.check_alter().await?;
|
||||
self.fill_table_info().await?;
|
||||
|
||||
// Validates the request and builds the new table info.
|
||||
// We need to build the new table info here because we should ensure the alteration
|
||||
// is valid in `UpdateMeta` state as we already altered the region.
|
||||
// Safety: `fill_table_info()` already set it.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
self.new_table_info = Some(self.build_new_table_info(&table_info_value.table_info)?);
|
||||
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
if matches!(alter_kind, Kind::RenameTable { .. }) {
|
||||
@@ -106,6 +123,14 @@ impl AlterTableProcedure {
|
||||
|
||||
let leaders = find_leaders(&physical_table_route.region_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
let alter_kind = self.make_region_alter_kind()?;
|
||||
|
||||
info!(
|
||||
"Submitting alter region requests for table {}, table_id: {}, alter_kind: {:?}",
|
||||
self.data.table_ref(),
|
||||
table_id,
|
||||
alter_kind,
|
||||
);
|
||||
|
||||
for datanode in leaders {
|
||||
let requester = self.context.node_manager.datanode(&datanode).await;
|
||||
@@ -113,7 +138,7 @@ impl AlterTableProcedure {
|
||||
|
||||
for region in regions {
|
||||
let region_id = RegionId::new(table_id, region);
|
||||
let request = self.make_alter_region_request(region_id)?;
|
||||
let request = self.make_alter_region_request(region_id, alter_kind.clone())?;
|
||||
debug!("Submitting {request:?} to {datanode}");
|
||||
|
||||
let datanode = datanode.clone();
|
||||
@@ -150,7 +175,15 @@ impl AlterTableProcedure {
|
||||
let table_ref = self.data.table_ref();
|
||||
// Safety: checked before.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
let new_info = self.build_new_table_info(&table_info_value.table_info)?;
|
||||
// Gets the table info from the cache or builds it.
|
||||
let new_info = match &self.new_table_info {
|
||||
Some(cached) => cached.clone(),
|
||||
None => self.build_new_table_info(&table_info_value.table_info)
|
||||
.inspect_err(|e| {
|
||||
// We already check the table info in the prepare step so this should not happen.
|
||||
error!(e; "Unable to build info for table {} in update metadata step, table_id: {}", table_ref, table_id);
|
||||
})?,
|
||||
};
|
||||
|
||||
debug!(
|
||||
"Starting update table: {} metadata, new table info {:?}",
|
||||
@@ -174,7 +207,7 @@ impl AlterTableProcedure {
|
||||
.await?;
|
||||
}
|
||||
|
||||
info!("Updated table metadata for table {table_ref}, table_id: {table_id}");
|
||||
info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}");
|
||||
self.data.state = AlterTableState::InvalidateTableCache;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::{
|
||||
@@ -27,13 +29,15 @@ use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::error::{InvalidProtoMsgSnafu, Result};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Makes alter region request.
|
||||
pub(crate) fn make_alter_region_request(&self, region_id: RegionId) -> Result<RegionRequest> {
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
/// Makes an alter region request from an existing alter kind.
/// The region alter request always adds columns if they do not exist.
|
||||
pub(crate) fn make_alter_region_request(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
kind: Option<alter_request::Kind>,
|
||||
) -> Result<RegionRequest> {
|
||||
// Safety: checked
|
||||
let table_info = self.data.table_info().unwrap();
|
||||
let kind = create_proto_alter_kind(table_info, alter_kind)?;
|
||||
|
||||
Ok(RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
@@ -47,45 +51,66 @@ impl AlterTableProcedure {
|
||||
})),
|
||||
})
|
||||
}
|
||||
|
||||
/// Makes an alter kind proto that all regions can reuse.
/// The region alter request always adds columns if they do not exist.
|
||||
pub(crate) fn make_region_alter_kind(&self) -> Result<Option<alter_request::Kind>> {
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
// Safety: checked
|
||||
let table_info = self.data.table_info().unwrap();
|
||||
let kind = create_proto_alter_kind(table_info, alter_kind)?;
|
||||
|
||||
Ok(kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates region proto alter kind from `table_info` and `alter_kind`.
|
||||
///
|
||||
/// Returns the kind and next column id if it adds new columns.
|
||||
/// It always adds column if not exists and drops column if exists.
|
||||
/// It skips the column if it already exists in the table.
|
||||
fn create_proto_alter_kind(
|
||||
table_info: &RawTableInfo,
|
||||
alter_kind: &Kind,
|
||||
) -> Result<Option<alter_request::Kind>> {
|
||||
match alter_kind {
|
||||
Kind::AddColumns(x) => {
|
||||
// Construct a set of existing columns in the table.
|
||||
let existing_columns: HashSet<_> = table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.map(|col| &col.name)
|
||||
.collect();
|
||||
let mut next_column_id = table_info.meta.next_column_id;
|
||||
|
||||
let add_columns = x
|
||||
.add_columns
|
||||
.iter()
|
||||
.map(|add_column| {
|
||||
let column_def =
|
||||
add_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.context(InvalidProtoMsgSnafu {
|
||||
err_msg: "'column_def' is absent",
|
||||
})?;
|
||||
let mut add_columns = Vec::with_capacity(x.add_columns.len());
|
||||
for add_column in &x.add_columns {
|
||||
let column_def = add_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.context(InvalidProtoMsgSnafu {
|
||||
err_msg: "'column_def' is absent",
|
||||
})?;
|
||||
|
||||
let column_id = next_column_id;
|
||||
next_column_id += 1;
|
||||
// Skips existing columns.
|
||||
if existing_columns.contains(&column_def.name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let column_def = RegionColumnDef {
|
||||
column_def: Some(column_def.clone()),
|
||||
column_id,
|
||||
};
|
||||
let column_id = next_column_id;
|
||||
next_column_id += 1;
|
||||
let column_def = RegionColumnDef {
|
||||
column_def: Some(column_def.clone()),
|
||||
column_id,
|
||||
};
|
||||
|
||||
Ok(AddColumn {
|
||||
column_def: Some(column_def),
|
||||
location: add_column.location.clone(),
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
add_columns.push(AddColumn {
|
||||
column_def: Some(column_def),
|
||||
location: add_column.location.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Some(alter_request::Kind::AddColumns(AddColumns {
|
||||
add_columns,
|
||||
@@ -143,6 +168,7 @@ mod tests {
|
||||
use crate::rpc::router::{Region, RegionRoute};
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
|
||||
/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
|
||||
async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
@@ -171,6 +197,7 @@ mod tests {
|
||||
.name("cpu")
|
||||
.data_type(ColumnDataType::Float64)
|
||||
.semantic_type(SemanticType::Field)
|
||||
.is_nullable(true)
|
||||
.build()
|
||||
.unwrap()
|
||||
.into(),
|
||||
@@ -225,15 +252,16 @@ mod tests {
|
||||
name: "my_tag3".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: b"hello".to_vec(),
|
||||
default_constraint: Vec::new(),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(AddColumnLocation {
|
||||
location_type: LocationType::After as i32,
|
||||
after_column_name: "my_tag2".to_string(),
|
||||
after_column_name: "host".to_string(),
|
||||
}),
|
||||
add_if_not_exists: false,
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -242,8 +270,11 @@ mod tests {
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
procedure.make_alter_region_request(region_id).unwrap().body
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
@@ -259,7 +290,7 @@ mod tests {
|
||||
name: "my_tag3".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: b"hello".to_vec(),
|
||||
default_constraint: Vec::new(),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
@@ -268,7 +299,7 @@ mod tests {
|
||||
}),
|
||||
location: Some(AddColumnLocation {
|
||||
location_type: LocationType::After as i32,
|
||||
after_column_name: "my_tag2".to_string(),
|
||||
after_column_name: "host".to_string(),
|
||||
}),
|
||||
}]
|
||||
}
|
||||
@@ -299,8 +330,11 @@ mod tests {
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
procedure.make_alter_region_request(region_id).unwrap().body
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
@@ -23,7 +23,9 @@ use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Builds new_meta
|
||||
/// Builds new table info after alteration.
|
||||
/// It bumps the column id of the table by the number of the add column requests.
|
||||
/// So there may be holes in the column id sequence.
|
||||
pub(crate) fn build_new_table_info(&self, table_info: &RawTableInfo) -> Result<TableInfo> {
|
||||
let table_info =
|
||||
TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
|
||||
@@ -34,7 +36,7 @@ impl AlterTableProcedure {
|
||||
|
||||
let new_meta = table_info
|
||||
.meta
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind, false)
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
|
||||
.context(error::TableSnafu)?
|
||||
.build()
|
||||
.with_context(|_| error::BuildTableMetaSnafu {
|
||||
@@ -46,6 +48,9 @@ impl AlterTableProcedure {
|
||||
new_info.ident.version = table_info.ident.version + 1;
|
||||
match request.alter_kind {
|
||||
AlterKind::AddColumns { columns } => {
|
||||
// Bumps the column id for the new columns.
|
||||
// It may bump more than the actual number of columns added if there are
|
||||
// existing columns, but it's fine.
|
||||
new_info.meta.next_column_id += columns.len() as u32;
|
||||
}
|
||||
AlterKind::RenameTable { new_table_name } => {
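A tiny worked example of the id-bumping rule described in the comment above (the numbers are illustrative, not from the diff):

```rust
// Suppose next_column_id is 5 and the AddColumns request names 3 columns,
// one of which already exists. The id is still bumped by 3, so the skipped
// column leaves a hole: ids 5 and 6 are assigned, 7 is not, and next_column_id is 8.
let mut next_column_id: u32 = 5;
let requested = 3u32;
next_column_id += requested;
assert_eq!(next_column_id, 8);
```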
@@ -30,6 +30,8 @@ pub struct TestAlterTableExpr {
|
||||
add_columns: Vec<ColumnDef>,
|
||||
#[builder(setter(into, strip_option))]
|
||||
new_table_name: Option<String>,
|
||||
#[builder(setter)]
|
||||
add_if_not_exists: bool,
|
||||
}
|
||||
|
||||
impl From<TestAlterTableExpr> for AlterTableExpr {
|
||||
@@ -53,6 +55,7 @@ impl From<TestAlterTableExpr> for AlterTableExpr {
|
||||
.map(|col| AddColumn {
|
||||
column_def: Some(col),
|
||||
location: None,
|
||||
add_if_not_exists: value.add_if_not_exists,
|
||||
})
|
||||
.collect(),
|
||||
})),
|
||||
|
||||
@@ -56,6 +56,7 @@ fn make_alter_logical_table_add_column_task(
|
||||
let alter_table = alter_table
|
||||
.table_name(table.to_string())
|
||||
.add_columns(add_columns)
|
||||
.add_if_not_exists(true)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -139,7 +139,7 @@ async fn test_on_submit_alter_request() {
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: "my_field_column".to_string(),
|
||||
name: "cpu".to_string(),
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -225,7 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: "my_field_column".to_string(),
|
||||
name: "cpu".to_string(),
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -330,6 +330,7 @@ async fn test_on_update_metadata_add_columns() {
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
add_if_not_exists: false,
|
||||
}],
|
||||
})),
|
||||
},
|
||||
|
||||
@@ -28,13 +28,10 @@ pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;
|
||||
pub struct SchemaMetadataManager {
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
}
|
||||
|
||||
impl SchemaMetadataManager {
|
||||
/// Creates a new database meta
|
||||
#[cfg(not(any(test, feature = "testing")))]
|
||||
pub fn new(table_id_schema_cache: TableSchemaCacheRef, schema_cache: SchemaCacheRef) -> Self {
|
||||
Self {
|
||||
table_id_schema_cache,
|
||||
@@ -42,20 +39,6 @@ impl SchemaMetadataManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new database meta
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn new(
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets schema options by table id.
|
||||
pub async fn get_schema_options_by_table_id(
|
||||
&self,
|
||||
@@ -80,6 +63,7 @@ impl SchemaMetadataManager {
|
||||
schema_name: &str,
|
||||
catalog_name: &str,
|
||||
schema_value: Option<crate::key::schema_name::SchemaNameValue>,
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
) {
|
||||
use table::metadata::{RawTableInfo, TableType};
|
||||
let value = crate::key::table_info::TableInfoValue::new(RawTableInfo {
|
||||
@@ -91,19 +75,18 @@ impl SchemaMetadataManager {
|
||||
meta: Default::default(),
|
||||
table_type: TableType::Base,
|
||||
});
|
||||
let table_info_manager =
|
||||
crate::key::table_info::TableInfoManager::new(self.kv_backend.clone());
|
||||
let table_info_manager = crate::key::table_info::TableInfoManager::new(kv_backend.clone());
|
||||
let (txn, _) = table_info_manager
|
||||
.build_create_txn(table_id, &value)
|
||||
.unwrap();
|
||||
let resp = self.kv_backend.txn(txn).await.unwrap();
|
||||
let resp = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(resp.succeeded, "Failed to create table metadata");
|
||||
let key = crate::key::schema_name::SchemaNameKey {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
};
|
||||
|
||||
crate::key::schema_name::SchemaManager::new(self.kv_backend.clone())
|
||||
crate::key::schema_name::SchemaManager::new(kv_backend.clone())
|
||||
.create(key, schema_value, false)
|
||||
.await
|
||||
.expect("Failed to create schema metadata");
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::any::Any;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::error;
|
||||
use snafu::ResultExt;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::{Client, NoTls};
|
||||
@@ -97,7 +98,11 @@ impl PgStore {
|
||||
let (client, conn) = tokio_postgres::connect(url, NoTls)
|
||||
.await
|
||||
.context(ConnectPostgresSnafu)?;
|
||||
tokio::spawn(async move { conn.await.context(ConnectPostgresSnafu) });
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = conn.await {
|
||||
error!(e; "connection error");
|
||||
}
|
||||
});
|
||||
Self::with_pg_client(client).await
|
||||
}
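// Illustrative sketch (not part of the diff): the consolidated "after" state of the
// hunk above. The tokio_postgres connection future is driven on a background task
// and its terminal error is logged instead of being silently dropped. The error
// type in the signature is assumed; the snafu context and helper match the hunk.
async fn connect_pg_store(url: &str) -> Result<PgStore, Error> {
    let (client, conn) = tokio_postgres::connect(url, NoTls)
        .await
        .context(ConnectPostgresSnafu)?;
    // The connection future only resolves when the connection closes, so any
    // error it returns is worth surfacing in the logs.
    tokio::spawn(async move {
        if let Err(e) = conn.await {
            error!(e; "connection error");
        }
    });
    PgStore::with_pg_client(client).await
}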
|
||||
|
||||
|
||||
@@ -39,3 +39,7 @@ tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
|
||||
[target.'cfg(tokio_unstable)'.dependencies]
|
||||
tokio-metrics = { version = "0.3" }
|
||||
tokio-metrics-collector = { version = "0.2" }
|
||||
|
||||
@@ -224,7 +224,6 @@ impl DatanodeBuilder {
|
||||
cache_registry.get().context(MissingCacheSnafu)?;
|
||||
|
||||
let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(
|
||||
kv_backend.clone(),
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
));
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_telemetry::{info, warn};
|
||||
use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
|
||||
use object_store::services::Fs;
|
||||
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, OBJECT_CACHE_DIR};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
|
||||
@@ -147,12 +147,10 @@ async fn build_cache_layer(
|
||||
};
|
||||
|
||||
// Enable object cache by default
|
||||
// Set the cache_path to be `${data_home}/object_cache/read/{name}` by default
|
||||
// Set the cache_path to be `${data_home}` by default
|
||||
// if it's not present
|
||||
if cache_path.is_none() {
|
||||
let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR);
|
||||
let read_cache_path = join_dir(&object_cache_path, "read");
|
||||
let read_cache_path = join_dir(&read_cache_path, &name.to_lowercase());
|
||||
let read_cache_path = data_home.to_string();
|
||||
tokio::fs::create_dir_all(Path::new(&read_cache_path))
|
||||
.await
|
||||
.context(CreateDirSnafu {
|
||||
|
||||
@@ -29,7 +29,7 @@ use crate::error::{self, DuplicateColumnSnafu, Error, ProjectArrowSchemaSnafu, R
|
||||
use crate::prelude::ConcreteDataType;
|
||||
pub use crate::schema::column_schema::{
|
||||
ColumnSchema, FulltextAnalyzer, FulltextOptions, Metadata, SkippingIndexOptions,
|
||||
COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
|
||||
SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
|
||||
COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
|
||||
COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
||||
SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
|
||||
|
||||
@@ -543,7 +543,7 @@ pub struct SkippingIndexOptions {
|
||||
pub granularity: u32,
|
||||
/// The type of the skip index.
|
||||
#[serde(default)]
|
||||
pub index_type: SkipIndexType,
|
||||
pub index_type: SkippingIndexType,
|
||||
}
|
||||
|
||||
impl fmt::Display for SkippingIndexOptions {
|
||||
@@ -556,15 +556,15 @@ impl fmt::Display for SkippingIndexOptions {
|
||||
|
||||
/// Skip index types.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
|
||||
pub enum SkipIndexType {
|
||||
pub enum SkippingIndexType {
|
||||
#[default]
|
||||
BloomFilter,
|
||||
}
|
||||
|
||||
impl fmt::Display for SkipIndexType {
|
||||
impl fmt::Display for SkippingIndexType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SkipIndexType::BloomFilter => write!(f, "BLOOM"),
|
||||
SkippingIndexType::BloomFilter => write!(f, "BLOOM"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -587,7 +587,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
// Parse index type with default value BloomFilter
|
||||
let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
|
||||
Some(typ) => match typ.to_ascii_uppercase().as_str() {
|
||||
"BLOOM" => SkipIndexType::BloomFilter,
|
||||
"BLOOM" => SkippingIndexType::BloomFilter,
|
||||
_ => {
|
||||
return error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid index type: {typ}, expected: 'BLOOM'"),
|
||||
@@ -595,7 +595,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
.fail();
|
||||
}
|
||||
},
|
||||
None => SkipIndexType::default(),
|
||||
None => SkippingIndexType::default(),
|
||||
};
|
||||
|
||||
Ok(SkippingIndexOptions {
|
||||
|
||||
@@ -45,6 +45,7 @@ get-size2 = "0.1.2"
|
||||
greptime-proto.workspace = true
|
||||
# This fork of hydroflow is simply for keeping our dependency in our org, and pin the version
|
||||
# otherwise it is the same with upstream repo
|
||||
http.workspace = true
|
||||
hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "main" }
|
||||
itertools.workspace = true
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -30,7 +30,7 @@ use common_telemetry::{debug, info, trace};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use datatypes::value::Value;
|
||||
use greptime_proto::v1;
|
||||
use itertools::Itertools;
|
||||
use itertools::{EitherOrBoth, Itertools};
|
||||
use meta_client::MetaClientOptions;
|
||||
use query::QueryEngine;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -45,18 +45,15 @@ use tokio::sync::broadcast::error::TryRecvError;
|
||||
use tokio::sync::{broadcast, watch, Mutex, RwLock};
|
||||
|
||||
pub(crate) use crate::adapter::node_context::FlownodeContext;
|
||||
use crate::adapter::table_source::TableSource;
|
||||
use crate::adapter::util::column_schemas_to_proto;
|
||||
use crate::adapter::table_source::ManagedTableSource;
|
||||
use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
|
||||
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
|
||||
use crate::compute::ErrCollector;
|
||||
use crate::df_optimizer::sql_to_flow_plan;
|
||||
use crate::error::{
|
||||
EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, TableNotFoundSnafu,
|
||||
UnexpectedSnafu,
|
||||
};
|
||||
use crate::expr::{Batch, GlobalId};
|
||||
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_RUN_INTERVAL_MS};
|
||||
use crate::repr::{self, DiffRow, Row, BATCH_SIZE};
|
||||
use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
|
||||
use crate::expr::Batch;
|
||||
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
|
||||
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
|
||||
|
||||
mod flownode_impl;
|
||||
mod parse_expr;
|
||||
@@ -67,7 +64,7 @@ mod util;
|
||||
mod worker;
|
||||
|
||||
pub(crate) mod node_context;
|
||||
mod table_source;
|
||||
pub(crate) mod table_source;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::utils::StateReportHandler;
|
||||
@@ -127,7 +124,7 @@ pub struct FlowWorkerManager {
|
||||
/// The query engine that will be used to parse the query and convert it to a dataflow plan
|
||||
pub query_engine: Arc<dyn QueryEngine>,
|
||||
/// Getting table name and table schema from table info manager
|
||||
table_info_source: TableSource,
|
||||
table_info_source: ManagedTableSource,
|
||||
frontend_invoker: RwLock<Option<FrontendInvoker>>,
|
||||
/// contains mapping from table name to global id, and table schema
|
||||
node_context: RwLock<FlownodeContext>,
|
||||
@@ -156,11 +153,11 @@ impl FlowWorkerManager {
|
||||
query_engine: Arc<dyn QueryEngine>,
|
||||
table_meta: TableMetadataManagerRef,
|
||||
) -> Self {
|
||||
let srv_map = TableSource::new(
|
||||
let srv_map = ManagedTableSource::new(
|
||||
table_meta.table_info_manager().clone(),
|
||||
table_meta.table_name_manager().clone(),
|
||||
);
|
||||
let node_context = FlownodeContext::default();
|
||||
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
|
||||
let tick_manager = FlowTickManager::new();
|
||||
let worker_handles = Vec::new();
|
||||
FlowWorkerManager {
|
||||
@@ -245,16 +242,26 @@ impl FlowWorkerManager {
|
||||
let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
|
||||
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
|
||||
|
||||
let (is_ts_placeholder, proto_schema) =
|
||||
self.try_fetch_or_create_table(&table_name).await?;
|
||||
let (is_ts_placeholder, proto_schema) = self
|
||||
.try_fetch_existing_table(&table_name)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Table not found: {}", table_name.join(".")),
|
||||
})?;
|
||||
let schema_len = proto_schema.len();
|
||||
|
||||
let total_rows = reqs.iter().map(|r| r.len()).sum::<usize>();
|
||||
trace!(
|
||||
"Sending {} writeback requests to table {}, reqs total rows={}",
|
||||
reqs.len(),
|
||||
table_name.join("."),
|
||||
reqs.iter().map(|r| r.len()).sum::<usize>()
|
||||
);
|
||||
|
||||
METRIC_FLOW_ROWS
|
||||
.with_label_values(&["out"])
|
||||
.inc_by(total_rows as u64);
|
||||
|
||||
let now = self.tick_manager.tick();
|
||||
for req in reqs {
|
||||
match req {
|
||||
@@ -390,16 +397,14 @@ impl FlowWorkerManager {
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
/// Fetch table info or create table from flow's schema if not exist
|
||||
async fn try_fetch_or_create_table(
|
||||
/// Fetch the table schema and primary keys from the table info source; return None if the table does not exist
|
||||
async fn fetch_table_pk_schema(
|
||||
&self,
|
||||
table_name: &TableName,
|
||||
) -> Result<(bool, Vec<api::v1::ColumnSchema>), Error> {
|
||||
// TODO(discord9): instead of auto build table from request schema, actually build table
|
||||
// before `create flow` to be able to assign pk and ts etc.
|
||||
let (primary_keys, schema, is_ts_placeholder) = if let Some(table_id) = self
|
||||
) -> Result<Option<(Vec<String>, Option<usize>, Vec<ColumnSchema>)>, Error> {
|
||||
if let Some(table_id) = self
|
||||
.table_info_source
|
||||
.get_table_id_from_name(table_name)
|
||||
.get_opt_table_id_from_name(table_name)
|
||||
.await?
|
||||
{
|
||||
let table_info = self
|
||||
@@ -414,97 +419,64 @@ impl FlowWorkerManager {
|
||||
.map(|i| meta.schema.column_schemas[i].name.clone())
|
||||
.collect_vec();
|
||||
let schema = meta.schema.column_schemas;
|
||||
// check if the last column is the auto created timestamp column, hence the table is auto created from
|
||||
// flow's plan type
|
||||
let is_auto_create = {
|
||||
let correct_name = schema
|
||||
.last()
|
||||
.map(|s| s.name == AUTO_CREATED_PLACEHOLDER_TS_COL)
|
||||
.unwrap_or(false);
|
||||
let correct_time_index = meta.schema.timestamp_index == Some(schema.len() - 1);
|
||||
correct_name && correct_time_index
|
||||
};
|
||||
(primary_keys, schema, is_auto_create)
|
||||
let time_index = meta.schema.timestamp_index;
|
||||
Ok(Some((primary_keys, time_index, schema)))
|
||||
} else {
|
||||
// TODO(discord9): consider removing the buggy auto create by schema
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
let node_ctx = self.node_context.read().await;
|
||||
let gid: GlobalId = node_ctx
|
||||
.table_repr
|
||||
.get_by_name(table_name)
|
||||
.map(|x| x.1)
|
||||
.unwrap();
|
||||
let schema = node_ctx
|
||||
.schema
|
||||
.get(&gid)
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: format!("Table name = {:?}", table_name),
|
||||
})?
|
||||
.clone();
|
||||
// TODO(discord9): use default key from schema
|
||||
let primary_keys = schema
|
||||
.typ()
|
||||
.keys
|
||||
.first()
|
||||
.map(|v| {
|
||||
v.column_indices
|
||||
.iter()
|
||||
.map(|i| {
|
||||
schema
|
||||
.get_name(*i)
|
||||
.clone()
|
||||
.unwrap_or_else(|| format!("col_{i}"))
|
||||
})
|
||||
.collect_vec()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let update_at = ColumnSchema::new(
|
||||
UPDATE_AT_TS_COL,
|
||||
/// Return (primary keys, schema, and whether the table has a placeholder timestamp column)
|
||||
/// schema of the table comes from flow's output plan
|
||||
///
|
||||
/// adjust to add `update_at` column and ts placeholder if needed
|
||||
async fn adjust_auto_created_table_schema(
|
||||
&self,
|
||||
schema: &RelationDesc,
|
||||
) -> Result<(Vec<String>, Vec<ColumnSchema>, bool), Error> {
|
||||
// TODO(discord9): consider removing the buggy auto create by schema
|
||||
|
||||
// TODO(discord9): use default key from schema
|
||||
let primary_keys = schema
|
||||
.typ()
|
||||
.keys
|
||||
.first()
|
||||
.map(|v| {
|
||||
v.column_indices
|
||||
.iter()
|
||||
.map(|i| {
|
||||
schema
|
||||
.get_name(*i)
|
||||
.clone()
|
||||
.unwrap_or_else(|| format!("col_{i}"))
|
||||
})
|
||||
.collect_vec()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let update_at = ColumnSchema::new(
|
||||
UPDATE_AT_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
);
|
||||
|
||||
let original_schema = relation_desc_to_column_schemas_with_fallback(schema);
|
||||
|
||||
let mut with_auto_added_col = original_schema.clone();
|
||||
with_auto_added_col.push(update_at);
|
||||
|
||||
// if no time index, add one as placeholder
|
||||
let no_time_index = schema.typ().time_index.is_none();
|
||||
if no_time_index {
|
||||
let ts_col = ColumnSchema::new(
|
||||
AUTO_CREATED_PLACEHOLDER_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
);
|
||||
)
|
||||
.with_time_index(true);
|
||||
with_auto_added_col.push(ts_col);
|
||||
}
|
||||
|
||||
let original_schema = schema
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(idx, typ)| {
|
||||
let name = schema
|
||||
.names
|
||||
.get(idx)
|
||||
.cloned()
|
||||
.flatten()
|
||||
.unwrap_or(format!("col_{}", idx));
|
||||
let ret = ColumnSchema::new(name, typ.scalar_type, typ.nullable);
|
||||
if schema.typ().time_index == Some(idx) {
|
||||
ret.with_time_index(true)
|
||||
} else {
|
||||
ret
|
||||
}
|
||||
})
|
||||
.collect_vec();
|
||||
|
||||
let mut with_auto_added_col = original_schema.clone();
|
||||
with_auto_added_col.push(update_at);
|
||||
|
||||
// if no time index, add one as placeholder
|
||||
let no_time_index = schema.typ().time_index.is_none();
|
||||
if no_time_index {
|
||||
let ts_col = ColumnSchema::new(
|
||||
AUTO_CREATED_PLACEHOLDER_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
.with_time_index(true);
|
||||
with_auto_added_col.push(ts_col);
|
||||
}
|
||||
|
||||
(primary_keys, with_auto_added_col, no_time_index)
|
||||
};
|
||||
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
|
||||
Ok((is_ts_placeholder, proto_schema))
|
||||
Ok((primary_keys, with_auto_added_col, no_time_index))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -752,43 +724,6 @@ impl FlowWorkerManager {
|
||||
query_ctx,
|
||||
} = args;
|
||||
|
||||
let already_exist = {
|
||||
let mut flag = false;
|
||||
|
||||
// check if the task already exists
|
||||
for handle in self.worker_handles.iter() {
|
||||
if handle.lock().await.contains_flow(flow_id).await? {
|
||||
flag = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
flag
|
||||
};
|
||||
match (create_if_not_exists, or_replace, already_exist) {
|
||||
// do replace
|
||||
(_, true, true) => {
|
||||
info!("Replacing flow with id={}", flow_id);
|
||||
self.remove_flow(flow_id).await?;
|
||||
}
|
||||
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
||||
// do nothing if exists
|
||||
(true, false, true) => {
|
||||
info!("Flow with id={} already exists, do nothing", flow_id);
|
||||
return Ok(None);
|
||||
}
|
||||
// create if not exists
|
||||
(_, _, false) => (),
|
||||
}
|
||||
|
||||
if create_if_not_exists {
|
||||
// check if the task already exists
|
||||
for handle in self.worker_handles.iter() {
|
||||
if handle.lock().await.contains_flow(flow_id).await? {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut node_ctx = self.node_context.write().await;
|
||||
// assign global id to source and sink table
|
||||
for source in &source_table_ids {
|
||||
@@ -807,7 +742,67 @@ impl FlowWorkerManager {
|
||||
let flow_plan = sql_to_flow_plan(&mut node_ctx, &self.query_engine, &sql).await?;
|
||||
|
||||
debug!("Flow {:?}'s Plan is {:?}", flow_id, flow_plan);
|
||||
node_ctx.assign_table_schema(&sink_table_name, flow_plan.schema.clone())?;
|
||||
|
||||
// check schema against actual table schema if exists
|
||||
// if not exist create sink table immediately
|
||||
if let Some((_, _, real_schema)) = self.fetch_table_pk_schema(&sink_table_name).await? {
|
||||
let auto_schema = relation_desc_to_column_schemas_with_fallback(&flow_plan.schema);
|
||||
|
||||
// for column schema, only `data_type` needs to be checked for equality
|
||||
// since one can omit the flow's column names when writing the flow query
|
||||
// print a user-friendly error message about the mismatch and how to correct it
|
||||
for (idx, zipped) in auto_schema
|
||||
.iter()
|
||||
.zip_longest(real_schema.iter())
|
||||
.enumerate()
|
||||
{
|
||||
match zipped {
|
||||
EitherOrBoth::Both(auto, real) => {
|
||||
if auto.data_type != real.data_type {
|
||||
InvalidQuerySnafu {
|
||||
reason: format!(
|
||||
"Column {}(name is '{}', flow inferred name is '{}')'s data type mismatch, expect {:?} got {:?}",
|
||||
idx,
|
||||
real.name,
|
||||
auto.name,
|
||||
real.data_type,
|
||||
auto.data_type
|
||||
),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
}
|
||||
EitherOrBoth::Right(real) if real.data_type.is_timestamp() => {
|
||||
// if the table is auto created, the last one or two columns should be timestamps (update_at and the ts placeholder)
|
||||
continue;
|
||||
}
|
||||
_ => InvalidQuerySnafu {
|
||||
reason: format!(
|
||||
"schema length mismatched, expected {} found {}",
|
||||
real_schema.len(),
|
||||
auto_schema.len()
|
||||
),
|
||||
}
|
||||
.fail()?,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// assign inferred schema to sink table
|
||||
// create sink table
|
||||
let did_create = self
|
||||
.create_table_from_relation(
|
||||
&format!("flow-id={flow_id}"),
|
||||
&sink_table_name,
|
||||
&flow_plan.schema,
|
||||
)
|
||||
.await?;
|
||||
if !did_create {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Failed to create table {:?}", sink_table_name),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
}
|
||||
|
||||
let _ = comment;
|
||||
let _ = flow_options;
|
||||
@@ -842,9 +837,11 @@ impl FlowWorkerManager {
|
||||
source_ids,
|
||||
src_recvs: source_receivers,
|
||||
expire_after,
|
||||
or_replace,
|
||||
create_if_not_exists,
|
||||
err_collector,
|
||||
};
|
||||
|
||||
handle.create_flow(create_request).await?;
|
||||
info!("Successfully create flow with id={}", flow_id);
|
||||
Ok(Some(flow_id))
|
||||
|
||||
@@ -25,20 +25,24 @@ use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
|
||||
use common_meta::node_manager::Flownode;
|
||||
use common_telemetry::{debug, trace};
|
||||
use itertools::Itertools;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{IntoError, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use super::util::from_proto_to_data_type;
|
||||
use crate::adapter::{CreateFlowArgs, FlowWorkerManager};
|
||||
use crate::error::InternalSnafu;
|
||||
use crate::error::{CreateFlowSnafu, InsertIntoFlowSnafu, InternalSnafu};
|
||||
use crate::metrics::METRIC_FLOW_TASK_COUNT;
|
||||
use crate::repr::{self, DiffRow};
|
||||
|
||||
fn to_meta_err(err: crate::error::Error) -> common_meta::error::Error {
|
||||
// TODO(discord9): refactor this
|
||||
Err::<(), _>(BoxedError::new(err))
|
||||
.with_context(|_| ExternalSnafu)
|
||||
.unwrap_err()
|
||||
/// return a function to convert `crate::error::Error` to `common_meta::error::Error`
|
||||
fn to_meta_err(
|
||||
location: snafu::Location,
|
||||
) -> impl FnOnce(crate::error::Error) -> common_meta::error::Error {
|
||||
move |err: crate::error::Error| -> common_meta::error::Error {
|
||||
common_meta::error::Error::External {
|
||||
location,
|
||||
source: BoxedError::new(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -75,11 +79,16 @@ impl Flownode for FlowWorkerManager {
|
||||
or_replace,
|
||||
expire_after,
|
||||
comment: Some(comment),
|
||||
sql,
|
||||
sql: sql.clone(),
|
||||
flow_options,
|
||||
query_ctx,
|
||||
};
|
||||
let ret = self.create_flow(args).await.map_err(to_meta_err)?;
|
||||
let ret = self
|
||||
.create_flow(args)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.inc();
|
||||
Ok(FlowResponse {
|
||||
affected_flows: ret
|
||||
@@ -94,7 +103,7 @@ impl Flownode for FlowWorkerManager {
|
||||
})) => {
|
||||
self.remove_flow(flow_id.id as u64)
|
||||
.await
|
||||
.map_err(to_meta_err)?;
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.dec();
|
||||
Ok(Default::default())
|
||||
}
|
||||
@@ -112,9 +121,15 @@ impl Flownode for FlowWorkerManager {
|
||||
.await
|
||||
.flush_all_sender()
|
||||
.await
|
||||
.map_err(to_meta_err)?;
|
||||
let rows_send = self.run_available(true).await.map_err(to_meta_err)?;
|
||||
let row = self.send_writeback_requests().await.map_err(to_meta_err)?;
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let rows_send = self
|
||||
.run_available(true)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let row = self
|
||||
.send_writeback_requests()
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
|
||||
debug!(
|
||||
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
|
||||
@@ -138,7 +153,7 @@ impl Flownode for FlowWorkerManager {
|
||||
}
|
||||
|
||||
async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
|
||||
// using try_read makesure two things:
|
||||
// using try_read to ensure two things:
|
||||
// 1. flush wouldn't happen until inserts before it is inserted
|
||||
// 2. inserts happening concurrently with flush wouldn't be block by flush
|
||||
let _flush_lock = self.flush_lock.try_read();
|
||||
@@ -154,17 +169,23 @@ impl Flownode for FlowWorkerManager {
|
||||
// TODO(discord9): reconsider time assignment mechanism
|
||||
let now = self.tick_manager.tick();
|
||||
|
||||
let fetch_order = {
|
||||
let (table_types, fetch_order) = {
|
||||
let ctx = self.node_context.read().await;
|
||||
let table_col_names = ctx
|
||||
.table_repr
|
||||
.get_by_table_id(&table_id)
|
||||
.map(|r| r.1)
|
||||
.and_then(|id| ctx.schema.get(&id))
|
||||
.map(|desc| &desc.names)
|
||||
.context(UnexpectedSnafu {
|
||||
err_msg: format!("Table not found: {}", table_id),
|
||||
})?;
|
||||
|
||||
// TODO(discord9): also check schema version so that altered table can be reported
|
||||
let table_schema = ctx
|
||||
.table_source
|
||||
.table_from_id(&table_id)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let table_types = table_schema
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|t| t.scalar_type)
|
||||
.collect_vec();
|
||||
let table_col_names = table_schema.names;
|
||||
let table_col_names = table_col_names
|
||||
.iter().enumerate()
|
||||
.map(|(idx,name)| match name {
|
||||
@@ -183,16 +204,19 @@ impl Flownode for FlowWorkerManager {
|
||||
);
|
||||
let fetch_order: Vec<usize> = table_col_names
|
||||
.iter()
|
||||
.map(|names| {
|
||||
name_to_col.get(names).copied().context(UnexpectedSnafu {
|
||||
err_msg: format!("Column not found: {}", names),
|
||||
})
|
||||
.map(|col_name| {
|
||||
name_to_col
|
||||
.get(col_name)
|
||||
.copied()
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
err_msg: format!("Column not found: {}", col_name),
|
||||
})
|
||||
})
|
||||
.try_collect()?;
|
||||
if !fetch_order.iter().enumerate().all(|(i, &v)| i == v) {
|
||||
trace!("Reordering columns: {:?}", fetch_order)
|
||||
}
|
||||
fetch_order
|
||||
(table_types, fetch_order)
|
||||
};
|
||||
|
||||
let rows: Vec<DiffRow> = rows_proto
|
||||
@@ -207,17 +231,29 @@ impl Flownode for FlowWorkerManager {
|
||||
})
|
||||
.map(|r| (r, now, 1))
|
||||
.collect_vec();
|
||||
let batch_datatypes = insert_schema
|
||||
.iter()
|
||||
.map(from_proto_to_data_type)
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.map_err(to_meta_err)?;
|
||||
self.handle_write_request(region_id.into(), rows, &batch_datatypes)
|
||||
if let Err(err) = self
|
||||
.handle_write_request(region_id.into(), rows, &table_types)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
common_telemetry::error!(err;"Failed to handle write request");
|
||||
to_meta_err(err)
|
||||
})?;
|
||||
{
|
||||
let err = BoxedError::new(err);
|
||||
let flow_ids = self
|
||||
.node_context
|
||||
.read()
|
||||
.await
|
||||
.get_flow_ids(table_id)
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.cloned()
|
||||
.collect_vec();
|
||||
let err = InsertIntoFlowSnafu {
|
||||
region_id,
|
||||
flow_ids,
|
||||
}
|
||||
.into_error(err);
|
||||
common_telemetry::error!(err; "Failed to handle write request");
|
||||
let err = to_meta_err(snafu::location!())(err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
Ok(Default::default())
|
||||
}
|
||||
|
||||
@@ -25,7 +25,8 @@ use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::{broadcast, mpsc, RwLock};
|
||||
|
||||
use crate::adapter::{FlowId, TableName, TableSource};
|
||||
use crate::adapter::table_source::FlowTableSource;
|
||||
use crate::adapter::{FlowId, ManagedTableSource, TableName};
|
||||
use crate::error::{Error, EvalSnafu, TableNotFoundSnafu};
|
||||
use crate::expr::error::InternalSnafu;
|
||||
use crate::expr::{Batch, GlobalId};
|
||||
@@ -33,7 +34,7 @@ use crate::metrics::METRIC_FLOW_INPUT_BUF_SIZE;
|
||||
use crate::repr::{DiffRow, RelationDesc, BATCH_SIZE, BROADCAST_CAP, SEND_BUF_CAP};
|
||||
|
||||
/// A context that holds the information of the dataflow
|
||||
#[derive(Default, Debug)]
|
||||
#[derive(Debug)]
|
||||
pub struct FlownodeContext {
|
||||
/// mapping from source table to tasks, useful for schedule which task to run when a source table is updated
|
||||
pub source_to_tasks: BTreeMap<TableId, BTreeSet<FlowId>>,
|
||||
@@ -50,13 +51,32 @@ pub struct FlownodeContext {
|
||||
/// note that the sink receiver should only have one, and we are using broadcast as mpsc channel here
|
||||
pub sink_receiver:
|
||||
BTreeMap<TableName, (mpsc::UnboundedSender<Batch>, mpsc::UnboundedReceiver<Batch>)>,
|
||||
/// the schema of the table, query from metasrv or inferred from TypedPlan
|
||||
pub schema: HashMap<GlobalId, RelationDesc>,
|
||||
/// can query the schema of the table source, from metasrv with local cache
|
||||
pub table_source: Box<dyn FlowTableSource>,
|
||||
/// All the tables that have been registered in the worker
|
||||
pub table_repr: IdToNameMap,
|
||||
pub query_context: Option<Arc<QueryContext>>,
|
||||
}
|
||||
|
||||
impl FlownodeContext {
|
||||
pub fn new(table_source: Box<dyn FlowTableSource>) -> Self {
|
||||
Self {
|
||||
source_to_tasks: Default::default(),
|
||||
flow_to_sink: Default::default(),
|
||||
sink_to_flow: Default::default(),
|
||||
source_sender: Default::default(),
|
||||
sink_receiver: Default::default(),
|
||||
table_source,
|
||||
table_repr: Default::default(),
|
||||
query_context: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_flow_ids(&self, table_id: TableId) -> Option<&BTreeSet<FlowId>> {
|
||||
self.source_to_tasks.get(&table_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// a simple broadcast sender with backpressure, bounded capacity and blocking on send when send buf is full
|
||||
/// note that it wouldn't evict old data, so it's possible to block forever if the receiver is slow
|
||||
///
|
||||
@@ -284,7 +304,7 @@ impl FlownodeContext {
|
||||
/// Retrieves a GlobalId and table schema representing a table previously registered by calling the [register_table] function.
|
||||
///
|
||||
/// Returns an error if no table has been registered with the provided names
|
||||
pub fn table(&self, name: &TableName) -> Result<(GlobalId, RelationDesc), Error> {
|
||||
pub async fn table(&self, name: &TableName) -> Result<(GlobalId, RelationDesc), Error> {
|
||||
let id = self
|
||||
.table_repr
|
||||
.get_by_name(name)
|
||||
@@ -292,13 +312,7 @@ impl FlownodeContext {
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: name.join("."),
|
||||
})?;
|
||||
let schema = self
|
||||
.schema
|
||||
.get(&id)
|
||||
.cloned()
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: name.join("."),
|
||||
})?;
|
||||
let schema = self.table_source.table(name).await?;
|
||||
Ok((id, schema))
|
||||
}
|
||||
|
||||
@@ -312,7 +326,7 @@ impl FlownodeContext {
|
||||
/// merely creating a mapping from table id to global id
|
||||
pub async fn assign_global_id_to_table(
|
||||
&mut self,
|
||||
srv_map: &TableSource,
|
||||
srv_map: &ManagedTableSource,
|
||||
mut table_name: Option<TableName>,
|
||||
table_id: Option<TableId>,
|
||||
) -> Result<GlobalId, Error> {
|
||||
@@ -331,36 +345,18 @@ impl FlownodeContext {
|
||||
} else {
|
||||
let global_id = self.new_global_id();
|
||||
|
||||
// table id is Some meaning db must have created the table
|
||||
if let Some(table_id) = table_id {
|
||||
let (known_table_name, schema) = srv_map.get_table_name_schema(&table_id).await?;
|
||||
let known_table_name = srv_map.get_table_name(&table_id).await?;
|
||||
table_name = table_name.or(Some(known_table_name));
|
||||
self.schema.insert(global_id, schema);
|
||||
} // if we don't have table id, it means database havn't assign one yet or we don't need it
|
||||
} // if we don't have a table id, it means the database hasn't assigned one yet or we don't need it
|
||||
|
||||
// still update the mapping with new global id
|
||||
self.table_repr.insert(table_name, table_id, global_id);
|
||||
Ok(global_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// Assign a schema to a table
|
||||
///
|
||||
pub fn assign_table_schema(
|
||||
&mut self,
|
||||
table_name: &TableName,
|
||||
schema: RelationDesc,
|
||||
) -> Result<(), Error> {
|
||||
let gid = self
|
||||
.table_repr
|
||||
.get_by_name(table_name)
|
||||
.map(|(_, gid)| gid)
|
||||
.context(TableNotFoundSnafu {
|
||||
name: format!("Table not found: {:?} in flownode cache", table_name),
|
||||
})?;
|
||||
|
||||
self.schema.insert(gid, schema);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a new global id
|
||||
pub fn new_global_id(&self) -> GlobalId {
|
||||
GlobalId::User(self.table_repr.global_id_to_name_id.len() as u64)
|
||||
|
||||
@@ -20,22 +20,63 @@ use common_meta::key::table_name::{TableNameKey, TableNameManager};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::adapter::util::table_info_value_to_relation_desc;
|
||||
use crate::adapter::TableName;
|
||||
use crate::error::{
|
||||
Error, ExternalSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::repr::{self, ColumnType, RelationDesc, RelationType};
|
||||
use crate::repr::RelationDesc;
|
||||
|
||||
/// the table name <-> table id mapping should be queried from the table info manager
|
||||
pub struct TableSource {
|
||||
/// Table source but for flow, provide table schema by table name/id
|
||||
#[async_trait::async_trait]
|
||||
pub trait FlowTableSource: Send + Sync + std::fmt::Debug {
|
||||
async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error>;
|
||||
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error>;
|
||||
|
||||
/// Get the table schema by table name
|
||||
async fn table(&self, name: &TableName) -> Result<RelationDesc, Error> {
|
||||
let id = self.table_id_from_name(name).await?;
|
||||
self.table_from_id(&id).await
|
||||
}
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error>;
|
||||
}
|
||||
|
||||
/// managed table source information, query from table info manager and table name manager
|
||||
#[derive(Clone)]
|
||||
pub struct ManagedTableSource {
|
||||
/// for query `TableId -> TableName` mapping
|
||||
table_info_manager: TableInfoManager,
|
||||
table_name_manager: TableNameManager,
|
||||
}
|
||||
|
||||
impl TableSource {
|
||||
#[async_trait::async_trait]
|
||||
impl FlowTableSource for ManagedTableSource {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error> {
|
||||
let table_info_value = self
|
||||
.get_table_info_value(table_id)
|
||||
.await?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: format!("TableId = {:?}, Can't found table info", table_id),
|
||||
})?;
|
||||
let desc = table_info_value_to_relation_desc(table_info_value)?;
|
||||
|
||||
Ok(desc)
|
||||
}
|
||||
async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error> {
|
||||
self.get_table_name(table_id).await
|
||||
}
|
||||
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error> {
|
||||
self.get_opt_table_id_from_name(name)
|
||||
.await?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: name.join("."),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagedTableSource {
|
||||
pub fn new(table_info_manager: TableInfoManager, table_name_manager: TableNameManager) -> Self {
|
||||
TableSource {
|
||||
ManagedTableSource {
|
||||
table_info_manager,
|
||||
table_name_manager,
|
||||
}
|
||||
@@ -61,8 +102,11 @@ impl TableSource {
|
||||
.map(|id| id.table_id())
|
||||
}
|
||||
|
||||
/// If the table havn't been created in database, the tableId returned would be null
|
||||
pub async fn get_table_id_from_name(&self, name: &TableName) -> Result<Option<TableId>, Error> {
|
||||
/// If the table hasn't been created in the database, the returned table id will be `None`
|
||||
pub async fn get_opt_table_id_from_name(
|
||||
&self,
|
||||
name: &TableName,
|
||||
) -> Result<Option<TableId>, Error> {
|
||||
let ret = self
|
||||
.table_name_manager
|
||||
.get(TableNameKey::new(&name[0], &name[1], &name[2]))
|
||||
@@ -121,38 +165,121 @@ impl TableSource {
|
||||
table_name.table_name,
|
||||
];
|
||||
|
||||
let raw_schema = table_info_value.table_info.meta.schema;
|
||||
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
|
||||
.column_schemas
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|col| {
|
||||
(
|
||||
ColumnType {
|
||||
nullable: col.is_nullable(),
|
||||
scalar_type: col.data_type,
|
||||
},
|
||||
Some(col.name),
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
|
||||
let key = table_info_value.table_info.meta.primary_key_indices;
|
||||
let keys = vec![repr::Key::from(key)];
|
||||
|
||||
let time_index = raw_schema.timestamp_index;
|
||||
Ok((
|
||||
table_name,
|
||||
RelationDesc {
|
||||
typ: RelationType {
|
||||
column_types,
|
||||
keys,
|
||||
time_index,
|
||||
// by default table schema's column are all non-auto
|
||||
auto_columns: vec![],
|
||||
},
|
||||
names: col_names,
|
||||
},
|
||||
))
|
||||
let desc = table_info_value_to_relation_desc(table_info_value)?;
|
||||
Ok((table_name, desc))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ManagedTableSource {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("KvBackendTableSource").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test {
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::data_type::ConcreteDataType as CDT;
|
||||
|
||||
use super::*;
|
||||
use crate::repr::{ColumnType, RelationType};
|
||||
|
||||
pub struct FlowDummyTableSource {
|
||||
pub id_names_to_desc: Vec<(TableId, TableName, RelationDesc)>,
|
||||
id_to_idx: HashMap<TableId, usize>,
|
||||
name_to_idx: HashMap<TableName, usize>,
|
||||
}
|
||||
|
||||
impl Default for FlowDummyTableSource {
|
||||
fn default() -> Self {
|
||||
let id_names_to_desc = vec![
|
||||
(
|
||||
1024,
|
||||
[
|
||||
"greptime".to_string(),
|
||||
"public".to_string(),
|
||||
"numbers".to_string(),
|
||||
],
|
||||
RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)])
|
||||
.into_named(vec![Some("number".to_string())]),
|
||||
),
|
||||
(
|
||||
1025,
|
||||
[
|
||||
"greptime".to_string(),
|
||||
"public".to_string(),
|
||||
"numbers_with_ts".to_string(),
|
||||
],
|
||||
RelationType::new(vec![
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
|
||||
])
|
||||
.into_named(vec![Some("number".to_string()), Some("ts".to_string())]),
|
||||
),
|
||||
];
|
||||
let id_to_idx = id_names_to_desc
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, (id, _name, _desc))| (*id, idx))
|
||||
.collect();
|
||||
let name_to_idx = id_names_to_desc
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, (_id, name, _desc))| (name.clone(), idx))
|
||||
.collect();
|
||||
Self {
|
||||
id_names_to_desc,
|
||||
id_to_idx,
|
||||
name_to_idx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl FlowTableSource for FlowDummyTableSource {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error> {
|
||||
let idx = self.id_to_idx.get(table_id).context(TableNotFoundSnafu {
|
||||
name: format!("Table id = {:?}, couldn't found table desc", table_id),
|
||||
})?;
|
||||
let desc = self
|
||||
.id_names_to_desc
|
||||
.get(*idx)
|
||||
.map(|x| x.2.clone())
|
||||
.context(TableNotFoundSnafu {
|
||||
name: format!("Table id = {:?}, couldn't found table desc", table_id),
|
||||
})?;
|
||||
Ok(desc)
|
||||
}
|
||||
|
||||
async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error> {
|
||||
let idx = self.id_to_idx.get(table_id).context(TableNotFoundSnafu {
|
||||
name: format!("Table id = {:?}, couldn't found table desc", table_id),
|
||||
})?;
|
||||
self.id_names_to_desc
|
||||
.get(*idx)
|
||||
.map(|x| x.1.clone())
|
||||
.context(TableNotFoundSnafu {
|
||||
name: format!("Table id = {:?}, couldn't found table desc", table_id),
|
||||
})
|
||||
}
|
||||
|
||||
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error> {
|
||||
for (id, table_name, _desc) in &self.id_names_to_desc {
|
||||
if name == table_name {
|
||||
return Ok(*id);
|
||||
}
|
||||
}
|
||||
TableNotFoundSnafu {
|
||||
name: format!("Table name = {:?}, couldn't found table id", name),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for FlowDummyTableSource {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("DummyTableSource").finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,16 +12,153 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column_def::options_from_column_schema;
|
||||
use api::v1::{ColumnDataType, ColumnDataTypeExtension, SemanticType};
|
||||
use api::v1::{ColumnDataType, ColumnDataTypeExtension, CreateTableExpr, SemanticType};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use operator::expr_factory::CreateExprFactory;
|
||||
use session::context::QueryContextBuilder;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{Error, ExternalSnafu};
|
||||
use crate::adapter::{TableName, AUTO_CREATED_PLACEHOLDER_TS_COL};
|
||||
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::repr::{ColumnType, RelationDesc, RelationType};
|
||||
use crate::FlowWorkerManager;
|
||||
|
||||
impl FlowWorkerManager {
|
||||
/// Create a table from the given schema (adjusted to add auto columns if needed); returns true if the table was created
|
||||
pub(crate) async fn create_table_from_relation(
|
||||
&self,
|
||||
flow_name: &str,
|
||||
table_name: &TableName,
|
||||
relation_desc: &RelationDesc,
|
||||
) -> Result<bool, Error> {
|
||||
if self.fetch_table_pk_schema(table_name).await?.is_some() {
|
||||
return Ok(false);
|
||||
}
|
||||
let (pks, tys, _) = self.adjust_auto_created_table_schema(relation_desc).await?;
|
||||
|
||||
//create sink table using pks, column types and is_ts_auto
|
||||
|
||||
let proto_schema = column_schemas_to_proto(tys.clone(), &pks)?;
|
||||
|
||||
// create sink table
|
||||
let create_expr = CreateExprFactory {}
|
||||
.create_table_expr_by_column_schemas(
|
||||
&TableReference {
|
||||
catalog: &table_name[0],
|
||||
schema: &table_name[1],
|
||||
table: &table_name[2],
|
||||
},
|
||||
&proto_schema,
|
||||
"mito",
|
||||
Some(&format!("Sink table for flow {}", flow_name)),
|
||||
)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
self.submit_create_sink_table_ddl(create_expr).await?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Try to fetch the table with the adjusted schema (auto columns added if needed)
|
||||
pub(crate) async fn try_fetch_existing_table(
|
||||
&self,
|
||||
table_name: &TableName,
|
||||
) -> Result<Option<(bool, Vec<api::v1::ColumnSchema>)>, Error> {
|
||||
if let Some((primary_keys, time_index, schema)) =
|
||||
self.fetch_table_pk_schema(table_name).await?
|
||||
{
|
||||
// check if the last column is the auto created timestamp column, hence the table is auto created from
|
||||
// flow's plan type
|
||||
let is_auto_create = {
|
||||
let correct_name = schema
|
||||
.last()
|
||||
.map(|s| s.name == AUTO_CREATED_PLACEHOLDER_TS_COL)
|
||||
.unwrap_or(false);
|
||||
let correct_time_index = time_index == Some(schema.len() - 1);
|
||||
correct_name && correct_time_index
|
||||
};
|
||||
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
|
||||
Ok(Some((is_auto_create, proto_schema)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// submit a create table ddl
|
||||
pub(crate) async fn submit_create_sink_table_ddl(
|
||||
&self,
|
||||
mut create_table: CreateTableExpr,
|
||||
) -> Result<(), Error> {
|
||||
let stmt_exec = {
|
||||
self.frontend_invoker
|
||||
.read()
|
||||
.await
|
||||
.as_ref()
|
||||
.map(|f| f.statement_executor())
|
||||
}
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "Failed to get statement executor",
|
||||
})?;
|
||||
let ctx = Arc::new(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(create_table.catalog_name.clone())
|
||||
.current_schema(create_table.schema_name.clone())
|
||||
.build(),
|
||||
);
|
||||
stmt_exec
|
||||
.create_table_inner(&mut create_table, None, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_info_value_to_relation_desc(
|
||||
table_info_value: TableInfoValue,
|
||||
) -> Result<RelationDesc, Error> {
|
||||
let raw_schema = table_info_value.table_info.meta.schema;
|
||||
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
|
||||
.column_schemas
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|col| {
|
||||
(
|
||||
ColumnType {
|
||||
nullable: col.is_nullable(),
|
||||
scalar_type: col.data_type,
|
||||
},
|
||||
Some(col.name),
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
|
||||
let key = table_info_value.table_info.meta.primary_key_indices;
|
||||
let keys = vec![crate::repr::Key::from(key)];
|
||||
|
||||
let time_index = raw_schema.timestamp_index;
|
||||
|
||||
Ok(RelationDesc {
|
||||
typ: RelationType {
|
||||
column_types,
|
||||
keys,
|
||||
time_index,
|
||||
// by default table schema's column are all non-auto
|
||||
auto_columns: vec![],
|
||||
},
|
||||
names: col_names,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_proto_to_data_type(
|
||||
column_schema: &api::v1::ColumnSchema,
|
||||
@@ -75,3 +212,29 @@ pub fn column_schemas_to_proto(
|
||||
.collect();
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
/// Convert `RelationDesc` to `ColumnSchema` list,
|
||||
/// if the column name is not present, use `col_{idx}` as the column name
|
||||
pub fn relation_desc_to_column_schemas_with_fallback(schema: &RelationDesc) -> Vec<ColumnSchema> {
|
||||
schema
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(idx, typ)| {
|
||||
let name = schema
|
||||
.names
|
||||
.get(idx)
|
||||
.cloned()
|
||||
.flatten()
|
||||
.unwrap_or(format!("col_{}", idx));
|
||||
let ret = ColumnSchema::new(name, typ.scalar_type, typ.nullable);
|
||||
if schema.typ().time_index == Some(idx) {
|
||||
ret.with_time_index(true)
|
||||
} else {
|
||||
ret
|
||||
}
|
||||
})
|
||||
.collect_vec()
|
||||
}
|
||||
|
||||
@@ -247,15 +247,25 @@ impl<'s> Worker<'s> {
|
||||
src_recvs: Vec<broadcast::Receiver<Batch>>,
|
||||
// TODO(discord9): set expire duration for all arrangement and compare to sys timestamp instead
|
||||
expire_after: Option<repr::Duration>,
|
||||
or_replace: bool,
|
||||
create_if_not_exists: bool,
|
||||
err_collector: ErrCollector,
|
||||
) -> Result<Option<FlowId>, Error> {
|
||||
let already_exists = self.task_states.contains_key(&flow_id);
|
||||
match (already_exists, create_if_not_exists) {
|
||||
(true, true) => return Ok(None),
|
||||
(true, false) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
||||
(false, _) => (),
|
||||
};
|
||||
let already_exist = self.task_states.contains_key(&flow_id);
|
||||
match (create_if_not_exists, or_replace, already_exist) {
|
||||
// if replace, ignore that old flow exists
|
||||
(_, true, true) => {
|
||||
info!("Replacing flow with id={}", flow_id);
|
||||
}
|
||||
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
||||
// already exists, and not replace, return None
|
||||
(true, false, true) => {
|
||||
info!("Flow with id={} already exists, do nothing", flow_id);
|
||||
return Ok(None);
|
||||
}
|
||||
// continue as normal
|
||||
(_, _, false) => (),
|
||||
}
|
||||
|
||||
let mut cur_task_state = ActiveDataflowState::<'s> {
|
||||
err_collector,
|
||||
@@ -341,6 +351,7 @@ impl<'s> Worker<'s> {
|
||||
source_ids,
|
||||
src_recvs,
|
||||
expire_after,
|
||||
or_replace,
|
||||
create_if_not_exists,
|
||||
err_collector,
|
||||
} => {
|
||||
@@ -352,6 +363,7 @@ impl<'s> Worker<'s> {
|
||||
&source_ids,
|
||||
src_recvs,
|
||||
expire_after,
|
||||
or_replace,
|
||||
create_if_not_exists,
|
||||
err_collector,
|
||||
);
|
||||
@@ -398,6 +410,7 @@ pub enum Request {
|
||||
source_ids: Vec<GlobalId>,
|
||||
src_recvs: Vec<broadcast::Receiver<Batch>>,
|
||||
expire_after: Option<repr::Duration>,
|
||||
or_replace: bool,
|
||||
create_if_not_exists: bool,
|
||||
err_collector: ErrCollector,
|
||||
},
|
||||
@@ -547,6 +560,7 @@ mod test {
|
||||
source_ids: src_ids,
|
||||
src_recvs: vec![rx],
|
||||
expire_after: None,
|
||||
or_replace: false,
|
||||
create_if_not_exists: true,
|
||||
err_collector: ErrCollector::default(),
|
||||
};
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::array::new_null_array;
|
||||
use common_telemetry::trace;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::DataType;
|
||||
@@ -398,20 +399,54 @@ fn reduce_batch_subgraph(
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: here reduce numbers of eq to minimal by keeping slicing key/val batch
|
||||
let key_data_types = output_type
|
||||
.column_types
|
||||
.iter()
|
||||
.map(|t| t.scalar_type.clone())
|
||||
.collect_vec();
|
||||
|
||||
// TODO(discord9): here reduce numbers of eq to minimal by keeping slicing key/val batch
|
||||
for key_row in distinct_keys {
|
||||
let key_scalar_value = {
|
||||
let mut key_scalar_value = Vec::with_capacity(key_row.len());
|
||||
for key in key_row.iter() {
|
||||
for (key_idx, key) in key_row.iter().enumerate() {
|
||||
let v =
|
||||
key.try_to_scalar_value(&key.data_type())
|
||||
.context(DataTypeSnafu {
|
||||
msg: "can't convert key values to datafusion value",
|
||||
})?;
|
||||
let arrow_value =
|
||||
|
||||
let key_data_type = key_data_types.get(key_idx).context(InternalSnafu {
|
||||
reason: format!(
|
||||
"Key index out of bound, expected at most {} but got {}",
|
||||
output_type.column_types.len(),
|
||||
key_idx
|
||||
),
|
||||
})?;
|
||||
|
||||
// if incoming value's datatype is null, it need to be handled specially, see below
|
||||
if key_data_type.as_arrow_type() != v.data_type()
|
||||
&& !v.data_type().is_null()
|
||||
{
|
||||
crate::expr::error::InternalSnafu {
|
||||
reason: format!(
|
||||
"Key data type mismatch, expected {:?} but got {:?}",
|
||||
key_data_type.as_arrow_type(),
|
||||
v.data_type()
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
|
||||
// handle single null key
|
||||
let arrow_value = if v.data_type().is_null() {
|
||||
let ret = new_null_array(&arrow::datatypes::DataType::Null, 1);
|
||||
arrow::array::Scalar::new(ret)
|
||||
} else {
|
||||
v.to_scalar().context(crate::expr::error::DatafusionSnafu {
|
||||
context: "can't convert key values to arrow value",
|
||||
})?;
|
||||
})?
|
||||
};
|
||||
key_scalar_value.push(arrow_value);
|
||||
}
|
||||
key_scalar_value
|
||||
@@ -423,7 +458,19 @@ fn reduce_batch_subgraph(
|
||||
.zip(key_batch.batch().iter())
|
||||
.map(|(key, col)| {
|
||||
// TODO(discord9): this takes half of the cpu! And this is redundant amount of `eq`!
|
||||
arrow::compute::kernels::cmp::eq(&key, &col.to_arrow_array().as_ref() as _)
|
||||
|
||||
// note that if lhs is null, we still need to get all rows that are null! But we can't use `eq` since
|
||||
// it will return null if the input has nulls, so we need to use `is_null` instead
|
||||
if arrow::array::Datum::get(&key).0.data_type().is_null() {
|
||||
arrow::compute::kernels::boolean::is_null(
|
||||
col.to_arrow_array().as_ref() as _
|
||||
)
|
||||
} else {
|
||||
arrow::compute::kernels::cmp::eq(
|
||||
&key,
|
||||
&col.to_arrow_array().as_ref() as _,
|
||||
)
|
||||
}
|
||||
})
|
||||
.try_collect::<_, Vec<_>, _>()
|
||||
.context(ArrowSnafu {
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::collections::{BTreeMap, VecDeque};
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use hydroflow::scheduled::graph::Hydroflow;
|
||||
use hydroflow::scheduled::handoff::TeeingHandoff;
|
||||
use hydroflow::scheduled::port::RecvPort;
|
||||
@@ -25,6 +26,7 @@ use itertools::Itertools;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::expr::{Batch, EvalError, ScalarExpr};
|
||||
use crate::metrics::METRIC_FLOW_ERRORS;
|
||||
use crate::repr::DiffRow;
|
||||
use crate::utils::ArrangeHandler;
|
||||
|
||||
@@ -185,6 +187,9 @@ impl ErrCollector {
|
||||
}
|
||||
|
||||
pub fn push_err(&self, err: EvalError) {
|
||||
METRIC_FLOW_ERRORS
|
||||
.with_label_values(&[err.status_code().as_ref()])
|
||||
.inc();
|
||||
self.inner.blocking_lock().push_back(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -492,7 +492,7 @@ impl ScalarUDFImpl for TumbleExpand {
|
||||
if let Some(start_time) = opt{
|
||||
if !matches!(start_time, Utf8 | Date32 | Date64 | Timestamp(_, _)){
|
||||
return Err(DataFusionError::Plan(
|
||||
format!("Expect start_time to either be date, timestampe or string, found {:?}", start_time)
|
||||
format!("Expect start_time to either be date, timestamp or string, found {:?}", start_time)
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,12 +16,13 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::define_into_tonic_status;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::{define_into_tonic_status, from_err_code_msg_to_header};
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_telemetry::common_error::ext::ErrorExt;
|
||||
use common_telemetry::common_error::status_code::StatusCode;
|
||||
use snafu::{Location, Snafu};
|
||||
use tonic::metadata::MetadataMap;
|
||||
|
||||
use crate::adapter::FlowId;
|
||||
use crate::expr::EvalError;
|
||||
@@ -31,6 +32,27 @@ use crate::expr::EvalError;
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display(
|
||||
"Failed to insert into flow: region_id={}, flow_ids={:?}",
|
||||
region_id,
|
||||
flow_ids
|
||||
))]
|
||||
InsertIntoFlow {
|
||||
region_id: u64,
|
||||
flow_ids: Vec<u64>,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Error encountered while creating flow: {sql}"))]
|
||||
CreateFlow {
|
||||
sql: String,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("External error"))]
|
||||
External {
|
||||
source: BoxedError,
|
||||
@@ -186,23 +208,37 @@ pub enum Error {
|
||||
},
|
||||
}
|
||||
|
||||
/// The outer message is the full error stack; the inner message in the header is the last error message, which can be shown directly to the user
|
||||
pub fn to_status_with_last_err(err: impl ErrorExt) -> tonic::Status {
|
||||
let msg = err.to_string();
|
||||
let last_err_msg = common_error::ext::StackError::last(&err).to_string();
|
||||
let code = err.status_code() as u32;
|
||||
let header = from_err_code_msg_to_header(code, &last_err_msg);
|
||||
|
||||
tonic::Status::with_metadata(
|
||||
tonic::Code::InvalidArgument,
|
||||
msg,
|
||||
MetadataMap::from_headers(header),
|
||||
)
|
||||
}
|
||||
|
||||
/// Result type for flow module
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Self::Eval { .. } | Self::JoinTask { .. } | Self::Datafusion { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Self::Eval { .. }
|
||||
| Self::JoinTask { .. }
|
||||
| Self::Datafusion { .. }
|
||||
| Self::InsertIntoFlow { .. } => StatusCode::Internal,
|
||||
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
|
||||
Self::TableNotFound { .. }
|
||||
| Self::TableNotFoundMeta { .. }
|
||||
| Self::FlowNotFound { .. }
|
||||
| Self::ListFlows { .. } => StatusCode::TableNotFound,
|
||||
Self::InvalidQuery { .. } | Self::Plan { .. } | Self::Datatypes { .. } => {
|
||||
StatusCode::PlanQuery
|
||||
}
|
||||
Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
|
||||
Self::InvalidQuery { .. } | Self::CreateFlow { .. } => StatusCode::EngineExecuteQuery,
|
||||
Self::Unexpected { .. } => StatusCode::Unexpected,
|
||||
Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
|
||||
StatusCode::Unsupported
|
||||
|
||||
@@ -14,8 +14,11 @@
|
||||
|
||||
//! Error handling for expression evaluation.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use arrow_schema::ArrowError;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
@@ -126,3 +129,29 @@ pub enum EvalError {
|
||||
source: BoxedError,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for EvalError {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use EvalError::*;
|
||||
match self {
|
||||
DivisionByZero { .. }
|
||||
| TypeMismatch { .. }
|
||||
| TryFromValue { .. }
|
||||
| DataAlreadyExpired { .. }
|
||||
| InvalidArgument { .. }
|
||||
| Overflow { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
CastValue { source, .. } | DataType { source, .. } => source.status_code(),
|
||||
|
||||
Internal { .. }
|
||||
| Optimize { .. }
|
||||
| Arrow { .. }
|
||||
| Datafusion { .. }
|
||||
| External { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,4 +30,22 @@ lazy_static! {
.unwrap();
pub static ref METRIC_FLOW_RUN_INTERVAL_MS: IntGauge =
register_int_gauge!("greptime_flow_run_interval_ms", "flow run interval in ms").unwrap();
pub static ref METRIC_FLOW_ROWS: IntCounterVec = register_int_counter_vec!(
"greptime_flow_processed_rows",
"Count of rows flowing through the system",
&["direction"]
)
.unwrap();
pub static ref METRIC_FLOW_PROCESSING_TIME: HistogramVec = register_histogram_vec!(
"greptime_flow_processing_time",
"Time spent processing requests",
&["type"]
)
.unwrap();
pub static ref METRIC_FLOW_ERRORS: IntCounterVec = register_int_counter_vec!(
"greptime_flow_errors",
"Count of errors in flow processing",
&["code"]
)
.unwrap();
}
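A hedged usage sketch (not taken from the diff) of the new counters, using the standard prometheus crate API; the label values shown here are illustrative only.

// Record rows flowing in and one error, keyed by status code as registered above.
fn record(rows_in: usize, error_code: u32) {
    METRIC_FLOW_ROWS.with_label_values(&["in"]).inc_by(rows_in as u64);
    let code = error_code.to_string();
    METRIC_FLOW_ERRORS.with_label_values(&[code.as_str()]).inc();
}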
@@ -57,7 +57,7 @@ pub const BROADCAST_CAP: usize = 1024;
/// The maximum capacity of the send buffer, to prevent the buffer from growing too large
pub const SEND_BUF_CAP: usize = BROADCAST_CAP * 2;

/// Flow worker will try to at least accumulate this many rows before processing them(if one second havn't passed)
/// Flow worker will try to at least accumulate this many rows before processing them(if one second haven't passed)
pub const BATCH_SIZE: usize = 32 * 16384;

/// Convert a value that is or can be converted to Datetime to internal timestamp

@@ -212,6 +212,8 @@ impl RelationType {
for key in &mut self.keys {
key.remove_col(time_index.unwrap_or(usize::MAX));
}
// remove empty keys
self.keys.retain(|key| !key.is_empty());
self
}

@@ -50,10 +50,11 @@ use tonic::{Request, Response, Status};

use crate::adapter::{CreateFlowArgs, FlowWorkerManagerRef};
use crate::error::{
CacheRequiredSnafu, ExternalSnafu, FlowNotFoundSnafu, ListFlowsSnafu, ParseAddrSnafu,
ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
to_status_with_last_err, CacheRequiredSnafu, CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu,
ListFlowsSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
};
use crate::heartbeat::HeartbeatTask;
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
use crate::transform::register_function_to_query_engine;
use crate::utils::{SizeReportSender, StateReportHandler};
use crate::{Error, FlowWorkerManager, FlownodeOptions};
@@ -77,41 +78,52 @@ impl flow_server::Flow for FlowService {
&self,
request: Request<FlowRequest>,
) -> Result<Response<FlowResponse>, Status> {
let _timer = METRIC_FLOW_PROCESSING_TIME
.with_label_values(&["ddl"])
.start_timer();

let request = request.into_inner();
self.manager
.handle(request)
.await
.map(Response::new)
.map_err(|e| {
let msg = format!("failed to handle request: {:?}", e);
Status::internal(msg)
})
.map_err(to_status_with_last_err)
}

async fn handle_mirror_request(
&self,
request: Request<InsertRequests>,
) -> Result<Response<FlowResponse>, Status> {
let _timer = METRIC_FLOW_PROCESSING_TIME
.with_label_values(&["insert"])
.start_timer();

let request = request.into_inner();
// TODO(discord9): fix protobuf import order shenanigans to remove this duplicated define
let mut row_count = 0;
let request = api::v1::region::InsertRequests {
requests: request
.requests
.into_iter()
.map(|insert| api::v1::region::InsertRequest {
region_id: insert.region_id,
rows: insert.rows,
.map(|insert| {
insert.rows.as_ref().inspect(|x| row_count += x.rows.len());
api::v1::region::InsertRequest {
region_id: insert.region_id,
rows: insert.rows,
}
})
.collect_vec(),
};

METRIC_FLOW_ROWS
.with_label_values(&["in"])
.inc_by(row_count as u64);

self.manager
.handle_inserts(request)
.await
.map(Response::new)
.map_err(|e| {
let msg = format!("failed to handle request: {:?}", e);
Status::internal(msg)
})
.map_err(to_status_with_last_err)
}
}

@@ -380,7 +392,13 @@ impl FlownodeBuilder {
.build(),
),
};
manager.create_flow(args).await?;
manager
.create_flow(args)
.await
.map_err(BoxedError::new)
.with_context(|_| CreateFlowSnafu {
sql: info.raw_sql().clone(),
})?;
}

Ok(cnt)
@@ -500,6 +518,10 @@ impl FrontendInvoker {
requests: RowInsertRequests,
ctx: QueryContextRef,
) -> common_frontend::error::Result<Output> {
let _timer = METRIC_FLOW_PROCESSING_TIME
.with_label_values(&["output_insert"])
.start_timer();

self.inserter
.handle_row_inserts(requests, ctx, &self.statement_executor)
.await
@@ -512,10 +534,18 @@ impl FrontendInvoker {
requests: RowDeleteRequests,
ctx: QueryContextRef,
) -> common_frontend::error::Result<Output> {
let _timer = METRIC_FLOW_PROCESSING_TIME
.with_label_values(&["output_delete"])
.start_timer();

self.deleter
.handle_row_deletes(requests, ctx)
.await
.map_err(BoxedError::new)
.context(common_frontend::error::ExternalSnafu)
}

pub fn statement_executor(&self) -> Arc<StatementExecutor> {
self.statement_executor.clone()
}
}

@@ -173,12 +173,11 @@ mod test {
|
||||
|
||||
use super::*;
|
||||
use crate::adapter::node_context::IdToNameMap;
|
||||
use crate::adapter::table_source::test::FlowDummyTableSource;
|
||||
use crate::df_optimizer::apply_df_optimizer;
|
||||
use crate::expr::GlobalId;
|
||||
use crate::repr::{ColumnType, RelationType};
|
||||
|
||||
pub fn create_test_ctx() -> FlownodeContext {
|
||||
let mut schemas = HashMap::new();
|
||||
let mut tri_map = IdToNameMap::new();
|
||||
{
|
||||
let gid = GlobalId::User(0);
|
||||
@@ -187,10 +186,7 @@ mod test {
|
||||
"public".to_string(),
|
||||
"numbers".to_string(),
|
||||
];
|
||||
let schema = RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)]);
|
||||
|
||||
tri_map.insert(Some(name.clone()), Some(1024), gid);
|
||||
schemas.insert(gid, schema.into_named(vec![Some("number".to_string())]));
|
||||
}
|
||||
|
||||
{
|
||||
@@ -200,23 +196,16 @@ mod test {
|
||||
"public".to_string(),
|
||||
"numbers_with_ts".to_string(),
|
||||
];
|
||||
let schema = RelationType::new(vec![
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
|
||||
]);
|
||||
schemas.insert(
|
||||
gid,
|
||||
schema.into_named(vec![Some("number".to_string()), Some("ts".to_string())]),
|
||||
);
|
||||
tri_map.insert(Some(name.clone()), Some(1025), gid);
|
||||
}
|
||||
|
||||
FlownodeContext {
|
||||
schema: schemas,
|
||||
table_repr: tri_map,
|
||||
query_context: Some(Arc::new(QueryContext::with("greptime", "public"))),
|
||||
..Default::default()
|
||||
}
|
||||
let dummy_source = FlowDummyTableSource::default();
|
||||
|
||||
let mut ctx = FlownodeContext::new(Box::new(dummy_source));
|
||||
ctx.table_repr = tri_map;
|
||||
ctx.query_context = Some(Arc::new(QueryContext::with("greptime", "public")));
|
||||
|
||||
ctx
|
||||
}
|
||||
|
||||
pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
|
||||
|
||||
@@ -128,7 +128,11 @@ impl AggregateExpr {
|
||||
}
|
||||
|
||||
if args.len() != 1 {
|
||||
return not_impl_err!("Aggregated function with multiple arguments is not supported");
|
||||
let fn_name = extensions.get(&f.function_reference).cloned();
|
||||
return not_impl_err!(
|
||||
"Aggregated function (name={:?}) with multiple arguments is not supported",
|
||||
fn_name
|
||||
);
|
||||
}
|
||||
|
||||
let arg = if let Some(first) = args.first() {
|
||||
@@ -216,6 +220,7 @@ impl KeyValPlan {
|
||||
|
||||
/// find out the column that should be time index in group exprs(which is all columns that should be keys)
|
||||
/// TODO(discord9): better ways to assign time index
|
||||
/// for now, it will found the first column that is timestamp or has a tumble window floor function
|
||||
fn find_time_index_in_group_exprs(group_exprs: &[TypedExpr]) -> Option<usize> {
|
||||
group_exprs.iter().position(|expr| {
|
||||
matches!(
|
||||
@@ -224,7 +229,7 @@ fn find_time_index_in_group_exprs(group_exprs: &[TypedExpr]) -> Option<usize> {
|
||||
func: UnaryFunc::TumbleWindowFloor { .. },
|
||||
expr: _
|
||||
}
|
||||
)
|
||||
) || expr.typ.scalar_type.is_timestamp()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1482,7 +1487,7 @@ mod test {
|
||||
ColumnType::new(CDT::float64_datatype(), true),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), true),
|
||||
])
|
||||
.with_key(vec![1])
|
||||
.with_time_index(Some(1))
|
||||
.into_named(vec![
|
||||
Some(
|
||||
"MAX(numbers_with_ts.number) - MIN(numbers_with_ts.number) / Float64(30)"
|
||||
@@ -1571,7 +1576,7 @@ mod test {
|
||||
ColumnType::new(ConcreteDataType::uint32_datatype(), true), // max
|
||||
ColumnType::new(ConcreteDataType::uint32_datatype(), true), // min
|
||||
])
|
||||
.with_key(vec![0])
|
||||
.with_time_index(Some(0))
|
||||
.into_unnamed(),
|
||||
),
|
||||
),
|
||||
|
||||
@@ -176,7 +176,7 @@ impl TypedPlan {
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
let table = ctx.table(&table_reference)?;
|
||||
let table = ctx.table(&table_reference).await?;
|
||||
let get_table = Plan::Get {
|
||||
id: crate::expr::Id::Global(table.0),
|
||||
};
|
||||
|
||||
@@ -321,6 +321,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("In-flight write bytes exceeded the maximum limit"))]
|
||||
InFlightWriteBytesExceeded {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -392,6 +398,8 @@ impl ErrorExt for Error {
|
||||
Error::StartScriptManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::TableOperation { source, .. } => source.status_code(),
|
||||
|
||||
Error::InFlightWriteBytesExceeded { .. } => StatusCode::RateLimited,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_config::config::Configurable;
|
||||
use common_options::datanode::DatanodeClientOptions;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
@@ -46,6 +47,7 @@ pub struct FrontendOptions {
|
||||
pub user_provider: Option<String>,
|
||||
pub export_metrics: ExportMetricsOption,
|
||||
pub tracing: TracingOptions,
|
||||
pub max_in_flight_write_bytes: Option<ReadableSize>,
|
||||
}
|
||||
|
||||
impl Default for FrontendOptions {
|
||||
@@ -68,6 +70,7 @@ impl Default for FrontendOptions {
|
||||
user_provider: None,
|
||||
export_metrics: ExportMetricsOption::default(),
|
||||
tracing: TracingOptions::default(),
|
||||
max_in_flight_write_bytes: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,6 +87,7 @@ use crate::error::{
};
use crate::frontend::FrontendOptions;
use crate::heartbeat::HeartbeatTask;
use crate::limiter::LimiterRef;
use crate::script::ScriptExecutor;

#[async_trait]
@@ -126,6 +127,7 @@ pub struct Instance {
export_metrics_task: Option<ExportMetricsTask>,
table_metadata_manager: TableMetadataManagerRef,
stats: StatementStatistics,
limiter: Option<LimiterRef>,
}

impl Instance {

@@ -43,6 +43,7 @@ use crate::frontend::FrontendOptions;
use crate::heartbeat::HeartbeatTask;
use crate::instance::region_query::FrontendRegionQueryHandler;
use crate::instance::Instance;
use crate::limiter::Limiter;
use crate::script::ScriptExecutor;

/// The frontend [`Instance`] builder.
@@ -196,6 +197,14 @@ impl FrontendBuilder {

plugins.insert::<StatementExecutorRef>(statement_executor.clone());

// Create the limiter if the max_in_flight_write_bytes is set.
let limiter = self
.options
.max_in_flight_write_bytes
.map(|max_in_flight_write_bytes| {
Arc::new(Limiter::new(max_in_flight_write_bytes.as_bytes()))
});

Ok(Instance {
options: self.options,
catalog_manager: self.catalog_manager,
@@ -211,6 +220,7 @@ impl FrontendBuilder {
export_metrics_task: None,
table_metadata_manager: Arc::new(TableMetadataManager::new(kv_backend)),
stats: self.stats,
limiter,
})
}
}

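For context, a sketch of how the new limiter is switched on from configuration. It assumes `ReadableSize::mb` exists on `common_base::readable_size::ReadableSize` (the type imported in the frontend options hunk below); the 64 MiB figure is purely illustrative.

// With `max_in_flight_write_bytes` left as `None` (the default), the builder
// above creates no limiter and writes are not throttled.
let options = FrontendOptions {
    max_in_flight_write_bytes: Some(ReadableSize::mb(64)),
    ..Default::default()
};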
@@ -29,8 +29,8 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::error::{
|
||||
Error, IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, Result,
|
||||
TableOperationSnafu,
|
||||
Error, InFlightWriteBytesExceededSnafu, IncompleteGrpcRequestSnafu, NotSupportedSnafu,
|
||||
PermissionSnafu, Result, TableOperationSnafu,
|
||||
};
|
||||
use crate::instance::{attach_timer, Instance};
|
||||
use crate::metrics::{GRPC_HANDLE_PROMQL_ELAPSED, GRPC_HANDLE_SQL_ELAPSED};
|
||||
@@ -50,6 +50,16 @@ impl GrpcQueryHandler for Instance {
|
||||
.check_permission(ctx.current_user(), PermissionReq::GrpcRequest(&request))
|
||||
.context(PermissionSnafu)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_request(&request);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = match request {
|
||||
Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
|
||||
Request::RowInserts(requests) => self.handle_row_inserts(requests, ctx.clone()).await?,
|
||||
|
||||
@@ -16,7 +16,7 @@ use async_trait::async_trait;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use client::Output;
|
||||
use common_error::ext::BoxedError;
|
||||
use servers::error::{AuthSnafu, Error};
|
||||
use servers::error::{AuthSnafu, Error, InFlightWriteBytesExceededSnafu};
|
||||
use servers::influxdb::InfluxdbRequest;
|
||||
use servers::interceptor::{LineProtocolInterceptor, LineProtocolInterceptorRef};
|
||||
use servers::query_handler::InfluxdbLineProtocolHandler;
|
||||
@@ -46,6 +46,16 @@ impl InfluxdbLineProtocolHandler for Instance {
|
||||
.post_lines_conversion(requests, ctx.clone())
|
||||
.await?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_influx_row_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
|
||||
@@ -22,7 +22,8 @@ use common_error::ext::BoxedError;
|
||||
use pipeline::pipeline_operator::PipelineOperator;
|
||||
use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion};
|
||||
use servers::error::{
|
||||
AuthSnafu, Error as ServerError, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult,
|
||||
AuthSnafu, Error as ServerError, ExecuteGrpcRequestSnafu, InFlightWriteBytesExceededSnafu,
|
||||
PipelineSnafu, Result as ServerResult,
|
||||
};
|
||||
use servers::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
|
||||
use servers::query_handler::PipelineHandler;
|
||||
@@ -110,6 +111,16 @@ impl Instance {
|
||||
log: RowInsertRequests,
|
||||
ctx: QueryContextRef,
|
||||
) -> ServerResult<Output> {
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&log);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.inserter
|
||||
.handle_log_inserts(log, ctx, self.statement_executor.as_ref())
|
||||
.await
|
||||
|
||||
@@ -17,7 +17,7 @@ use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::tracing;
|
||||
use servers::error as server_error;
|
||||
use servers::error::AuthSnafu;
|
||||
use servers::error::{AuthSnafu, InFlightWriteBytesExceededSnafu};
|
||||
use servers::opentsdb::codec::DataPoint;
|
||||
use servers::opentsdb::data_point_to_grpc_row_insert_requests;
|
||||
use servers::query_handler::OpentsdbProtocolHandler;
|
||||
@@ -41,6 +41,17 @@ impl OpentsdbProtocolHandler for Instance {
|
||||
.context(AuthSnafu)?;
|
||||
|
||||
let (requests, _) = data_point_to_grpc_row_insert_requests(data_points)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = self
|
||||
.handle_row_inserts(requests, ctx)
|
||||
.await
|
||||
|
||||
@@ -21,7 +21,7 @@ use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
|
||||
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
|
||||
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
|
||||
use pipeline::PipelineWay;
|
||||
use servers::error::{self, AuthSnafu, Result as ServerResult};
|
||||
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
|
||||
use servers::interceptor::{OpenTelemetryProtocolInterceptor, OpenTelemetryProtocolInterceptorRef};
|
||||
use servers::otlp;
|
||||
use servers::query_handler::OpenTelemetryProtocolHandler;
|
||||
@@ -53,6 +53,16 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
let (requests, rows) = otlp::metrics::to_grpc_insert_requests(request)?;
|
||||
OTLP_METRICS_ROWS.inc_by(rows as u64);
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_row_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
@@ -83,6 +93,16 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
|
||||
OTLP_TRACES_ROWS.inc_by(rows as u64);
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_log_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
@@ -109,6 +129,17 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
interceptor_ref.pre_execute(ctx.clone())?;
|
||||
|
||||
let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_log_inserts(requests, ctx)
|
||||
.await
|
||||
.inspect(|_| OTLP_LOGS_ROWS.inc_by(rows as u64))
|
||||
|
||||
@@ -30,7 +30,7 @@ use common_telemetry::{debug, tracing};
|
||||
use operator::insert::InserterRef;
|
||||
use operator::statement::StatementExecutor;
|
||||
use prost::Message;
|
||||
use servers::error::{self, AuthSnafu, Result as ServerResult};
|
||||
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
|
||||
use servers::http::header::{collect_plan_metrics, CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF};
|
||||
use servers::http::prom_store::PHYSICAL_TABLE_PARAM;
|
||||
use servers::interceptor::{PromStoreProtocolInterceptor, PromStoreProtocolInterceptorRef};
|
||||
@@ -175,6 +175,16 @@ impl PromStoreProtocolHandler for Instance {
|
||||
.get::<PromStoreProtocolInterceptorRef<servers::error::Error>>();
|
||||
interceptor_ref.pre_write(&request, ctx.clone())?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&request);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = if with_metric_engine {
|
||||
let physical_table = ctx
|
||||
.extension(PHYSICAL_TABLE_PARAM)
|
||||
|
||||
@@ -18,6 +18,7 @@ pub mod error;
pub mod frontend;
pub mod heartbeat;
pub mod instance;
pub(crate) mod limiter;
pub(crate) mod metrics;
mod script;
pub mod server;

src/frontend/src/limiter.rs (new file, 291 lines)
@@ -0,0 +1,291 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::column::Values;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::value::ValueData;
|
||||
use api::v1::{Decimal128, InsertRequests, IntervalMonthDayNano, RowInsertRequests};
|
||||
use common_telemetry::{debug, warn};
|
||||
|
||||
pub(crate) type LimiterRef = Arc<Limiter>;
|
||||
|
||||
/// A frontend request limiter that controls the total size of in-flight write requests.
|
||||
pub(crate) struct Limiter {
|
||||
// The maximum number of bytes that can be in flight.
|
||||
max_in_flight_write_bytes: u64,
|
||||
|
||||
// The current in-flight write bytes.
|
||||
in_flight_write_bytes: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
/// A counter for the in-flight write bytes.
|
||||
pub(crate) struct InFlightWriteBytesCounter {
|
||||
// The current in-flight write bytes.
|
||||
in_flight_write_bytes: Arc<AtomicU64>,
|
||||
|
||||
// The write bytes that are being processed.
|
||||
processing_write_bytes: u64,
|
||||
}
|
||||
|
||||
impl InFlightWriteBytesCounter {
|
||||
/// Creates a new InFlightWriteBytesCounter. It will decrease the in-flight write bytes when dropped.
|
||||
pub fn new(in_flight_write_bytes: Arc<AtomicU64>, processing_write_bytes: u64) -> Self {
|
||||
debug!(
|
||||
"processing write bytes: {}, current in-flight write bytes: {}",
|
||||
processing_write_bytes,
|
||||
in_flight_write_bytes.load(Ordering::Relaxed)
|
||||
);
|
||||
Self {
|
||||
in_flight_write_bytes,
|
||||
processing_write_bytes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for InFlightWriteBytesCounter {
|
||||
// When the request is finished, the in-flight write bytes should be decreased.
|
||||
fn drop(&mut self) {
|
||||
self.in_flight_write_bytes
|
||||
.fetch_sub(self.processing_write_bytes, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
impl Limiter {
|
||||
pub fn new(max_in_flight_write_bytes: u64) -> Self {
|
||||
Self {
|
||||
max_in_flight_write_bytes,
|
||||
in_flight_write_bytes: Arc::new(AtomicU64::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn limit_request(&self, request: &Request) -> Option<InFlightWriteBytesCounter> {
|
||||
let size = match request {
|
||||
Request::Inserts(requests) => self.insert_requests_data_size(requests),
|
||||
Request::RowInserts(requests) => self.rows_insert_requests_data_size(requests),
|
||||
_ => 0,
|
||||
};
|
||||
self.limit_in_flight_write_bytes(size as u64)
|
||||
}
|
||||
|
||||
pub fn limit_row_inserts(
|
||||
&self,
|
||||
requests: &RowInsertRequests,
|
||||
) -> Option<InFlightWriteBytesCounter> {
|
||||
let size = self.rows_insert_requests_data_size(requests);
|
||||
self.limit_in_flight_write_bytes(size as u64)
|
||||
}
|
||||
|
||||
/// Returns None if the in-flight write bytes exceed the maximum limit.
|
||||
/// Otherwise, returns Some(InFlightWriteBytesCounter) and the in-flight write bytes will be increased.
|
||||
pub fn limit_in_flight_write_bytes(&self, bytes: u64) -> Option<InFlightWriteBytesCounter> {
|
||||
let result = self.in_flight_write_bytes.fetch_update(
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
|current| {
|
||||
if current + bytes > self.max_in_flight_write_bytes {
|
||||
warn!(
|
||||
"in-flight write bytes exceed the maximum limit {}, request with {} bytes will be limited",
|
||||
self.max_in_flight_write_bytes,
|
||||
bytes
|
||||
);
|
||||
return None;
|
||||
}
|
||||
Some(current + bytes)
|
||||
},
|
||||
);
|
||||
|
||||
match result {
|
||||
// Update the in-flight write bytes successfully.
|
||||
Ok(_) => Some(InFlightWriteBytesCounter::new(
|
||||
self.in_flight_write_bytes.clone(),
|
||||
bytes,
|
||||
)),
|
||||
// It means the in-flight write bytes exceed the maximum limit.
|
||||
Err(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current in-flight write bytes.
|
||||
#[allow(dead_code)]
|
||||
pub fn in_flight_write_bytes(&self) -> u64 {
|
||||
self.in_flight_write_bytes.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn insert_requests_data_size(&self, request: &InsertRequests) -> usize {
|
||||
let mut size: usize = 0;
|
||||
for insert in &request.inserts {
|
||||
for column in &insert.columns {
|
||||
if let Some(values) = &column.values {
|
||||
size += self.size_of_column_values(values);
|
||||
}
|
||||
}
|
||||
}
|
||||
size
|
||||
}
|
||||
|
||||
fn rows_insert_requests_data_size(&self, request: &RowInsertRequests) -> usize {
|
||||
let mut size: usize = 0;
|
||||
for insert in &request.inserts {
|
||||
if let Some(rows) = &insert.rows {
|
||||
for row in &rows.rows {
|
||||
for value in &row.values {
|
||||
if let Some(value) = &value.value_data {
|
||||
size += self.size_of_value_data(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
size
|
||||
}
|
||||
|
||||
fn size_of_column_values(&self, values: &Values) -> usize {
|
||||
let mut size: usize = 0;
|
||||
size += values.i8_values.len() * size_of::<i32>();
|
||||
size += values.i16_values.len() * size_of::<i32>();
|
||||
size += values.i32_values.len() * size_of::<i32>();
|
||||
size += values.i64_values.len() * size_of::<i64>();
|
||||
size += values.u8_values.len() * size_of::<u32>();
|
||||
size += values.u16_values.len() * size_of::<u32>();
|
||||
size += values.u32_values.len() * size_of::<u32>();
|
||||
size += values.u64_values.len() * size_of::<u64>();
|
||||
size += values.f32_values.len() * size_of::<f32>();
|
||||
size += values.f64_values.len() * size_of::<f64>();
|
||||
size += values.bool_values.len() * size_of::<bool>();
|
||||
size += values
|
||||
.binary_values
|
||||
.iter()
|
||||
.map(|v| v.len() * size_of::<u8>())
|
||||
.sum::<usize>();
|
||||
size += values.string_values.iter().map(|v| v.len()).sum::<usize>();
|
||||
size += values.date_values.len() * size_of::<i32>();
|
||||
size += values.datetime_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_second_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_millisecond_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_microsecond_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_nanosecond_values.len() * size_of::<i64>();
|
||||
size += values.time_second_values.len() * size_of::<i64>();
|
||||
size += values.time_millisecond_values.len() * size_of::<i64>();
|
||||
size += values.time_microsecond_values.len() * size_of::<i64>();
|
||||
size += values.time_nanosecond_values.len() * size_of::<i64>();
|
||||
size += values.interval_year_month_values.len() * size_of::<i64>();
|
||||
size += values.interval_day_time_values.len() * size_of::<i64>();
|
||||
size += values.interval_month_day_nano_values.len() * size_of::<IntervalMonthDayNano>();
|
||||
size += values.decimal128_values.len() * size_of::<Decimal128>();
|
||||
size
|
||||
}
|
||||
|
||||
fn size_of_value_data(&self, value: &ValueData) -> usize {
|
||||
match value {
|
||||
ValueData::I8Value(_) => size_of::<i32>(),
|
||||
ValueData::I16Value(_) => size_of::<i32>(),
|
||||
ValueData::I32Value(_) => size_of::<i32>(),
|
||||
ValueData::I64Value(_) => size_of::<i64>(),
|
||||
ValueData::U8Value(_) => size_of::<u32>(),
|
||||
ValueData::U16Value(_) => size_of::<u32>(),
|
||||
ValueData::U32Value(_) => size_of::<u32>(),
|
||||
ValueData::U64Value(_) => size_of::<u64>(),
|
||||
ValueData::F32Value(_) => size_of::<f32>(),
|
||||
ValueData::F64Value(_) => size_of::<f64>(),
|
||||
ValueData::BoolValue(_) => size_of::<bool>(),
|
||||
ValueData::BinaryValue(v) => v.len() * size_of::<u8>(),
|
||||
ValueData::StringValue(v) => v.len(),
|
||||
ValueData::DateValue(_) => size_of::<i32>(),
|
||||
ValueData::DatetimeValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampSecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampMillisecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampMicrosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampNanosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeSecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeMillisecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeMicrosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeNanosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::IntervalYearMonthValue(_) => size_of::<i32>(),
|
||||
ValueData::IntervalDayTimeValue(_) => size_of::<i64>(),
|
||||
ValueData::IntervalMonthDayNanoValue(_) => size_of::<IntervalMonthDayNano>(),
|
||||
ValueData::Decimal128Value(_) => size_of::<Decimal128>(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::column::Values;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::{Column, InsertRequest};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn generate_request(size: usize) -> Request {
|
||||
let i8_values = vec![0; size / 4];
|
||||
Request::Inserts(InsertRequests {
|
||||
inserts: vec![InsertRequest {
|
||||
columns: vec![Column {
|
||||
values: Some(Values {
|
||||
i8_values,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
}],
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_limiter() {
|
||||
let limiter_ref: LimiterRef = Arc::new(Limiter::new(1024));
|
||||
let tasks_count = 10;
|
||||
let request_data_size = 100;
|
||||
let mut handles = vec![];
|
||||
|
||||
// Generate multiple requests to test the limiter.
|
||||
for _ in 0..tasks_count {
|
||||
let limiter = limiter_ref.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let result = limiter.limit_request(&generate_request(request_data_size));
|
||||
assert!(result.is_some());
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all threads to complete.
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_in_flight_write_bytes() {
|
||||
let limiter_ref: LimiterRef = Arc::new(Limiter::new(1024));
|
||||
let req1 = generate_request(100);
|
||||
let result1 = limiter_ref.limit_request(&req1);
|
||||
assert!(result1.is_some());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 100);
|
||||
|
||||
let req2 = generate_request(200);
|
||||
let result2 = limiter_ref.limit_request(&req2);
|
||||
assert!(result2.is_some());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 300);
|
||||
|
||||
drop(result1.unwrap());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 200);
|
||||
|
||||
drop(result2.unwrap());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 0);
|
||||
}
|
||||
}
|
||||
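A short usage sketch of the limiter added above, mirroring its unit tests: the returned guard holds the reserved byte count and releases it when dropped.

let limiter = Limiter::new(1024);
// Reserve capacity for a request; `None` means the in-flight budget is exhausted
// and the caller should return `InFlightWriteBytesExceeded`.
if let Some(guard) = limiter.limit_in_flight_write_bytes(100) {
    // ... handle the write while `guard` is alive ...
    drop(guard); // in_flight_write_bytes drops back by 100
}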
@@ -22,6 +22,7 @@ fst.workspace = true
|
||||
futures.workspace = true
|
||||
greptime-proto.workspace = true
|
||||
mockall.workspace = true
|
||||
parquet.workspace = true
|
||||
pin-project.workspace = true
|
||||
prost.workspace = true
|
||||
regex.workspace = true
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub mod applier;
|
||||
pub mod creator;
|
||||
pub mod error;
|
||||
pub mod reader;
|
||||
@@ -25,7 +26,7 @@ pub type BytesRef<'a> = &'a [u8];
|
||||
pub const SEED: u128 = 42;
|
||||
|
||||
/// The Meta information of the bloom filter stored in the file.
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
|
||||
pub struct BloomFilterMeta {
|
||||
/// The number of rows per segment.
|
||||
pub rows_per_segment: usize,
|
||||
@@ -44,7 +45,7 @@ pub struct BloomFilterMeta {
|
||||
}
|
||||
|
||||
/// The location of the bloom filter segment in the file.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Hash, PartialEq, Eq)]
|
||||
pub struct BloomFilterSegmentLocation {
|
||||
/// The offset of the bloom filter segment in the file.
|
||||
pub offset: u64,
|
||||
|
||||
src/index/src/bloom_filter/applier.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
|
||||
use parquet::arrow::arrow_reader::RowSelection;
|
||||
use parquet::file::metadata::RowGroupMetaData;
|
||||
|
||||
use crate::bloom_filter::error::Result;
|
||||
use crate::bloom_filter::reader::BloomFilterReader;
|
||||
use crate::bloom_filter::{BloomFilterMeta, BloomFilterSegmentLocation, Bytes};
|
||||
|
||||
/// Enumerates types of predicates for value filtering.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Predicate {
|
||||
/// Predicate for matching values in a list.
|
||||
InList(InListPredicate),
|
||||
}
|
||||
|
||||
/// `InListPredicate` contains a list of acceptable values. A value needs to match at least
|
||||
/// one of the elements (logical OR semantic) for the predicate to be satisfied.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct InListPredicate {
|
||||
/// List of acceptable values.
|
||||
pub list: HashSet<Bytes>,
|
||||
}
|
||||
|
||||
pub struct BloomFilterApplier {
|
||||
reader: Box<dyn BloomFilterReader + Send>,
|
||||
meta: BloomFilterMeta,
|
||||
}
|
||||
|
||||
impl BloomFilterApplier {
|
||||
pub async fn new(mut reader: Box<dyn BloomFilterReader + Send>) -> Result<Self> {
|
||||
let meta = reader.metadata().await?;
|
||||
|
||||
Ok(Self { reader, meta })
|
||||
}
|
||||
|
||||
/// Searches for matching row groups using bloom filters.
|
||||
///
|
||||
/// This method applies bloom filter index to eliminate row groups that definitely
|
||||
/// don't contain the searched values. It works by:
|
||||
///
|
||||
/// 1. Computing prefix sums for row counts
|
||||
/// 2. Calculating bloom filter segment locations for each row group
|
||||
/// 1. A row group may span multiple bloom filter segments
|
||||
/// 3. Probing bloom filter segments
|
||||
/// 4. Removing non-matching row groups from the basement
|
||||
/// 1. If a row group doesn't match any bloom filter segment with any probe, it is removed
|
||||
///
|
||||
/// # Note
|
||||
/// The method modifies the `basement` map in-place by removing row groups that
|
||||
/// don't match the bloom filter criteria.
|
||||
pub async fn search(
|
||||
&mut self,
|
||||
probes: &HashSet<Bytes>,
|
||||
row_group_metas: &[RowGroupMetaData],
|
||||
basement: &mut BTreeMap<usize, Option<RowSelection>>,
|
||||
) -> Result<()> {
|
||||
// 0. Fast path - if basement is empty return empty vec
|
||||
if basement.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// 1. Compute prefix sum for row counts
|
||||
let mut sum = 0usize;
|
||||
let mut prefix_sum = Vec::with_capacity(row_group_metas.len() + 1);
|
||||
prefix_sum.push(0usize);
|
||||
for meta in row_group_metas {
|
||||
sum += meta.num_rows() as usize;
|
||||
prefix_sum.push(sum);
|
||||
}
|
||||
|
||||
// 2. Calculate bloom filter segment locations
|
||||
let mut row_groups_to_remove = HashSet::new();
|
||||
for &row_group_idx in basement.keys() {
|
||||
// TODO(ruihang): support further filter over row selection
|
||||
|
||||
// todo: dedup & overlap
|
||||
let rows_range_start = prefix_sum[row_group_idx] / self.meta.rows_per_segment;
|
||||
let rows_range_end = (prefix_sum[row_group_idx + 1] as f64
|
||||
/ self.meta.rows_per_segment as f64)
|
||||
.ceil() as usize;
|
||||
|
||||
let mut is_any_range_hit = false;
|
||||
for i in rows_range_start..rows_range_end {
|
||||
// 3. Probe each bloom filter segment
|
||||
let loc = BloomFilterSegmentLocation {
|
||||
offset: self.meta.bloom_filter_segments[i].offset,
|
||||
size: self.meta.bloom_filter_segments[i].size,
|
||||
elem_count: self.meta.bloom_filter_segments[i].elem_count,
|
||||
};
|
||||
let bloom = self.reader.bloom_filter(&loc).await?;
|
||||
|
||||
// Check if any probe exists in bloom filter
|
||||
let mut matches = false;
|
||||
for probe in probes {
|
||||
if bloom.contains(probe) {
|
||||
matches = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
is_any_range_hit |= matches;
|
||||
if matches {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if !is_any_range_hit {
|
||||
row_groups_to_remove.insert(row_group_idx);
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Remove row groups that do not match any bloom filter segment
|
||||
for row_group_idx in row_groups_to_remove {
|
||||
basement.remove(&row_group_idx);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
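An illustrative call pattern for the applier above (not part of the diff). It assumes `Bytes` is the crate's owned byte alias and that `row_group_metas` and `basement` come from the parquet file being pruned.

// Sketch: prune candidate row groups in place using the bloom filter index.
async fn prune(
    reader: Box<dyn BloomFilterReader + Send>,
    row_group_metas: &[RowGroupMetaData],
    basement: &mut BTreeMap<usize, Option<RowSelection>>,
) -> Result<()> {
    let mut applier = BloomFilterApplier::new(reader).await?;
    let probes: HashSet<Bytes> = [b"tag_value".to_vec()].into_iter().collect();
    // Row groups whose segments cannot contain any probe are removed from `basement`.
    applier.search(&probes, row_group_metas, basement).await
}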
@@ -73,7 +73,7 @@ impl BloomFilterCreator {
|
||||
/// `rows_per_segment` <= 0
|
||||
pub fn new(
|
||||
rows_per_segment: usize,
|
||||
intermediate_provider: Box<dyn ExternalTempFileProvider>,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
global_memory_usage: Arc<AtomicUsize>,
|
||||
global_memory_usage_threshold: Option<usize>,
|
||||
) -> Self {
|
||||
@@ -96,6 +96,49 @@ impl BloomFilterCreator {
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds multiple rows of elements to the bloom filter. If the number of accumulated rows
|
||||
/// reaches `rows_per_segment`, it finalizes the current segment.
|
||||
pub async fn push_n_row_elems(
|
||||
&mut self,
|
||||
mut nrows: usize,
|
||||
elems: impl IntoIterator<Item = Bytes>,
|
||||
) -> Result<()> {
|
||||
if nrows == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
if nrows == 1 {
|
||||
return self.push_row_elems(elems).await;
|
||||
}
|
||||
|
||||
let elems = elems.into_iter().collect::<Vec<_>>();
|
||||
while nrows > 0 {
|
||||
let rows_to_seg_end =
|
||||
self.rows_per_segment - (self.accumulated_row_count % self.rows_per_segment);
|
||||
let rows_to_push = nrows.min(rows_to_seg_end);
|
||||
nrows -= rows_to_push;
|
||||
|
||||
self.accumulated_row_count += rows_to_push;
|
||||
|
||||
let mut mem_diff = 0;
|
||||
for elem in &elems {
|
||||
let len = elem.len();
|
||||
let is_new = self.cur_seg_distinct_elems.insert(elem.clone());
|
||||
if is_new {
|
||||
mem_diff += len;
|
||||
}
|
||||
}
|
||||
self.cur_seg_distinct_elems_mem_usage += mem_diff;
|
||||
self.global_memory_usage
|
||||
.fetch_add(mem_diff, Ordering::Relaxed);
|
||||
|
||||
if self.accumulated_row_count % self.rows_per_segment == 0 {
|
||||
self.finalize_segment().await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Adds a row of elements to the bloom filter. If the number of accumulated rows
|
||||
/// reaches `rows_per_segment`, it finalizes the current segment.
|
||||
pub async fn push_row_elems(&mut self, elems: impl IntoIterator<Item = Bytes>) -> Result<()> {
|
||||
@@ -181,6 +224,13 @@ impl BloomFilterCreator {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for BloomFilterCreator {
|
||||
fn drop(&mut self) {
|
||||
self.global_memory_usage
|
||||
.fetch_sub(self.cur_seg_distinct_elems_mem_usage, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use fastbloom::BloomFilter;
|
||||
@@ -202,7 +252,7 @@ mod tests {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
2,
|
||||
Box::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
);
|
||||
@@ -266,4 +316,79 @@ mod tests {
|
||||
assert!(bfs[1].contains(&b"e"));
|
||||
assert!(bfs[1].contains(&b"f"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_bloom_filter_creator_batch_push() {
|
||||
let mut writer = Cursor::new(Vec::new());
|
||||
let mut creator: BloomFilterCreator = BloomFilterCreator::new(
|
||||
2,
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
);
|
||||
|
||||
creator
|
||||
.push_n_row_elems(5, vec![b"a".to_vec(), b"b".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(creator.cur_seg_distinct_elems_mem_usage > 0);
|
||||
assert!(creator.memory_usage() > 0);
|
||||
|
||||
creator
|
||||
.push_n_row_elems(5, vec![b"c".to_vec(), b"d".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(creator.cur_seg_distinct_elems_mem_usage, 0);
|
||||
assert!(creator.memory_usage() > 0);
|
||||
|
||||
creator
|
||||
.push_n_row_elems(10, vec![b"e".to_vec(), b"f".to_vec()])
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(creator.cur_seg_distinct_elems_mem_usage, 0);
|
||||
assert!(creator.memory_usage() > 0);
|
||||
|
||||
creator.finish(&mut writer).await.unwrap();
|
||||
|
||||
let bytes = writer.into_inner();
|
||||
let total_size = bytes.len();
|
||||
let meta_size_offset = total_size - 4;
|
||||
let meta_size = u32::from_le_bytes((&bytes[meta_size_offset..]).try_into().unwrap());
|
||||
|
||||
let meta_bytes = &bytes[total_size - meta_size as usize - 4..total_size - 4];
|
||||
let meta: BloomFilterMeta = serde_json::from_slice(meta_bytes).unwrap();
|
||||
|
||||
assert_eq!(meta.rows_per_segment, 2);
|
||||
assert_eq!(meta.seg_count, 10);
|
||||
assert_eq!(meta.row_count, 20);
|
||||
assert_eq!(
|
||||
meta.bloom_filter_segments_size + meta_bytes.len() + 4,
|
||||
total_size
|
||||
);
|
||||
|
||||
let mut bfs = Vec::new();
|
||||
for segment in meta.bloom_filter_segments {
|
||||
let bloom_filter_bytes =
|
||||
&bytes[segment.offset as usize..(segment.offset + segment.size) as usize];
|
||||
let v = u64_vec_from_bytes(bloom_filter_bytes);
|
||||
let bloom_filter = BloomFilter::from_vec(v)
|
||||
.seed(&SEED)
|
||||
.expected_items(segment.elem_count);
|
||||
bfs.push(bloom_filter);
|
||||
}
|
||||
|
||||
assert_eq!(bfs.len(), 10);
|
||||
for bf in bfs.iter().take(3) {
|
||||
assert!(bf.contains(&b"a"));
|
||||
assert!(bf.contains(&b"b"));
|
||||
}
|
||||
for bf in bfs.iter().take(5).skip(2) {
|
||||
assert!(bf.contains(&b"c"));
|
||||
assert!(bf.contains(&b"d"));
|
||||
}
|
||||
for bf in bfs.iter().take(10).skip(5) {
|
||||
assert!(bf.contains(&b"e"));
|
||||
assert!(bf.contains(&b"f"));
|
||||
}
|
||||
}
|
||||
}
|
||||
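A hedged sketch of driving the creator API exercised in the tests above: push batches of row elements, then finish into a writer (a `Cursor<Vec<u8>>` in the tests; the provider and counters below mirror the test setup and are illustrative).

let mut writer = Cursor::new(Vec::new());
let mut creator = BloomFilterCreator::new(
    1024,                                          // rows per segment
    Arc::new(MockExternalTempFileProvider::new()), // spill provider (test double)
    Arc::new(AtomicUsize::new(0)),                 // shared memory accounting
    None,                                          // no global memory threshold
);
creator.push_n_row_elems(8, vec![b"a".to_vec()]).await?;
creator.finish(&mut writer).await?;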
|
||||
@@ -43,7 +43,7 @@ pub struct FinalizedBloomFilterStorage {
|
||||
intermediate_prefix: String,
|
||||
|
||||
/// The provider for intermediate Bloom filter files.
|
||||
intermediate_provider: Box<dyn ExternalTempFileProvider>,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
|
||||
/// The memory usage of the in-memory Bloom filters.
|
||||
memory_usage: usize,
|
||||
@@ -59,7 +59,7 @@ pub struct FinalizedBloomFilterStorage {
|
||||
impl FinalizedBloomFilterStorage {
|
||||
/// Creates a new `FinalizedBloomFilterStorage`.
|
||||
pub fn new(
|
||||
intermediate_provider: Box<dyn ExternalTempFileProvider>,
|
||||
intermediate_provider: Arc<dyn ExternalTempFileProvider>,
|
||||
global_memory_usage: Arc<AtomicUsize>,
|
||||
global_memory_usage_threshold: Option<usize>,
|
||||
) -> Self {
|
||||
@@ -132,7 +132,7 @@ impl FinalizedBloomFilterStorage {
|
||||
/// Drains the storage and returns a stream of finalized Bloom filter segments.
|
||||
pub async fn drain(
|
||||
&mut self,
|
||||
) -> Result<Pin<Box<dyn Stream<Item = Result<FinalizedBloomFilterSegment>> + '_>>> {
|
||||
) -> Result<Pin<Box<dyn Stream<Item = Result<FinalizedBloomFilterSegment>> + Send + '_>>> {
|
||||
// FAST PATH: memory only
|
||||
if self.intermediate_file_id_counter == 0 {
|
||||
return Ok(Box::pin(stream::iter(self.in_memory.drain(..).map(Ok))));
|
||||
@@ -183,6 +183,13 @@ impl FinalizedBloomFilterStorage {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for FinalizedBloomFilterStorage {
|
||||
fn drop(&mut self) {
|
||||
self.global_memory_usage
|
||||
.fetch_sub(self.memory_usage, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// A finalized Bloom filter segment.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct FinalizedBloomFilterSegment {
|
||||
@@ -250,7 +257,7 @@ mod tests {
|
||||
|
||||
let global_memory_usage = Arc::new(AtomicUsize::new(0));
|
||||
let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
|
||||
let provider = Box::new(mock_provider);
|
||||
let provider = Arc::new(mock_provider);
|
||||
let mut storage = FinalizedBloomFilterStorage::new(
|
||||
provider,
|
||||
global_memory_usage.clone(),
|
||||
|
||||
@@ -38,7 +38,15 @@ pub trait BloomFilterReader {
|
||||
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes>;
|
||||
|
||||
/// Reads bunch of ranges from the file.
|
||||
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>>;
|
||||
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
|
||||
let mut results = Vec::with_capacity(ranges.len());
|
||||
for range in ranges {
|
||||
let size = (range.end - range.start) as u32;
|
||||
let data = self.range_read(range.start, size).await?;
|
||||
results.push(data);
|
||||
}
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
/// Reads the meta information of the bloom filter.
|
||||
async fn metadata(&mut self) -> Result<BloomFilterMeta>;
|
||||
@@ -190,7 +198,7 @@ mod tests {
|
||||
let mut writer = Cursor::new(vec![]);
|
||||
let mut creator = BloomFilterCreator::new(
|
||||
2,
|
||||
Box::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(MockExternalTempFileProvider::new()),
|
||||
Arc::new(AtomicUsize::new(0)),
|
||||
None,
|
||||
);
|
||||
|
||||
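For reference, an illustrative use of the new default `read_vec` above: an implementor only has to provide `range_read` and `metadata`, and the vectored read falls back to one `range_read` per range. The ranges here are arbitrary examples.

// Sketch only: two sequential range reads through the default implementation.
async fn read_two(reader: &mut (impl BloomFilterReader + Send)) -> Result<Vec<Bytes>> {
    reader.read_vec(&[0u64..128, 256..512]).await
}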
@@ -30,8 +30,8 @@ pub struct LogQuery {
pub time_filter: TimeFilter,
/// Columns with filters to query.
pub columns: Vec<ColumnFilters>,
/// Maximum number of logs to return. If not provided, it will return all matched logs.
pub limit: Option<usize>,
/// Controls row skipping and fetch count for logs.
pub limit: Limit,
/// Adjacent lines to return.
pub context: Context,
}
@@ -42,7 +42,7 @@ impl Default for LogQuery {
table: TableName::new("", "", ""),
time_filter: Default::default(),
columns: vec![],
limit: None,
limit: Limit::default(),
context: Default::default(),
}
}
@@ -266,6 +266,15 @@ pub enum Context {
Seconds(usize, usize),
}

/// Represents limit and offset parameters for query pagination.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Limit {
/// Optional number of items to skip before starting to return results
pub skip: Option<usize>,
/// Optional number of items to return after skipping
pub fetch: Option<usize>,
}

#[cfg(test)]
mod tests {
use super::*;

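An illustrative construction of the new pagination type (mirroring the fields added above); the values are examples only.

let query = LogQuery {
    limit: Limit {
        skip: Some(0),    // start from the first matched log
        fetch: Some(100), // return at most 100 logs
    },
    ..Default::default()
};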
@@ -6,7 +6,7 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
mock = []
|
||||
pg_kvbackend = ["dep:tokio-postgres"]
|
||||
pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -14,6 +14,7 @@ workspace = true
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
async-trait = "0.1"
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
client.workspace = true
|
||||
common-base.workspace = true
|
||||
@@ -55,7 +56,7 @@ snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-postgres = { workspace = true, optional = true }
|
||||
tokio-postgres = { workspace = true, optional = true, features = ["with-chrono-0_4"] }
|
||||
tokio-stream = { workspace = true, features = ["net"] }
|
||||
toml.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
@@ -26,6 +26,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::postgres::PgStore;
|
||||
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_telemetry::error;
|
||||
use common_telemetry::info;
|
||||
use etcd_client::Client;
|
||||
use futures::future;
|
||||
@@ -224,8 +226,9 @@ pub async fn metasrv_builder(
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
(None, BackendImpl::PostgresStore) => {
|
||||
let pg_client = create_postgres_client(opts).await?;
|
||||
let kv_backend = PgStore::with_pg_client(pg_client).await.unwrap();
|
||||
// TODO(jeremy, weny): implement election for postgres
|
||||
let kv_backend = PgStore::with_pg_client(pg_client)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
(kv_backend, None)
|
||||
}
|
||||
};
|
||||
@@ -275,8 +278,14 @@ async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres:
|
||||
let postgres_url = opts.store_addrs.first().context(InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let (client, _) = tokio_postgres::connect(postgres_url, NoTls)
|
||||
let (client, connection) = tokio_postgres::connect(postgres_url, NoTls)
|
||||
.await
|
||||
.context(error::ConnectPostgresSnafu)?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
error!(e; "connection error");
|
||||
}
|
||||
});
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
@@ -13,11 +13,12 @@
// limitations under the License.

pub mod etcd;
#[cfg(feature = "pg_kvbackend")]
pub mod postgres;

use std::fmt;
use std::fmt::{self, Debug};
use std::sync::Arc;

use etcd_client::LeaderKey;
use tokio::sync::broadcast::Receiver;

use crate::error::Result;
@@ -26,10 +27,31 @@ use crate::metasrv::MetasrvNodeInfo;
pub const ELECTION_KEY: &str = "__metasrv_election";
pub const CANDIDATES_ROOT: &str = "__metasrv_election_candidates/";

pub(crate) const CANDIDATE_LEASE_SECS: u64 = 600;
const KEEP_ALIVE_INTERVAL_SECS: u64 = CANDIDATE_LEASE_SECS / 2;

/// Messages sent when the leader changes.
#[derive(Debug, Clone)]
pub enum LeaderChangeMessage {
Elected(Arc<LeaderKey>),
StepDown(Arc<LeaderKey>),
Elected(Arc<dyn LeaderKey>),
StepDown(Arc<dyn LeaderKey>),
}

/// LeaderKey is a key that represents the leader of metasrv.
/// The structure is corresponding to [etcd_client::LeaderKey].
pub trait LeaderKey: Send + Sync + Debug {
/// The name in byte. name is the election identifier that corresponds to the leadership key.
fn name(&self) -> &[u8];

/// The key in byte. key is an opaque key representing the ownership of the election. If the key
/// is deleted, then leadership is lost.
fn key(&self) -> &[u8];

/// The creation revision of the key.
fn revision(&self) -> i64;

/// The lease ID of the election leader.
fn lease_id(&self) -> i64;
}

impl fmt::Display for LeaderChangeMessage {
@@ -47,8 +69,8 @@ impl fmt::Display for LeaderChangeMessage {
write!(f, "LeaderKey {{ ")?;
write!(f, "name: {}", String::from_utf8_lossy(leader_key.name()))?;
write!(f, ", key: {}", String::from_utf8_lossy(leader_key.key()))?;
write!(f, ", rev: {}", leader_key.rev())?;
write!(f, ", lease: {}", leader_key.lease())?;
write!(f, ", rev: {}", leader_key.revision())?;
write!(f, ", lease: {}", leader_key.lease_id())?;
write!(f, " }})")
}
}
@@ -65,7 +87,7 @@ pub trait Election: Send + Sync {
/// initialization operations can be performed.
///
/// note: a new leader will only return true on the first call.
fn in_infancy(&self) -> bool;
fn in_leader_infancy(&self) -> bool;

/// Registers a candidate for the election.
async fn register_candidate(&self, node_info: &MetasrvNodeInfo) -> Result<()>;

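To illustrate the new abstraction, a minimal in-memory `LeaderKey` as another election backend might provide it. `PgLeaderKey` is a hypothetical name, not a type from the diff.

// Hypothetical backend-agnostic leader key carrying the four values the trait exposes.
#[derive(Debug)]
struct PgLeaderKey {
    name: Vec<u8>,
    key: Vec<u8>,
    rev: i64,
    lease: i64,
}

impl LeaderKey for PgLeaderKey {
    fn name(&self) -> &[u8] { &self.name }
    fn key(&self) -> &[u8] { &self.key }
    fn revision(&self) -> i64 { self.rev }
    fn lease_id(&self) -> i64 { self.lease }
}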
@@ -18,18 +18,41 @@ use std::time::Duration;
|
||||
|
||||
use common_meta::distributed_time_constants::{META_KEEP_ALIVE_INTERVAL_SECS, META_LEASE_SECS};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use etcd_client::{Client, GetOptions, LeaderKey, LeaseKeepAliveStream, LeaseKeeper, PutOptions};
|
||||
use etcd_client::{
|
||||
Client, GetOptions, LeaderKey as EtcdLeaderKey, LeaseKeepAliveStream, LeaseKeeper, PutOptions,
|
||||
};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::broadcast::Receiver;
|
||||
use tokio::time::{timeout, MissedTickBehavior};
|
||||
|
||||
use crate::election::{Election, LeaderChangeMessage, CANDIDATES_ROOT, ELECTION_KEY};
|
||||
use crate::election::{
|
||||
Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT, CANDIDATE_LEASE_SECS, ELECTION_KEY,
|
||||
KEEP_ALIVE_INTERVAL_SECS,
|
||||
};
|
||||
use crate::error;
|
||||
use crate::error::Result;
|
||||
use crate::metasrv::{ElectionRef, LeaderValue, MetasrvNodeInfo};
|
||||
|
||||
impl LeaderKey for EtcdLeaderKey {
|
||||
fn name(&self) -> &[u8] {
|
||||
self.name()
|
||||
}
|
||||
|
||||
fn key(&self) -> &[u8] {
|
||||
self.key()
|
||||
}
|
||||
|
||||
fn revision(&self) -> i64 {
|
||||
self.rev()
|
||||
}
|
||||
|
||||
fn lease_id(&self) -> i64 {
|
||||
self.lease()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EtcdElection {
|
||||
leader_value: String,
|
||||
client: Client,
|
||||
@@ -75,15 +98,15 @@ impl EtcdElection {
|
||||
LeaderChangeMessage::Elected(key) => {
|
||||
info!(
|
||||
"[{leader_ident}] is elected as leader: {:?}, lease: {}",
|
||||
key.name_str(),
|
||||
key.lease()
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
LeaderChangeMessage::StepDown(key) => {
|
||||
warn!(
|
||||
"[{leader_ident}] is stepping down: {:?}, lease: {}",
|
||||
key.name_str(),
|
||||
key.lease()
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
},
|
||||
@@ -126,16 +149,13 @@ impl Election for EtcdElection {
|
||||
self.is_leader.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn in_infancy(&self) -> bool {
|
||||
fn in_leader_infancy(&self) -> bool {
|
||||
self.infancy
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
async fn register_candidate(&self, node_info: &MetasrvNodeInfo) -> Result<()> {
|
||||
const CANDIDATE_LEASE_SECS: u64 = 600;
|
||||
const KEEP_ALIVE_INTERVAL_SECS: u64 = CANDIDATE_LEASE_SECS / 2;
|
||||
|
||||
let mut lease_client = self.client.lease_client();
|
||||
let res = lease_client
|
||||
.grant(CANDIDATE_LEASE_SECS as i64, None)
|
||||
@@ -239,7 +259,7 @@ impl Election for EtcdElection {
|
||||
// The keep alive operation MUST be done in `META_KEEP_ALIVE_INTERVAL_SECS`.
|
||||
match timeout(
|
||||
keep_lease_duration,
|
||||
self.keep_alive(&mut keeper, &mut receiver, leader),
|
||||
self.keep_alive(&mut keeper, &mut receiver, leader.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -303,7 +323,7 @@ impl EtcdElection {
|
||||
&self,
|
||||
keeper: &mut LeaseKeeper,
|
||||
receiver: &mut LeaseKeepAliveStream,
|
||||
leader: &LeaderKey,
|
||||
leader: EtcdLeaderKey,
|
||||
) -> Result<()> {
|
||||
keeper.keep_alive().await.context(error::EtcdFailedSnafu)?;
|
||||
if let Some(res) = receiver.message().await.context(error::EtcdFailedSnafu)? {
|
||||
@@ -324,7 +344,7 @@ impl EtcdElection {
|
||||
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::Elected(Arc::new(leader.clone())))
|
||||
.send(LeaderChangeMessage::Elected(Arc::new(leader)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
|
||||
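Editorial note: the hunks above switch the leader-change log statements from etcd_client's concrete methods (name_str(), lease()) to the crate's own LeaderKey trait (name(), lease_id()), which EtcdLeaderKey now implements. A minimal sketch of a subscriber consuming these messages through the trait, assuming a receiver obtained from subscribe_leader_change(); watch_leader_changes is a hypothetical helper name, not part of the diff:

// Illustrative sketch only (not part of the diff).
async fn watch_leader_changes(mut rx: Receiver<LeaderChangeMessage>) {
    while let Ok(msg) = rx.recv().await {
        match msg {
            LeaderChangeMessage::Elected(key) => info!(
                "elected: {:?}, lease: {}",
                String::from_utf8_lossy(key.name()),
                key.lease_id()
            ),
            LeaderChangeMessage::StepDown(key) => warn!(
                "stepping down: {:?}, lease: {}",
                String::from_utf8_lossy(key.name()),
                key.lease_id()
            ),
        }
    }
}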
519 src/meta-srv/src/election/postgres.rs Normal file
@@ -0,0 +1,519 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;

use common_time::Timestamp;
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::broadcast;
use tokio_postgres::Client;

use crate::election::{Election, LeaderChangeMessage, CANDIDATES_ROOT, ELECTION_KEY};
use crate::error::{
    DeserializeFromJsonSnafu, PostgresExecutionSnafu, Result, SerializeToJsonSnafu, UnexpectedSnafu,
};
use crate::metasrv::{ElectionRef, LeaderValue, MetasrvNodeInfo};

// Separator between value and expire time.
const LEASE_SEP: &str = r#"||__metadata_lease_sep||"#;

// SQL to put a value with expire time. Parameters: key, value, LEASE_SEP, expire_time
const PUT_IF_NOT_EXISTS_WITH_EXPIRE_TIME: &str = r#"
WITH prev AS (
    SELECT k, v FROM greptime_metakv WHERE k = $1
), insert AS (
    INSERT INTO greptime_metakv
    VALUES($1, $2 || $3 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $4, 'YYYY-MM-DD HH24:MI:SS.MS'))
    ON CONFLICT (k) DO NOTHING
)

SELECT k, v FROM prev;
"#;

// SQL to update a value with expire time. Parameters: key, prev_value_with_lease, updated_value, LEASE_SEP, expire_time
const CAS_WITH_EXPIRE_TIME: &str = r#"
UPDATE greptime_metakv
SET k=$1,
    v=$3 || $4 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $5, 'YYYY-MM-DD HH24:MI:SS.MS')
WHERE
    k=$1 AND v=$2
"#;

const GET_WITH_CURRENT_TIMESTAMP: &str = r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM greptime_metakv WHERE k = $1"#;

const PREFIX_GET_WITH_CURRENT_TIMESTAMP: &str = r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM greptime_metakv WHERE k LIKE $1"#;

const POINT_DELETE: &str = "DELETE FROM greptime_metakv WHERE k = $1 RETURNING k,v;";

/// Parse the value and expire time from the given string. The value should be in the format "value || LEASE_SEP || expire_time".
fn parse_value_and_expire_time(value: &str) -> Result<(String, Timestamp)> {
    let (value, expire_time) = value
        .split(LEASE_SEP)
        .collect_tuple()
        .context(UnexpectedSnafu {
            violated: format!(
                "Invalid value {}, expect node info || {} || expire time",
                value, LEASE_SEP
            ),
        })?;
    // Given expire_time is in the format 'YYYY-MM-DD HH24:MI:SS.MS'
    let expire_time = match Timestamp::from_str(expire_time, None) {
        Ok(ts) => ts,
        Err(_) => UnexpectedSnafu {
            violated: format!("Invalid timestamp: {}", expire_time),
        }
        .fail()?,
    };
    Ok((value.to_string(), expire_time))
}

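For orientation (editorial note, not part of the new file): each greptime_metakv row stores the serialized node info, the separator, and a TO_CHAR-rendered expiry together in one text column, and parse_value_and_expire_time splits them back apart. A hypothetical stored value could look like this; the payload and timestamp are made up:

// Illustrative sketch only.
fn example_stored_value() -> String {
    format!(
        "{}{}{}",
        r#"{"addr":"127.0.0.1:3002"}"#, // serialized MetasrvNodeInfo (example payload)
        LEASE_SEP,                      // "||__metadata_lease_sep||"
        "2024-01-01 00:00:10.000"       // CURRENT_TIMESTAMP + lease TTL, as TO_CHAR renders it
    )
    // parse_value_and_expire_time(&value) would return the JSON payload and a
    // Timestamp parsed from the trailing time string.
}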
/// PostgreSql implementation of Election.
/// TODO(CookiePie): Currently only support candidate registration. Add election logic.
pub struct PgElection {
    leader_value: String,
    client: Client,
    is_leader: AtomicBool,
    leader_infancy: AtomicBool,
    leader_watcher: broadcast::Sender<LeaderChangeMessage>,
    store_key_prefix: String,
    candidate_lease_ttl_secs: u64,
}

impl PgElection {
    pub async fn with_pg_client(
        leader_value: String,
        client: Client,
        store_key_prefix: String,
        candidate_lease_ttl_secs: u64,
    ) -> Result<ElectionRef> {
        let (tx, _) = broadcast::channel(100);
        Ok(Arc::new(Self {
            leader_value,
            client,
            is_leader: AtomicBool::new(false),
            leader_infancy: AtomicBool::new(false),
            leader_watcher: tx,
            store_key_prefix,
            candidate_lease_ttl_secs,
        }))
    }

    fn _election_key(&self) -> String {
        format!("{}{}", self.store_key_prefix, ELECTION_KEY)
    }

    fn candidate_root(&self) -> String {
        format!("{}{}", self.store_key_prefix, CANDIDATES_ROOT)
    }

    fn candidate_key(&self) -> String {
        format!("{}{}", self.candidate_root(), self.leader_value)
    }
}

#[async_trait::async_trait]
impl Election for PgElection {
    type Leader = LeaderValue;

    fn is_leader(&self) -> bool {
        self.is_leader.load(Ordering::Relaxed)
    }

    fn in_leader_infancy(&self) -> bool {
        self.leader_infancy
            .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }

    /// TODO(CookiePie): Split the candidate registration and keep alive logic into separate methods, so that upper layers can call them separately.
    async fn register_candidate(&self, node_info: &MetasrvNodeInfo) -> Result<()> {
        let key = self.candidate_key();
        let node_info =
            serde_json::to_string(node_info).with_context(|_| SerializeToJsonSnafu {
                input: format!("{node_info:?}"),
            })?;
        let res = self.put_value_with_lease(&key, &node_info).await?;
        // May have been registered before, just update the lease.
        if !res {
            self.delete_value(&key).await?;
            self.put_value_with_lease(&key, &node_info).await?;
        }

        // Check if the current lease has expired and renew the lease.
        let mut keep_alive_interval =
            tokio::time::interval(Duration::from_secs(self.candidate_lease_ttl_secs / 2));
        loop {
            let _ = keep_alive_interval.tick().await;

            let (_, prev_expire_time, current_time, origin) = self
                .get_value_with_lease(&key, true)
                .await?
                .unwrap_or_default();

            ensure!(
                prev_expire_time > current_time,
                UnexpectedSnafu {
                    violated: format!(
                        "Candidate lease expired, key: {:?}",
                        String::from_utf8_lossy(&key.into_bytes())
                    ),
                }
            );

            // Safety: origin is Some since we are using `get_value_with_lease` with `true`.
            let origin = origin.unwrap();
            self.update_value_with_lease(&key, &origin, &node_info)
                .await?;
        }
    }

    async fn all_candidates(&self) -> Result<Vec<MetasrvNodeInfo>> {
        let key_prefix = self.candidate_root();
        let (mut candidates, current) = self.get_value_with_lease_by_prefix(&key_prefix).await?;
        // Remove expired candidates
        candidates.retain(|c| c.1 > current);
        let mut valid_candidates = Vec::with_capacity(candidates.len());
        for (c, _) in candidates {
            let node_info: MetasrvNodeInfo =
                serde_json::from_str(&c).with_context(|_| DeserializeFromJsonSnafu {
                    input: format!("{:?}", c),
                })?;
            valid_candidates.push(node_info);
        }
        Ok(valid_candidates)
    }

    async fn campaign(&self) -> Result<()> {
        todo!()
    }

    async fn leader(&self) -> Result<Self::Leader> {
        todo!()
    }

    async fn resign(&self) -> Result<()> {
        todo!()
    }

    fn subscribe_leader_change(&self) -> broadcast::Receiver<LeaderChangeMessage> {
        self.leader_watcher.subscribe()
    }
}

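How an upper layer might drive this (editorial sketch; the endpoint, leader value, key prefix and TTL are placeholders, and it assumes the greptime_metakv table already exists, as in the tests further down):

// Illustrative sketch only, mirroring the test helper below.
async fn run_candidate(endpoint: &str, node_info: MetasrvNodeInfo) -> Result<()> {
    let (client, conn) = tokio_postgres::connect(endpoint, tokio_postgres::NoTls)
        .await
        .context(PostgresExecutionSnafu)?;
    // Drive the connection in the background so queries can make progress.
    tokio::spawn(async move {
        let _ = conn.await;
    });

    let election = PgElection::with_pg_client(
        "metasrv-0".to_string(),  // leader_value (placeholder)
        client,
        "greptime/".to_string(),  // store_key_prefix (placeholder)
        600,                      // candidate_lease_ttl_secs (placeholder)
    )
    .await?;

    // Never returns under normal operation: it re-registers if needed and then
    // renews the candidate lease every candidate_lease_ttl_secs / 2 seconds.
    election.register_candidate(&node_info).await
}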
impl PgElection {
    /// Returns value, expire time and current time. If `with_origin` is true, the origin string is also returned.
    async fn get_value_with_lease(
        &self,
        key: &String,
        with_origin: bool,
    ) -> Result<Option<(String, Timestamp, Timestamp, Option<String>)>> {
        let res = self
            .client
            .query(GET_WITH_CURRENT_TIMESTAMP, &[&key])
            .await
            .context(PostgresExecutionSnafu)?;

        if res.is_empty() {
            Ok(None)
        } else {
            // Safety: Checked if res is empty above.
            let current_time_str = res[0].get(1);
            let current_time = match Timestamp::from_str(current_time_str, None) {
                Ok(ts) => ts,
                Err(_) => UnexpectedSnafu {
                    violated: format!("Invalid timestamp: {}", current_time_str),
                }
                .fail()?,
            };
            // Safety: Checked if res is empty above.
            let value_and_expire_time = res[0].get(0);
            let (value, expire_time) = parse_value_and_expire_time(value_and_expire_time)?;

            if with_origin {
                Ok(Some((
                    value,
                    expire_time,
                    current_time,
                    Some(value_and_expire_time.to_string()),
                )))
            } else {
                Ok(Some((value, expire_time, current_time, None)))
            }
        }
    }

    /// Returns all values and expire time with the given key prefix. Also returns the current time.
    async fn get_value_with_lease_by_prefix(
        &self,
        key_prefix: &str,
    ) -> Result<(Vec<(String, Timestamp)>, Timestamp)> {
        let key_prefix = format!("{}%", key_prefix);
        let res = self
            .client
            .query(PREFIX_GET_WITH_CURRENT_TIMESTAMP, &[&key_prefix])
            .await
            .context(PostgresExecutionSnafu)?;

        let mut values_with_leases = vec![];
        let mut current = Timestamp::default();
        for row in res {
            let current_time_str = row.get(1);
            current = match Timestamp::from_str(current_time_str, None) {
                Ok(ts) => ts,
                Err(_) => UnexpectedSnafu {
                    violated: format!("Invalid timestamp: {}", current_time_str),
                }
                .fail()?,
            };

            let value_and_expire_time = row.get(0);
            let (value, expire_time) = parse_value_and_expire_time(value_and_expire_time)?;

            values_with_leases.push((value, expire_time));
        }
        Ok((values_with_leases, current))
    }

    async fn update_value_with_lease(&self, key: &str, prev: &str, updated: &str) -> Result<()> {
        let res = self
            .client
            .execute(
                CAS_WITH_EXPIRE_TIME,
                &[
                    &key,
                    &prev,
                    &updated,
                    &LEASE_SEP,
                    &(self.candidate_lease_ttl_secs as f64),
                ],
            )
            .await
            .context(PostgresExecutionSnafu)?;

        ensure!(
            res == 1,
            UnexpectedSnafu {
                violated: format!("Failed to update key: {}", key),
            }
        );

        Ok(())
    }

    /// Returns `true` if the insertion is successful
    async fn put_value_with_lease(&self, key: &str, value: &str) -> Result<bool> {
        let res = self
            .client
            .query(
                PUT_IF_NOT_EXISTS_WITH_EXPIRE_TIME,
                &[
                    &key,
                    &value,
                    &LEASE_SEP,
                    &(self.candidate_lease_ttl_secs as f64),
                ],
            )
            .await
            .context(PostgresExecutionSnafu)?;
        Ok(res.is_empty())
    }

    /// Returns `true` if the deletion is successful.
    /// Caution: Should only delete the key if the lease is expired.
    async fn delete_value(&self, key: &String) -> Result<bool> {
        let res = self
            .client
            .query(POINT_DELETE, &[&key])
            .await
            .context(PostgresExecutionSnafu)?;

        Ok(res.len() == 1)
    }
}

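The helpers above implement a small compare-and-swap protocol: reads return the raw stored string alongside the parsed value, and updates only succeed if that exact string is still in place. A condensed sketch of one renewal step (editorial; renew_once is a hypothetical name):

// Illustrative sketch only: one lease-renewal step using the helpers above.
impl PgElection {
    async fn renew_once(&self, key: &String, payload: &str) -> Result<()> {
        let (_, expire_time, current_time, origin) = self
            .get_value_with_lease(key, true)
            .await?
            .unwrap_or_default();
        ensure!(
            expire_time > current_time,
            UnexpectedSnafu {
                violated: format!("Lease already expired for key: {}", key),
            }
        );
        // Safety: origin is Some because `with_origin` was true.
        // The original string acts as the CAS guard in CAS_WITH_EXPIRE_TIME.
        self.update_value_with_lease(key, &origin.unwrap(), payload)
            .await
    }
}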
#[cfg(test)]
mod tests {
    use std::env;

    use tokio_postgres::{Client, NoTls};

    use super::*;
    use crate::error::PostgresExecutionSnafu;

    async fn create_postgres_client() -> Result<Client> {
        let endpoint = env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
        if endpoint.is_empty() {
            return UnexpectedSnafu {
                violated: "Postgres endpoint is empty".to_string(),
            }
            .fail();
        }
        let (client, connection) = tokio_postgres::connect(&endpoint, NoTls)
            .await
            .context(PostgresExecutionSnafu)?;
        tokio::spawn(async move {
            connection.await.context(PostgresExecutionSnafu).unwrap();
        });
        Ok(client)
    }

    #[tokio::test]
    async fn test_postgres_crud() {
        let client = create_postgres_client().await.unwrap();

        let key = "test_key".to_string();
        let value = "test_value".to_string();

        let (tx, _) = broadcast::channel(100);
        let pg_election = PgElection {
            leader_value: "test_leader".to_string(),
            client,
            is_leader: AtomicBool::new(false),
            leader_infancy: AtomicBool::new(true),
            leader_watcher: tx,
            store_key_prefix: "test_prefix".to_string(),
            candidate_lease_ttl_secs: 10,
        };

        let res = pg_election
            .put_value_with_lease(&key, &value)
            .await
            .unwrap();
        assert!(res);

        let (value, _, _, prev) = pg_election
            .get_value_with_lease(&key, true)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(value, value);

        let prev = prev.unwrap();
        pg_election
            .update_value_with_lease(&key, &prev, &value)
            .await
            .unwrap();

        let res = pg_election.delete_value(&key).await.unwrap();
        assert!(res);

        let res = pg_election.get_value_with_lease(&key, false).await.unwrap();
        assert!(res.is_none());

        for i in 0..10 {
            let key = format!("test_key_{}", i);
            let value = format!("test_value_{}", i);
            pg_election
                .put_value_with_lease(&key, &value)
                .await
                .unwrap();
        }

        let key_prefix = "test_key".to_string();
        let (res, _) = pg_election
            .get_value_with_lease_by_prefix(&key_prefix)
            .await
            .unwrap();
        assert_eq!(res.len(), 10);

        for i in 0..10 {
            let key = format!("test_key_{}", i);
            let res = pg_election.delete_value(&key).await.unwrap();
            assert!(res);
        }

        let (res, current) = pg_election
            .get_value_with_lease_by_prefix(&key_prefix)
            .await
            .unwrap();
        assert!(res.is_empty());
        assert!(current == Timestamp::default());
    }

    async fn candidate(leader_value: String, candidate_lease_ttl_secs: u64) {
        let client = create_postgres_client().await.unwrap();

        let (tx, _) = broadcast::channel(100);
        let pg_election = PgElection {
            leader_value,
            client,
            is_leader: AtomicBool::new(false),
            leader_infancy: AtomicBool::new(true),
            leader_watcher: tx,
            store_key_prefix: "test_prefix".to_string(),
            candidate_lease_ttl_secs,
        };

        let node_info = MetasrvNodeInfo {
            addr: "test_addr".to_string(),
            version: "test_version".to_string(),
            git_commit: "test_git_commit".to_string(),
            start_time_ms: 0,
        };
        pg_election.register_candidate(&node_info).await.unwrap();
    }

    #[tokio::test]
    async fn test_candidate_registration() {
        let leader_value_prefix = "test_leader".to_string();
        let candidate_lease_ttl_secs = 5;
        let mut handles = vec![];
        for i in 0..10 {
            let leader_value = format!("{}{}", leader_value_prefix, i);
            let handle = tokio::spawn(candidate(leader_value, candidate_lease_ttl_secs));
            handles.push(handle);
        }
        // Wait for candidates to register themselves and renew their leases at least once.
        tokio::time::sleep(Duration::from_secs(6)).await;

        let client = create_postgres_client().await.unwrap();

        let (tx, _) = broadcast::channel(100);
        let leader_value = "test_leader".to_string();
        let pg_election = PgElection {
            leader_value,
            client,
            is_leader: AtomicBool::new(false),
            leader_infancy: AtomicBool::new(true),
            leader_watcher: tx,
            store_key_prefix: "test_prefix".to_string(),
            candidate_lease_ttl_secs,
        };

        let candidates = pg_election.all_candidates().await.unwrap();
        assert_eq!(candidates.len(), 10);

        for handle in handles {
            handle.abort();
        }

        // Wait for the candidate leases to expire.
        tokio::time::sleep(Duration::from_secs(5)).await;
        let candidates = pg_election.all_candidates().await.unwrap();
        assert!(candidates.is_empty());

        // Garbage collection
        for i in 0..10 {
            let key = format!(
                "{}{}{}{}",
                "test_prefix", CANDIDATES_ROOT, leader_value_prefix, i
            );
            let res = pg_election.delete_value(&key).await.unwrap();
            assert!(res);
        }
    }
}

@@ -697,6 +697,8 @@ pub enum Error {
    #[cfg(feature = "pg_kvbackend")]
    #[snafu(display("Failed to execute via postgres"))]
    PostgresExecution {
        #[snafu(source)]
        error: tokio_postgres::Error,
        #[snafu(implicit)]
        location: Location,
    },

@@ -36,7 +36,7 @@ impl HeartbeatHandler for OnLeaderStartHandler {
            return Ok(HandleControl::Continue);
        };

        if election.in_infancy() {
        if election.in_leader_infancy() {
            ctx.is_infancy = true;
            // TODO(weny): Unify the multiple leader state between Context and Metasrv.
            // we can't ensure the in-memory kv has already been reset in the outside loop.

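The renamed in_leader_infancy() keeps the same one-shot semantics: the atomic flag is consumed by the first successful compare-exchange, so the handler marks ctx.is_infancy only once per election. A behavioral sketch of that pattern (editorial, not part of the diff):

// Illustrative sketch only: the compare_exchange pattern used by the elections.
#[test]
fn leader_infancy_is_reported_once() {
    use std::sync::atomic::{AtomicBool, Ordering};

    let infancy = AtomicBool::new(true); // set when leadership is acquired
    let first = infancy
        .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
        .is_ok();
    let second = infancy
        .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
        .is_ok();
    assert!(first);   // first caller observes the fresh election
    assert!(!second); // later callers see it already consumed
}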
@@ -19,10 +19,11 @@ use object_store::util::{join_dir, with_instrument_layers};
use object_store::ObjectStore;
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::SequenceNumber;

use crate::cache::write_cache::SstUploadRequest;
use crate::cache::CacheManagerRef;
use crate::config::{FulltextIndexConfig, InvertedIndexConfig};
use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig};
use crate::error::{CleanDirSnafu, DeleteIndexSnafu, DeleteSstSnafu, OpenDalSnafu, Result};
use crate::read::Source;
use crate::region::options::IndexOptions;
@@ -154,6 +155,7 @@ impl AccessLayer {
                index_options: request.index_options,
                inverted_index_config: request.inverted_index_config,
                fulltext_index_config: request.fulltext_index_config,
                bloom_filter_index_config: request.bloom_filter_index_config,
            }
            .build()
            .await;
@@ -163,7 +165,9 @@ impl AccessLayer {
                request.metadata,
                indexer,
            );
            writer.write_all(request.source, write_opts).await?
            writer
                .write_all(request.source, request.max_sequence, write_opts)
                .await?
        };

        // Put parquet metadata to cache manager.
@@ -193,11 +197,13 @@ pub(crate) struct SstWriteRequest {
    pub(crate) cache_manager: CacheManagerRef,
    #[allow(dead_code)]
    pub(crate) storage: Option<String>,
    pub(crate) max_sequence: Option<SequenceNumber>,

    /// Configs for index
    pub(crate) index_options: IndexOptions,
    pub(crate) inverted_index_config: InvertedIndexConfig,
    pub(crate) fulltext_index_config: FulltextIndexConfig,
    pub(crate) bloom_filter_index_config: BloomFilterConfig,
}

pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> {

@@ -28,6 +28,7 @@ use std::sync::Arc;
use bytes::Bytes;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use index::bloom_filter_index::{BloomFilterIndexCache, BloomFilterIndexCacheRef};
use moka::notification::RemovalCause;
use moka::sync::Cache;
use parquet::column::page::Page;
@@ -54,6 +55,195 @@ const FILE_TYPE: &str = "file";
/// Metrics type key for selector result cache.
const SELECTOR_RESULT_TYPE: &str = "selector_result";

/// Cache strategies that may only enable a subset of caches.
#[derive(Clone)]
pub enum CacheStrategy {
    /// Strategy for normal operations.
    /// Doesn't disable any cache.
    EnableAll(CacheManagerRef),
    /// Strategy for compaction.
    /// Disables some caches during compaction to avoid affecting queries.
    /// Enables the write cache so that the compaction can read files cached
    /// in the write cache and write the compacted files back to the write cache.
    Compaction(CacheManagerRef),
    /// Do not use any cache.
    Disabled,
}

impl CacheStrategy {
    /// Calls [CacheManager::get_parquet_meta_data()].
    pub async fn get_parquet_meta_data(
        &self,
        region_id: RegionId,
        file_id: FileId,
    ) -> Option<Arc<ParquetMetaData>> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager
                    .get_parquet_meta_data(region_id, file_id)
                    .await
            }
            CacheStrategy::Compaction(cache_manager) => {
                cache_manager
                    .get_parquet_meta_data(region_id, file_id)
                    .await
            }
            CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::get_parquet_meta_data_from_mem_cache()].
    pub fn get_parquet_meta_data_from_mem_cache(
        &self,
        region_id: RegionId,
        file_id: FileId,
    ) -> Option<Arc<ParquetMetaData>> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager.get_parquet_meta_data_from_mem_cache(region_id, file_id)
            }
            CacheStrategy::Compaction(cache_manager) => {
                cache_manager.get_parquet_meta_data_from_mem_cache(region_id, file_id)
            }
            CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::put_parquet_meta_data()].
    pub fn put_parquet_meta_data(
        &self,
        region_id: RegionId,
        file_id: FileId,
        metadata: Arc<ParquetMetaData>,
    ) {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager.put_parquet_meta_data(region_id, file_id, metadata);
            }
            CacheStrategy::Compaction(cache_manager) => {
                cache_manager.put_parquet_meta_data(region_id, file_id, metadata);
            }
            CacheStrategy::Disabled => {}
        }
    }

    /// Calls [CacheManager::remove_parquet_meta_data()].
    pub fn remove_parquet_meta_data(&self, region_id: RegionId, file_id: FileId) {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager.remove_parquet_meta_data(region_id, file_id);
            }
            CacheStrategy::Compaction(cache_manager) => {
                cache_manager.remove_parquet_meta_data(region_id, file_id);
            }
            CacheStrategy::Disabled => {}
        }
    }

    /// Calls [CacheManager::get_repeated_vector()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn get_repeated_vector(
        &self,
        data_type: &ConcreteDataType,
        value: &Value,
    ) -> Option<VectorRef> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager.get_repeated_vector(data_type, value)
            }
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::put_repeated_vector()].
    /// It does nothing if the strategy isn't [CacheStrategy::EnableAll].
    pub fn put_repeated_vector(&self, value: Value, vector: VectorRef) {
        if let CacheStrategy::EnableAll(cache_manager) = self {
            cache_manager.put_repeated_vector(value, vector);
        }
    }

    /// Calls [CacheManager::get_pages()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn get_pages(&self, page_key: &PageKey) -> Option<Arc<PageValue>> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => cache_manager.get_pages(page_key),
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::put_pages()].
    /// It does nothing if the strategy isn't [CacheStrategy::EnableAll].
    pub fn put_pages(&self, page_key: PageKey, pages: Arc<PageValue>) {
        if let CacheStrategy::EnableAll(cache_manager) = self {
            cache_manager.put_pages(page_key, pages);
        }
    }

    /// Calls [CacheManager::get_selector_result()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn get_selector_result(
        &self,
        selector_key: &SelectorResultKey,
    ) -> Option<Arc<SelectorResultValue>> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => {
                cache_manager.get_selector_result(selector_key)
            }
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::put_selector_result()].
    /// It does nothing if the strategy isn't [CacheStrategy::EnableAll].
    pub fn put_selector_result(
        &self,
        selector_key: SelectorResultKey,
        result: Arc<SelectorResultValue>,
    ) {
        if let CacheStrategy::EnableAll(cache_manager) = self {
            cache_manager.put_selector_result(selector_key, result);
        }
    }

    /// Calls [CacheManager::write_cache()].
    /// It returns None if the strategy is [CacheStrategy::Disabled].
    pub fn write_cache(&self) -> Option<&WriteCacheRef> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => cache_manager.write_cache(),
            CacheStrategy::Compaction(cache_manager) => cache_manager.write_cache(),
            CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::index_cache()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn index_cache(&self) -> Option<&InvertedIndexCacheRef> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => cache_manager.index_cache(),
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::bloom_filter_index_cache()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn bloom_filter_index_cache(&self) -> Option<&BloomFilterIndexCacheRef> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => cache_manager.bloom_filter_index_cache(),
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }

    /// Calls [CacheManager::puffin_metadata_cache()].
    /// It returns None if the strategy is [CacheStrategy::Compaction] or [CacheStrategy::Disabled].
    pub fn puffin_metadata_cache(&self) -> Option<&PuffinMetadataCacheRef> {
        match self {
            CacheStrategy::EnableAll(cache_manager) => cache_manager.puffin_metadata_cache(),
            CacheStrategy::Compaction(_) | CacheStrategy::Disabled => None,
        }
    }
}

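In short, CacheStrategy is a thin dispatcher over CacheManagerRef: EnableAll forwards everything, Compaction forwards only the metadata and write-cache paths, and Disabled answers None. A small sketch of how a caller might pick a strategy (editorial; strategy_for is a hypothetical helper):

// Illustrative sketch only.
fn strategy_for(cache_manager: CacheManagerRef, compacting: bool) -> CacheStrategy {
    if compacting {
        // Keeps the write cache available for reading and writing SSTs during
        // compaction, but skips the page/vector/selector/index caches so that
        // query-facing entries are not evicted by compaction traffic.
        CacheStrategy::Compaction(cache_manager)
    } else {
        CacheStrategy::EnableAll(cache_manager)
    }
}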
/// Manages cached data for the engine.
///
/// All caches are disabled by default.
@@ -69,6 +259,8 @@ pub struct CacheManager {
    write_cache: Option<WriteCacheRef>,
    /// Cache for inverted index.
    index_cache: Option<InvertedIndexCacheRef>,
    /// Cache for bloom filter index.
    bloom_filter_index_cache: Option<BloomFilterIndexCacheRef>,
    /// Puffin metadata cache.
    puffin_metadata_cache: Option<PuffinMetadataCacheRef>,
    /// Cache for time series selectors.
@@ -221,6 +413,10 @@ impl CacheManager {
        self.index_cache.as_ref()
    }

    pub(crate) fn bloom_filter_index_cache(&self) -> Option<&BloomFilterIndexCacheRef> {
        self.bloom_filter_index_cache.as_ref()
    }

    pub(crate) fn puffin_metadata_cache(&self) -> Option<&PuffinMetadataCacheRef> {
        self.puffin_metadata_cache.as_ref()
    }
@@ -364,6 +560,12 @@ impl CacheManagerBuilder {
            self.index_content_size,
            self.index_content_page_size,
        );
        // TODO(ruihang): check if it's ok to reuse the same param with inverted index
        let bloom_filter_index_cache = BloomFilterIndexCache::new(
            self.index_metadata_size,
            self.index_content_size,
            self.index_content_page_size,
        );
        let puffin_metadata_cache =
            PuffinMetadataCache::new(self.puffin_metadata_size, &CACHE_BYTES);
        let selector_result_cache = (self.selector_result_cache_size != 0).then(|| {
@@ -387,6 +589,7 @@ impl CacheManagerBuilder {
            page_cache,
            write_cache: self.write_cache,
            index_cache: Some(Arc::new(inverted_index_cache)),
            bloom_filter_index_cache: Some(Arc::new(bloom_filter_index_cache)),
            puffin_metadata_cache: Some(Arc::new(puffin_metadata_cache)),
            selector_result_cache,
        }

6 src/mito2/src/cache/file_cache.rs vendored
@@ -37,8 +37,10 @@ use crate::sst::file::FileId;
use crate::sst::parquet::helper::fetch_byte_ranges;
use crate::sst::parquet::metadata::MetadataLoader;

/// Subdirectory of cached files.
const FILE_DIR: &str = "files/";
/// Subdirectory of cached files for write.
///
/// This must contain three layers, corresponding to [`build_prometheus_metrics_layer`](object_store::layers::build_prometheus_metrics_layer).
const FILE_DIR: &str = "cache/object/write/";

/// A file cache manages files on local store and evict files based
/// on size.

1 src/mito2/src/cache/index.rs vendored
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod bloom_filter_index;
pub mod inverted_index;

use std::future::Future;

167 src/mito2/src/cache/index/bloom_filter_index.rs vendored Normal file
@@ -0,0 +1,167 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use async_trait::async_trait;
use bytes::Bytes;
use index::bloom_filter::error::Result;
use index::bloom_filter::reader::BloomFilterReader;
use index::bloom_filter::BloomFilterMeta;
use store_api::storage::ColumnId;

use crate::cache::index::{IndexCache, PageKey, INDEX_METADATA_TYPE};
use crate::metrics::{CACHE_HIT, CACHE_MISS};
use crate::sst::file::FileId;

const INDEX_TYPE_BLOOM_FILTER_INDEX: &str = "bloom_filter_index";

/// Cache for bloom filter index.
pub type BloomFilterIndexCache = IndexCache<(FileId, ColumnId), BloomFilterMeta>;
pub type BloomFilterIndexCacheRef = Arc<BloomFilterIndexCache>;

impl BloomFilterIndexCache {
    /// Creates a new bloom filter index cache.
    pub fn new(index_metadata_cap: u64, index_content_cap: u64, page_size: u64) -> Self {
        Self::new_with_weighter(
            index_metadata_cap,
            index_content_cap,
            page_size,
            INDEX_TYPE_BLOOM_FILTER_INDEX,
            bloom_filter_index_metadata_weight,
            bloom_filter_index_content_weight,
        )
    }
}

/// Calculates weight for bloom filter index metadata.
fn bloom_filter_index_metadata_weight(k: &(FileId, ColumnId), _: &Arc<BloomFilterMeta>) -> u32 {
    (k.0.as_bytes().len()
        + std::mem::size_of::<ColumnId>()
        + std::mem::size_of::<BloomFilterMeta>()) as u32
}

/// Calculates weight for bloom filter index content.
fn bloom_filter_index_content_weight((k, _): &((FileId, ColumnId), PageKey), v: &Bytes) -> u32 {
    (k.0.as_bytes().len() + std::mem::size_of::<ColumnId>() + v.len()) as u32
}

/// Bloom filter index blob reader with cache.
pub struct CachedBloomFilterIndexBlobReader<R> {
    file_id: FileId,
    column_id: ColumnId,
    blob_size: u64,
    inner: R,
    cache: BloomFilterIndexCacheRef,
}

impl<R> CachedBloomFilterIndexBlobReader<R> {
    /// Creates a new bloom filter index blob reader with cache.
    pub fn new(
        file_id: FileId,
        column_id: ColumnId,
        blob_size: u64,
        inner: R,
        cache: BloomFilterIndexCacheRef,
    ) -> Self {
        Self {
            file_id,
            column_id,
            blob_size,
            inner,
            cache,
        }
    }
}

#[async_trait]
impl<R: BloomFilterReader + Send> BloomFilterReader for CachedBloomFilterIndexBlobReader<R> {
    async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes> {
        let inner = &mut self.inner;
        self.cache
            .get_or_load(
                (self.file_id, self.column_id),
                self.blob_size,
                offset,
                size,
                move |ranges| async move { inner.read_vec(&ranges).await },
            )
            .await
            .map(|b| b.into())
    }

    /// Reads the meta information of the bloom filter.
    async fn metadata(&mut self) -> Result<BloomFilterMeta> {
        if let Some(cached) = self.cache.get_metadata((self.file_id, self.column_id)) {
            CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
            Ok((*cached).clone())
        } else {
            let meta = self.inner.metadata().await?;
            self.cache
                .put_metadata((self.file_id, self.column_id), Arc::new(meta.clone()));
            CACHE_MISS.with_label_values(&[INDEX_METADATA_TYPE]).inc();
            Ok(meta)
        }
    }
}

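Wiring sketch (editorial): any BloomFilterReader can be wrapped so that metadata and page reads consult the shared cache first and fall through to the inner reader on a miss; wrap_with_cache is a hypothetical helper name, not part of the file.

// Illustrative sketch only.
fn wrap_with_cache<R: BloomFilterReader + Send>(
    file_id: FileId,
    column_id: ColumnId,
    blob_size: u64,
    inner: R,
    cache: BloomFilterIndexCacheRef,
) -> CachedBloomFilterIndexBlobReader<R> {
    // Hits are counted as CACHE_HIT and misses as CACHE_MISS for the
    // "bloom_filter_index" metric type defined above.
    CachedBloomFilterIndexBlobReader::new(file_id, column_id, blob_size, inner, cache)
}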
#[cfg(test)]
mod test {
    use rand::{Rng, RngCore};

    use super::*;

    const FUZZ_REPEAT_TIMES: usize = 100;

    #[test]
    fn fuzz_index_calculation() {
        let mut rng = rand::thread_rng();
        let mut data = vec![0u8; 1024 * 1024];
        rng.fill_bytes(&mut data);

        for _ in 0..FUZZ_REPEAT_TIMES {
            let offset = rng.gen_range(0..data.len() as u64);
            let size = rng.gen_range(0..data.len() as u32 - offset as u32);
            let page_size: usize = rng.gen_range(1..1024);

            let indexes =
                PageKey::generate_page_keys(offset, size, page_size as u64).collect::<Vec<_>>();
            let page_num = indexes.len();
            let mut read = Vec::with_capacity(size as usize);
            for key in indexes.into_iter() {
                let start = key.page_id as usize * page_size;
                let page = if start + page_size < data.len() {
                    &data[start..start + page_size]
                } else {
                    &data[start..]
                };
                read.extend_from_slice(page);
            }
            let expected_range = offset as usize..(offset + size as u64) as usize;
            let read = read[PageKey::calculate_range(offset, size, page_size as u64)].to_vec();
            assert_eq!(
                read,
                data.get(expected_range).unwrap(),
                "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nrange: {:?}, page num: {}",
                offset,
                size,
                page_size,
                read.len(),
                size as usize,
                PageKey::calculate_range(offset, size, page_size as u64),
                page_num
            );
        }
    }
}