Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-06 05:12:54 +00:00.

Compare commits: v0.12.0-ni ... v0.11.2 (115 commits)

Note: the diff viewer's column layout and its +/- markers were lost in this capture. Judging from the version fields further down (`version = "0.12.0"` vs `version = "0.11.2"`, `NEXT_RELEASE_VERSION: v0.12.0` vs `v0.11.0`), the base side appears to be the v0.12.0 nightly branch and the head side v0.11.2; within each changed pair of lines below, the nightly line appears directly above the corresponding v0.11.2 line.
The commit table was captured with only abbreviated SHA1s; the Author and Date columns are empty in this mirror.

SHA1: 7fe735009c, f0298afaf0, 5175dea6b3, 7caa88abc7, eafb01dfff,
b0de816d3d, 5c6161a95e, 5e3c5945c4, f6feac26f5, 4b2c59e626,
cf605ecccc, ab3f9c42f1, 258fc6f31b, e2dccc1d1a, 78c5707642,
204b5e474f, e9f1fa0b7d, a988ff5acf, ef0fca9388, b704e7f703,
3a4c636e29, a22e8b421c, 5b42546204, 0678a31ab1, 589cc84048,
ed8c072a5e, 9d172f1cae, 236888313d, 0b97ef0e4f, 316e6a83eb,
6dc57b7a6c, 1f5c2b32e5, 01e907be40, e4dc5ea243, 3ff5754b5a,
c22ca3ebd5, 327d165ad9, fe63a620ef, be81f0db5a, 6ca7a305ae,
1111a8bd57, 66b21b29b5, 31cfab81ad, dd3a509607, d4cae6af1e,
3fec71b5c0, 9e31a6478b, bce291a8e1, c788eb67e2, 0c32dcf46c,
68a05b38bd, ee72ae8bd0, 556bd796d8, 1327e8809f, 17d75c767c,
a1ed450c0c, ea4ce9d1e3, 1f7d9666b7, 9f1a0d78b2, ed8e418716,
9e7121c1bb, 94a49ed4f0, f5e743379f, 6735e5867e, 925525726b,
6427682a9a, 55b0022676, 2d84cc8d87, c030705b17, 443c600bd0,
39cadfe10b, 9b5e4e80f7, 041a276b66, 614a25ddc5, 4337e20010,
65c52cc698, 50f31fd681, b5af5aaf8d, 27693c7f1e, a59fef9ffb,
bcecd8ce52, ffdcb8c1ac, 554121ad79, 43c12b4f2c, 7aa8c28fe4,
34fbe7739e, 06d7bd99dd, b71d842615, 7f71693b8e, 615ea1a171,
4e725d259d, dc2252eb6d, 6d4cc2e070, 6066ce2c4a, b90d8f7dbd,
fdccf4ff84, 8b1484c064, 576e20ac78, 10b3e3da0f, 4a3ef2d718,
65eabb2a05, bc5a57f51f, f24b9d8814, dd4d0a88ce, 3d2096fe9d,
35715bb710, 08a3befa67, ca1758d4e7, 42bf818167, 2c9b117224,
3edf2317e1, 85d72a3cd0, 928172bd82, e9f5bddeff, 486755d795
(file path not captured)

@@ -53,7 +53,7 @@ runs:
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard,pg_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}

@@ -71,7 +71,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard,pg_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
(file path not captured)

@@ -9,8 +9,8 @@ runs:
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
.github/actions/upload-artifacts/action.yml (vendored, 6 lines changed)

@@ -30,9 +30,9 @@ runs:
done

# The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}
.github/scripts/upload-artifacts-to-s3.sh (vendored, 8 lines changed)

@@ -27,11 +27,11 @@ function upload_artifacts() {
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
.github/workflows/dev-build.yml (vendored, 2 lines changed)

@@ -29,7 +29,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
default: ec2-c6g.8xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
.github/workflows/develop.yml (vendored, 127 lines changed)

@@ -51,12 +51,12 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# # Shares with `Clippy` job
# shared-key: "check-lint"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
- name: Run cargo check
run: cargo check --locked --workspace --all-targets

@@ -67,11 +67,11 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "check-toml"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo

@@ -100,7 +100,7 @@ jobs:
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |

@@ -142,11 +142,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "fuzz-test-targets"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |

@@ -200,11 +200,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "fuzz-test-targets"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |

@@ -261,7 +261,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |

@@ -317,11 +317,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "fuzz-test-targets"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |

@@ -466,11 +466,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "fuzz-test-targets"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |

@@ -573,16 +573,13 @@ jobs:
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
- name: "Pg Kvbackend"
opts: "--setup-pg"
kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures
run: docker compose up -d --wait kafka
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:

@@ -612,11 +609,11 @@ jobs:
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: rustfmt
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
# # Shares across multiple jobs
# shared-key: "check-rust-fmt"
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check

@@ -641,52 +638,52 @@ jobs:
- name: Run cargo clippy
run: make clippy

conflict-check:
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0

coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
needs: [clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools
cache: false
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
save-if: ${{ github.ref == 'refs/heads/main' }}
# Disabled temporarily to see performance
# - name: Docker Cache
# uses: ScribeMD/docker-cache@0.5.0
# with:
# key: docker-${{ runner.os }}-coverage
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
.github/workflows/nightly-build.yml (vendored, 2 lines changed)

@@ -27,7 +27,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
default: ec2-c6g.8xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
.github/workflows/nightly-ci.yml (vendored, 46 lines changed)

@@ -108,53 +108,7 @@ jobs:
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"

## this is designed for generating cache that usable for pull requests
test-on-linux:
name: Run tests on Linux
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Running tests
run: cargo nextest run -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"

cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
steps:
.github/workflows/release.yml (vendored, 2 lines changed)

@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.12.0
NEXT_RELEASE_VERSION: v0.11.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
Cargo.lock (generated, 1576 lines changed): file diff suppressed because it is too large.
Cargo.toml (22 lines changed)

@@ -55,6 +55,7 @@ members = [
"src/promql",
"src/puffin",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",

@@ -67,7 +68,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.12.0"
version = "0.11.2"
edition = "2021"
license = "Apache-2.0"

@@ -78,6 +79,8 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

[workspace.dependencies]

@@ -96,7 +99,6 @@ arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
backon = "1"
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"

@@ -116,8 +118,6 @@ datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.13"

@@ -132,7 +132,6 @@ humantime-serde = "1.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"

@@ -180,17 +179,15 @@ similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"

rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
"serde",
] } # on branch v0.44.x
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"

@@ -257,6 +254,7 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }

@@ -266,9 +264,9 @@ table = { path = "src/table" }

[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
(file path not captured)

@@ -138,8 +138,7 @@ Check the prerequisite:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
* Python toolchain (optional): Required only if using some test scripts.
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).

Build GreptimeDB binary:

@@ -229,3 +228,4 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
(file path not captured)

@@ -94,7 +94,7 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |

@@ -132,10 +132,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |

@@ -214,7 +214,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |

@@ -293,11 +293,9 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_addrs` | Array | -- | Store server address default to etcd store. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |

@@ -380,7 +378,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |

@@ -468,10 +466,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
(file paths not captured)

@@ -59,7 +59,7 @@ body_limit = "64MB"
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1:3001"
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.

@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"

## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false

## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
experimental_write_cache_path = ""

## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB"
experimental_write_cache_size = "5GiB"

## TTL for write cache.
## @toml2docs:none-default
write_cache_ttl = "8h"
experimental_write_cache_ttl = "8h"

## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

@@ -38,7 +38,7 @@ body_limit = "64MB"
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1:4001"
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8

@@ -8,29 +8,13 @@ bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"

## Store server address default to etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""

## The datastore for meta server.
## Available values:
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
backend = "etcd_store"

## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"

## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
backend = "EtcdStore"

## Datanode selector type.
## - `round_robin` (default value)

@@ -337,7 +337,7 @@ data_home = "/tmp/greptimedb/"
type = "File"

## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""

@@ -518,18 +518,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"

## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false

## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""

## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB"
experimental_write_cache_size = "5GiB"

## TTL for write cache.
## @toml2docs:none-default
write_cache_ttl = "8h"
experimental_write_cache_ttl = "8h"

## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
(file paths not captured)

@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=.,rw \

@@ -7,8 +7,10 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb

# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y

# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \

@@ -13,7 +13,12 @@ RUN apt-get update && apt-get install -y \
curl \
git \
build-essential \
pkg-config
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow

# Trust workdir
RUN git config --global --add safe.directory /greptimedb

@@ -12,6 +12,8 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which

# Install protoc

@@ -21,7 +23,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN

@@ -6,8 +6,11 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb

# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \

@@ -17,7 +20,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
git \
build-essential \
pkg-config
pkg-config \
python3.10 \
python3.10-dev

ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"

@@ -33,6 +38,21 @@ fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/

# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version

# Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10

# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),

@@ -45,6 +65,10 @@ RUN mv protoc3/include/* /usr/local/include/
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'

# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
docker/python/requirements.txt (new file, 5 lines)

@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
(file path not captured)

@@ -5296,7 +5296,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_total{pod=~\"$datanode\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
"range": true,

@@ -5392,7 +5392,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,

@@ -5488,7 +5488,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-{{scheme}}-p99",
"range": true,

@@ -5584,7 +5584,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,

@@ -5680,7 +5680,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",
"range": true,

@@ -5776,7 +5776,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
"instant": false,
"interval": "",
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",

@@ -5873,7 +5873,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
"instant": false,
"interval": "",
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",

@@ -5970,7 +5970,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
"range": true,

@@ -6066,7 +6066,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-p99",
"range": true,

@@ -6298,6 +6298,6 @@
"timezone": "",
"title": "GreptimeDB Cluster Metrics",
"uid": "ce3q6xwn3xa0wa",
"version": 2,
"version": 1,
"weekStart": ""
}
}
File diff suppressed because it is too large.
(file path not captured)

@@ -122,6 +122,13 @@ pub enum Error {
source: BoxedError,
},

#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},

#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,

@@ -336,7 +343,9 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

Error::Internal { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}

Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
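For readers unfamiliar with the snafu pattern this hunk touches, here is a minimal, self-contained sketch of the same shape: an error variant that captures an implicit source-code location plus a source error, and delegates its status code to that source. The `StatusCode` enum and `EngineError` type below are simplified stand-ins invented for the example, not the actual `common_error` definitions; only the variant and selector shape follows the hunk.

use snafu::{IntoError, Location, Snafu};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum StatusCode {
    Internal,
}

// Hypothetical inner error standing in for the boxed source error.
#[derive(Debug)]
struct EngineError;

impl std::fmt::Display for EngineError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "script engine failure")
    }
}

impl std::error::Error for EngineError {}

impl EngineError {
    fn status_code(&self) -> StatusCode {
        StatusCode::Internal
    }
}

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to re-compile script due to internal error"))]
    CompileScriptInternal {
        // Captured automatically by snafu when the error is constructed.
        #[snafu(implicit)]
        location: Location,
        source: EngineError,
    },
}

impl Error {
    // Mirrors the `status_code` delegation added in the hunk above.
    fn status_code(&self) -> StatusCode {
        match self {
            Error::CompileScriptInternal { source, .. } => source.status_code(),
        }
    }
}

fn main() {
    let err = CompileScriptInternalSnafu.into_error(EngineError);
    assert_eq!(err.status_code(), StatusCode::Internal);
    println!("{err}");
}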
(file path not captured)

@@ -58,8 +58,6 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
/// Fulltext index constraint name
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
/// Skipping index constraint name
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";

/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {

@@ -227,12 +225,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();

// For compatibility, use primary key columns as inverted index columns.
let pk_as_inverted_index = !schema
.column_schemas()
.iter()
.any(|c| c.has_inverted_index_key());

for (idx, column) in schema.column_schemas().iter().enumerate() {
let mut constraints = vec![];
if column.is_time_index() {

@@ -250,20 +242,14 @@ impl InformationSchemaKeyColumnUsageBuilder {
// TODO(dimbtp): foreign key constraint not supported yet
if keys.contains(&idx) {
constraints.push(PRI_CONSTRAINT_NAME);

if pk_as_inverted_index {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
}
if column.is_inverted_indexed() {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
if column.is_fulltext_indexed() {

if column.has_fulltext_index_key() {
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
}
if column.is_skipping_indexed() {
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
}

if !constraints.is_empty() {
let aggregated_constraints = constraints.join(", ");
(file path not captured)

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]

[lints]
workspace = true

@@ -59,6 +56,7 @@ tokio.workspace = true
tracing-appender.workspace = true

[dev-dependencies]
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true
(file path not captured)

@@ -22,9 +22,6 @@ use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::postgres::PgStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_telemetry::info;

@@ -58,34 +55,18 @@ where
#[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand {
#[clap(long)]
etcd_addr: Option<String>,
#[cfg(feature = "pg_kvbackend")]
#[clap(long)]
postgres_addr: Option<String>,
etcd_addr: String,
#[clap(long)]
count: u32,
}

impl BenchTableMetadataCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
info!("Using etcd as kv backend");
EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
} else {
Arc::new(MemoryKvBackend::new())
};
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();

#[cfg(feature = "pg_kvbackend")]
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
info!("Using postgres as kv backend");
PgStore::with_url(postgres_addr, "greptime_metakv", 128)
.await
.unwrap()
} else {
kv_backend
};

let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));

let tool = BenchTableMetadata {
table_metadata_manager,
(file path not captured)

@@ -10,8 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["servers/pprof", "servers/mem-prof"]
default = ["python", "servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]

[lints]
workspace = true
(file path not captured)

@@ -62,11 +62,6 @@ impl Instance {
pub fn datanode(&self) -> &Datanode {
&self.datanode
}

/// allow customizing datanode for downstream projects
pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode
}
}

#[async_trait]

@@ -276,8 +271,7 @@ impl StartCommand {
info!("Datanode options: {:#?}", opts);

let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
.await
(file path not captured)

@@ -66,11 +66,6 @@ impl Instance {
pub fn flownode(&self) -> &FlownodeInstance {
&self.flownode
}

/// allow customizing flownode for downstream projects
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
}

#[async_trait::async_trait]

@@ -222,8 +217,7 @@ impl StartCommand {
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);

let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;

// TODO(discord9): make it not optionale after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0);
(file path not captured)

@@ -268,8 +268,7 @@ impl StartCommand {
info!("Frontend options: {:#?}", opts);

let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
.await
@@ -249,6 +249,8 @@ impl StartCommand {
|
||||
|
||||
if let Some(backend) = &self.backend {
|
||||
opts.backend.clone_from(backend);
|
||||
} else {
|
||||
opts.backend = BackendImpl::default()
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
@@ -272,8 +274,7 @@ impl StartCommand {
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.detect_server_addr();
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
|
@@ -461,8 +461,7 @@ impl StartCommand {

let mut plugins = Plugins::new();
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options();

@@ -69,7 +69,7 @@ fn test_load_datanode_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
@@ -85,9 +85,7 @@ fn test_load_datanode_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default()
.with_addr("127.0.0.1:3001")
.with_hostname("127.0.0.1:3001"),
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
rpc_addr: Some("127.0.0.1:3001".to_string()),
rpc_hostname: Some("127.0.0.1".to_string()),
rpc_runtime_size: Some(8),
@@ -139,7 +137,6 @@ fn test_load_frontend_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
..Default::default()
},
..Default::default()
@@ -157,7 +154,6 @@ fn test_load_metasrv_example_config() {
component: MetasrvOptions {
selector: SelectorType::default(),
data_home: "/tmp/metasrv/".to_string(),
server_addr: "127.0.0.1:3002".to_string(),
logging: LoggingOptions {
dir: "/tmp/greptimedb/logs".to_string(),
level: Some("info".to_string()),
@@ -207,7 +203,7 @@ fn test_load_standalone_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
testing = []

[lints]
workspace = true

@@ -17,7 +17,6 @@ use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

@@ -34,22 +33,19 @@ pub struct Metadata {
pub content_length: u64,
}

/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
pub trait SizeAwareRangeReader: RangeReader {
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Send + Unpin {
/// Sets the file size hint for the reader.
///
/// It's used to optimize the reading process by reducing the number of remote requests.
fn with_file_size_hint(&mut self, file_size_hint: u64);
}

/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Sync + Send + Unpin {
/// Returns the metadata of the source.
async fn metadata(&self) -> io::Result<Metadata>;
async fn metadata(&mut self) -> io::Result<Metadata>;

/// Reads the bytes in the given range.
async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;

/// Reads the bytes in the given range into the buffer.
///
@@ -57,14 +53,18 @@ pub trait RangeReader: Sync + Send + Unpin {
/// - If the buffer is insufficient to hold the bytes, it will either:
/// - Allocate additional space (e.g., for `Vec<u8>`)
/// - Panic (e.g., for `&mut [u8]`)
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
let bytes = self.read(range).await?;
buf.put_slice(&bytes);
Ok(())
}

/// Reads the bytes in the given ranges.
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
result.push(self.read(range.clone()).await?);
@@ -74,20 +74,25 @@ pub trait RangeReader: Sync + Send + Unpin {
}

#[async_trait]
impl<R: ?Sized + RangeReader> RangeReader for &R {
async fn metadata(&self) -> io::Result<Metadata> {
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
fn with_file_size_hint(&mut self, file_size_hint: u64) {
(*self).with_file_size_hint(file_size_hint)
}

async fn metadata(&mut self) -> io::Result<Metadata> {
(*self).metadata().await
}

async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
(*self).read(range).await
}

async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
(*self).read_into(range, buf).await
}

async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
(*self).read_vec(ranges).await
}
}
@@ -115,7 +120,7 @@ pub struct AsyncReadAdapter<R> {

impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
pub async fn new(inner: R) -> io::Result<Self> {
let inner = inner;
let mut inner = inner;
let metadata = inner.metadata().await?;
Ok(AsyncReadAdapter {
inner: Arc::new(Mutex::new(inner)),
@@ -155,7 +160,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
let range = *this.position..(*this.position + size);
let inner = this.inner.clone();
let fut = async move {
let inner = inner.lock().await;
let mut inner = inner.lock().await;
inner.read(range).await
};

@@ -190,24 +195,27 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {

#[async_trait]
impl RangeReader for Vec<u8> {
async fn metadata(&self) -> io::Result<Metadata> {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}

async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.len() as u64,
})
}

async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
Ok(bytes)
}
}

// TODO(weny): considers replacing `tokio::fs::File` with opendal reader.
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
content_length: u64,
position: AtomicU64,
file: Mutex<tokio::fs::File>,
position: u64,
file: tokio::fs::File,
}

impl FileReader {
@@ -217,37 +225,32 @@ impl FileReader {
let metadata = file.metadata().await?;
Ok(FileReader {
content_length: metadata.len(),
position: AtomicU64::new(0),
file: Mutex::new(file),
position: 0,
file,
})
}
}

#[cfg(any(test, feature = "testing"))]
impl SizeAwareRangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
}

#[async_trait]
impl RangeReader for FileReader {
async fn metadata(&self) -> io::Result<Metadata> {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}

async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.content_length,
})
}

async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
let mut file = self.file.lock().await;

if range.start != self.position.load(Ordering::Relaxed) {
file.seek(io::SeekFrom::Start(range.start)).await?;
self.position.store(range.start, Ordering::Relaxed);
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
if range.start != self.position {
self.file.seek(io::SeekFrom::Start(range.start)).await?;
self.position = range.start;
}

range.end = range.end.min(self.content_length);
if range.end <= self.position.load(Ordering::Relaxed) {
if range.end <= self.position {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Start of range is out of bounds",
@@ -256,8 +259,8 @@ impl RangeReader for FileReader {

let mut buf = vec![0; (range.end - range.start) as usize];

file.read_exact(&mut buf).await?;
self.position.store(range.end, Ordering::Relaxed);
self.file.read_exact(&mut buf).await?;
self.position = range.end;

Ok(Bytes::from(buf))
}
@@ -298,7 +301,7 @@ mod tests {
let data = b"hello world";
tokio::fs::write(path, data).await.unwrap();

let reader = FileReader::new(path).await.unwrap();
let mut reader = FileReader::new(path).await.unwrap();
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.content_length, data.len() as u64);

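The hunk above shows two variants of the `RangeReader` trait: one with shared-reference (`&self`) methods, and one with mutable (`&mut self`) methods plus a separate `SizeAwareRangeReader` trait carrying the file-size hint. Below is a minimal sketch of driving the mutable variant through the `Vec<u8>` implementation visible in the diff; the `common_base::range_read` import path and the `tokio` runtime are assumptions, not taken from the diff.

```rust
// Sketch only: exercises the `&mut self` flavour of `RangeReader` shown above.
// The crate path `common_base::range_read` is assumed; only methods that
// appear in the diff (`metadata`, `read`) are called.
use common_base::range_read::RangeReader;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // `Vec<u8>` implements `RangeReader` directly (see the impl above),
    // reporting its length as `content_length` and copying ranges out as `Bytes`.
    let mut data: Vec<u8> = b"hello world".to_vec();

    let meta = data.metadata().await?;
    assert_eq!(meta.content_length, 11);

    // Ranges are `Range<u64>` over byte offsets.
    let bytes = data.read(0..5).await?;
    assert_eq!(&bytes[..], b"hello");
    Ok(())
}
```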
@@ -32,7 +32,6 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;

use crate::function_registry::FunctionRegistry;
use crate::scalars::vector::product::VectorProductCreator;
use crate::scalars::vector::sum::VectorSumCreator;

/// A function creates `AggregateFunctionCreator`.
@@ -94,7 +93,6 @@ impl AggregateFunctions {
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
register_aggr_func!("vec_sum", 1, VectorSumCreator);
register_aggr_func!("vec_product", 1, VectorProductCreator);

#[cfg(feature = "geo")]
register_aggr_func!(

@@ -14,17 +14,14 @@

mod convert;
mod distance;
mod elem_product;
mod elem_sum;
pub mod impl_conv;
pub(crate) mod product;
mod scalar_add;
mod scalar_mul;
mod sub;
pub(crate) mod sum;
mod vector_div;
mod vector_mul;
mod vector_norm;

use std::sync::Arc;

@@ -49,10 +46,8 @@ impl VectorFunction {

// vector calculation
registry.register(Arc::new(vector_mul::VectorMulFunction));
registry.register(Arc::new(vector_norm::VectorNormFunction));
registry.register(Arc::new(vector_div::VectorDivFunction));
registry.register(Arc::new(sub::SubFunction));
registry.register(Arc::new(elem_sum::ElemSumFunction));
registry.register(Arc::new(elem_product::ElemProductFunction));
}
}

||||
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_elem_product";
|
||||
|
||||
/// Multiplies all elements of the vector, returns a scalar.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_elem_product(parse_vec('[1.0, 2.0, 3.0, 4.0]'));
|
||||
///
|
||||
// +-----------------------------------------------------------+
|
||||
// | vec_elem_product(parse_vec(Utf8("[1.0, 2.0, 3.0, 4.0]"))) |
|
||||
// +-----------------------------------------------------------+
|
||||
// | 24.0 |
|
||||
// +-----------------------------------------------------------+
|
||||
/// ``````
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ElemProductFunction;
|
||||
|
||||
impl Function for ElemProductFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).product()));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ElemProductFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
|
||||
#[test]
|
||||
fn test_elem_product() {
|
||||
let func = ElemProductFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(120.0));
|
||||
assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
|
||||
}
|
||||
}
|
||||
@@ -1,211 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
/// Aggregates by multiplying elements across the same dimension, returns a vector.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VectorProduct {
|
||||
product: Option<OVector<f32, Dyn>>,
|
||||
has_null: bool,
|
||||
}
|
||||
|
||||
#[as_aggr_func_creator]
|
||||
#[derive(Debug, Default, AggrFuncTypeStore)]
|
||||
pub struct VectorProductCreator {}
|
||||
|
||||
impl AggregateFunctionCreator for VectorProductCreator {
|
||||
fn creator(&self) -> AccumulatorCreatorFunction {
|
||||
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
|
||||
ensure!(
|
||||
types.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let input_type = &types[0];
|
||||
match input_type {
|
||||
ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
|
||||
Ok(Box::new(VectorProduct::default()))
|
||||
}
|
||||
_ => {
|
||||
let err_msg = format!(
|
||||
"\"VEC_PRODUCT\" aggregate function not support data type {:?}",
|
||||
input_type.logical_type_id(),
|
||||
);
|
||||
CreateAccumulatorSnafu { err_msg }.fail()?
|
||||
}
|
||||
}
|
||||
});
|
||||
creator
|
||||
}
|
||||
|
||||
fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
|
||||
Ok(vec![self.output_type()?])
|
||||
}
|
||||
}
|
||||
|
||||
impl VectorProduct {
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.product.get_or_insert_with(|| {
|
||||
OVector::from_iterator_generic(Dyn(len), Const::<1>, (0..len).map(|_| 1.0))
|
||||
})
|
||||
}
|
||||
|
||||
fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
|
||||
if values.is_empty() || self.has_null {
|
||||
return Ok(());
|
||||
};
|
||||
let column = &values[0];
|
||||
let len = column.len();
|
||||
|
||||
match as_veclit_if_const(column)? {
|
||||
Some(column) => {
|
||||
let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
|
||||
*self.inner(vec_column.len()) =
|
||||
(*self.inner(vec_column.len())).component_mul(&vec_column);
|
||||
}
|
||||
None => {
|
||||
for i in 0..len {
|
||||
let Some(arg0) = as_veclit(column.get_ref(i))? else {
|
||||
if is_update {
|
||||
self.has_null = true;
|
||||
self.product = None;
|
||||
}
|
||||
return Ok(());
|
||||
};
|
||||
let vec_column = DVectorView::from_slice(&arg0, arg0.len());
|
||||
*self.inner(vec_column.len()) =
|
||||
(*self.inner(vec_column.len())).component_mul(&vec_column);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for VectorProduct {
|
||||
fn state(&self) -> common_query::error::Result<Vec<Value>> {
|
||||
self.evaluate().map(|v| vec![v])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(values, true)
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(states, false)
|
||||
}
|
||||
|
||||
fn evaluate(&self) -> common_query::error::Result<Value> {
|
||||
match &self.product {
|
||||
None => Ok(Value::Null),
|
||||
Some(vector) => {
|
||||
let v = vector.as_slice();
|
||||
Ok(Value::from(veclit_to_binlit(v)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{ConstantVector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_update_batch() {
|
||||
// test update empty batch, expect not updating anything
|
||||
let mut vec_product = VectorProduct::default();
|
||||
vec_product.update_batch(&[]).unwrap();
|
||||
assert!(vec_product.product.is_none());
|
||||
assert!(!vec_product.has_null);
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update one not-null value
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
|
||||
"[1.0,2.0,3.0]".to_string(),
|
||||
)]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update one null value
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update no null-value batch
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[28.0, 80.0, 162.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update null-value batch
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
None,
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update with constant vector
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
|
||||
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
|
||||
4,
|
||||
))];
|
||||
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,168 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_norm";
|
||||
|
||||
/// Normalizes the vector to length 1, returns a vector.
|
||||
/// This's equivalent to `VECTOR_SCALAR_MUL(1/SQRT(VECTOR_ELEM_SUM(VECTOR_MUL(v, v))), v)`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_norm('[7.0, 8.0, 9.0]'));
|
||||
///
|
||||
/// +--------------------------------------------------+
|
||||
/// | vec_to_string(vec_norm(Utf8("[7.0, 8.0, 9.0]"))) |
|
||||
/// +--------------------------------------------------+
|
||||
/// | [0.013888889,0.015873017,0.017857144] |
|
||||
/// +--------------------------------------------------+
|
||||
///
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorNormFunction;
|
||||
|
||||
impl Function for VectorNormFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec2scalar = vec1.component_mul(&vec0);
|
||||
let scalar_var = vec2scalar.sum().sqrt();
|
||||
|
||||
let vec = DVectorView::from_slice(&arg0, arg0.len());
|
||||
// Use unscale to avoid division by zero and keep more precision as possible
|
||||
let vec_res = vec.unscale(scalar_var);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorNormFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vec_norm() {
|
||||
let func = VectorNormFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[0.0,2.0,3.0]".to_string()),
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
Some("[7.0,-8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 5);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.0, 0.5547002, 0.8320503]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.26726124, 0.5345225, 0.8017837]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(2).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.5025707, 0.5743665, 0.64616233]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(3).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.5025707, -0.5743665, 0.64616233]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(4).is_null());
|
||||
}
|
||||
}
|
@@ -22,7 +22,7 @@ mod version;
use std::sync::Arc;

use build::BuildFunction;
use database::{CurrentSchemaFunction, DatabaseFunction, SessionUserFunction};
use database::{CurrentSchemaFunction, DatabaseFunction};
use pg_catalog::PGCatalogFunction;
use procedure_state::ProcedureStateFunction;
use timezone::TimezoneFunction;
@@ -36,9 +36,8 @@ impl SystemFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(BuildFunction));
registry.register(Arc::new(VersionFunction));
registry.register(Arc::new(CurrentSchemaFunction));
registry.register(Arc::new(DatabaseFunction));
registry.register(Arc::new(SessionUserFunction));
registry.register(Arc::new(CurrentSchemaFunction));
registry.register(Arc::new(TimezoneFunction));
registry.register_async(Arc::new(ProcedureStateFunction));
PGCatalogFunction::register(registry);

@@ -28,11 +28,9 @@ pub struct DatabaseFunction;

#[derive(Clone, Debug, Default)]
pub struct CurrentSchemaFunction;
pub struct SessionUserFunction;

const DATABASE_FUNCTION_NAME: &str = "database";
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";

impl Function for DatabaseFunction {
fn name(&self) -> &str {
@@ -74,26 +72,6 @@ impl Function for CurrentSchemaFunction {
}
}

impl Function for SessionUserFunction {
fn name(&self) -> &str {
SESSION_USER_FUNCTION_NAME
}

fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}

fn signature(&self) -> Signature {
Signature::uniform(0, vec![], Volatility::Immutable)
}

fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
let user = func_ctx.query_ctx.current_user();

Ok(Arc::new(StringVector::from_slice(&[user.username()])) as _)
}
}

impl fmt::Display for DatabaseFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DATABASE")
@@ -106,12 +84,6 @@ impl fmt::Display for CurrentSchemaFunction {
}
}

impl fmt::Display for SessionUserFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SESSION_USER")
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

@@ -6,7 +6,7 @@ license.workspace = true

[features]
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon"]
pg_kvbackend = ["dep:tokio-postgres"]

[lints]
workspace = true
@@ -17,7 +17,6 @@ api.workspace = true
async-recursion = "1.0"
async-stream = "0.3"
async-trait.workspace = true
backon = { workspace = true, optional = true }
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
@@ -36,8 +35,6 @@ common-wal.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
deadpool.workspace = true
deadpool-postgres.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true

@@ -639,6 +639,15 @@ pub enum Error {
location: Location,
},

#[snafu(display("Failed to parse {} from str to utf8", name))]
StrFromUtf8 {
name: String,
#[snafu(source)]
error: std::str::Utf8Error,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Value not exists"))]
ValueNotExist {
#[snafu(implicit)]
@@ -649,9 +658,8 @@ pub enum Error {
GetCache { source: Arc<Error> },

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to execute via Postgres, sql: {}", sql))]
#[snafu(display("Failed to execute via Postgres"))]
PostgresExecution {
sql: String,
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(implicit)]
@@ -659,37 +667,12 @@ pub enum Error {
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to create connection pool for Postgres"))]
CreatePostgresPool {
#[snafu(source)]
error: deadpool_postgres::CreatePoolError,
#[snafu(implicit)]
location: Location,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to get Postgres connection from pool: {}", reason))]
GetPostgresConnection {
reason: String,
#[snafu(implicit)]
location: Location,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to {} Postgres transaction", operation))]
PostgresTransaction {
#[snafu(display("Failed to connect to Postgres"))]
ConnectPostgres {
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(implicit)]
location: Location,
operation: String,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Postgres transaction retry failed"))]
PostgresTransactionRetryFailed {
#[snafu(implicit)]
location: Location,
},

#[snafu(display(
@@ -755,7 +738,8 @@ impl ErrorExt for Error {
| UnexpectedLogicalRouteTable { .. }
| ProcedureOutput { .. }
| FromUtf8 { .. }
| MetadataCorruption { .. } => StatusCode::Unexpected,
| MetadataCorruption { .. }
| StrFromUtf8 { .. } => StatusCode::Unexpected,

SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,

@@ -802,11 +786,9 @@ impl ErrorExt for Error {
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,

#[cfg(feature = "pg_kvbackend")]
PostgresExecution { .. }
| CreatePostgresPool { .. }
| GetPostgresConnection { .. }
| PostgresTransaction { .. }
| PostgresTransactionRetryFailed { .. } => StatusCode::Internal,
PostgresExecution { .. } => StatusCode::Internal,
#[cfg(feature = "pg_kvbackend")]
ConnectPostgres { .. } => StatusCode::Internal,
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
}
}
@@ -817,20 +799,6 @@ impl ErrorExt for Error {
}

impl Error {
#[cfg(feature = "pg_kvbackend")]
/// Check if the error is a serialization error.
pub fn is_serialization_error(&self) -> bool {
match self {
Error::PostgresTransaction { error, .. } => {
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
}
Error::PostgresExecution { error, .. } => {
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
}
_ => false,
}
}

/// Creates a new [Error::RetryLater] error from source `err`.
pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
Error::RetryLater {

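The `is_serialization_error` helper above classifies Postgres serialization failures (`SQLSTATE 40001`), which are the transient, retryable kind under serializable transactions. A generic retry sketch around such a predicate follows; the helper name, signature, and retry limit are illustrative and not taken from the diff.

```rust
use std::future::Future;

// Sketch: retry an async operation while a caller-supplied predicate (for
// example `Error::is_serialization_error` from the enum above) says the
// failure is transient. Everything here is illustrative, not from the diff.
async fn retry_on<T, E, Fut>(
    mut op: impl FnMut() -> Fut,
    is_retryable: impl Fn(&E) -> bool,
    max_retries: usize,
) -> Result<T, E>
where
    Fut: Future<Output = Result<T, E>>,
{
    let mut attempts = 0;
    loop {
        match op().await {
            // Transient failure: count the attempt and run the operation again.
            Err(err) if attempts < max_retries && is_retryable(&err) => attempts += 1,
            // Success, or a non-retryable / exhausted error: hand it back.
            other => return other,
        }
    }
}
```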
@@ -190,13 +190,6 @@ impl TableInfoManager {
))
}

/// Checks if the table exists.
pub async fn exists(&self, table_id: TableId) -> Result<bool> {
let key = TableInfoKey::new(table_id);
let raw_key = key.to_bytes();
self.kv_backend.exists(&raw_key).await
}

pub async fn get(
&self,
table_id: TableId,

@@ -542,8 +542,6 @@ mod tests {
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
unprepare_kv,
};

@@ -591,7 +589,7 @@ mod tests {
#[tokio::test]
async fn test_range_2() {
if let Some(kv_backend) = build_kv_backend().await {
test_kv_range_2_with_prefix(&kv_backend, b"range2/".to_vec()).await;
test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
}
}

@@ -618,8 +616,7 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"deleteRange/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
}
}

@@ -628,20 +625,7 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"batchDelete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}
}

#[tokio::test]
async fn test_etcd_txn() {
if let Some(kv_backend) = build_kv_backend().await {
test_txn_one_compare_op(&kv_backend).await;
text_txn_multi_compare_op(&kv_backend).await;
test_txn_compare_equal(&kv_backend).await;
test_txn_compare_greater(&kv_backend).await;
test_txn_compare_less(&kv_backend).await;
test_txn_compare_not_equal(&kv_backend).await;
test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
}
}
}

@@ -325,9 +325,7 @@ mod tests {
use crate::error::Error;
use crate::kv_backend::test::{
prepare_kv, test_kv_batch_delete, test_kv_batch_get, test_kv_compare_and_put,
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2, test_txn_compare_equal,
test_txn_compare_greater, test_txn_compare_less, test_txn_compare_not_equal,
test_txn_one_compare_op, text_txn_multi_compare_op,
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2,
};

async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
@@ -355,7 +353,7 @@ mod tests {
async fn test_range_2() {
let kv = MemoryKvBackend::<Error>::new();

test_kv_range_2(&kv).await;
test_kv_range_2(kv).await;
}

#[tokio::test]
@@ -376,24 +374,13 @@ mod tests {
async fn test_delete_range() {
let kv_backend = mock_mem_store_with_data().await;

test_kv_delete_range(&kv_backend).await;
test_kv_delete_range(kv_backend).await;
}

#[tokio::test]
async fn test_batch_delete() {
let kv_backend = mock_mem_store_with_data().await;

test_kv_batch_delete(&kv_backend).await;
}

#[tokio::test]
async fn test_memory_txn() {
let kv_backend = MemoryKvBackend::<Error>::new();
test_txn_one_compare_op(&kv_backend).await;
text_txn_multi_compare_op(&kv_backend).await;
test_txn_compare_equal(&kv_backend).await;
test_txn_compare_greater(&kv_backend).await;
test_txn_compare_less(&kv_backend).await;
test_txn_compare_not_equal(&kv_backend).await;
test_kv_batch_delete(kv_backend).await;
}
}

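The tests above all drive a `KvBackend` through the same request/response types. Below is a minimal put-then-get sketch against the in-memory backend; the `common_meta::...` crate paths are assumptions (the diff only shows `crate::`-relative imports), while the calls themselves appear in the hunks above.

```rust
// Sketch: basic KvBackend usage mirroring what the tests above do.
// Crate paths are assumed; `put`, `get`, `PutRequest`, and `KeyValue::value`
// are the calls visible in the diff.
use common_meta::error::Error;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackend;
use common_meta::rpc::store::PutRequest;

#[tokio::main]
async fn main() {
    let kv_backend = MemoryKvBackend::<Error>::new();

    // Store one key/value pair, as the txn tests do before asserting.
    kv_backend
        .put(PutRequest {
            key: b"greeting".to_vec(),
            value: b"hello".to_vec(),
            ..Default::default()
        })
        .await
        .unwrap();

    // `get` returns `Option<KeyValue>`; read the value back.
    let kv = kv_backend.get(b"greeting").await.unwrap().unwrap();
    assert_eq!(kv.value(), b"hello");
}
```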
File diff suppressed because it is too large
@@ -15,8 +15,6 @@
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use txn::{Compare, CompareOp, TxnOp};
|
||||
|
||||
use super::{KvBackend, *};
|
||||
use crate::error::Error;
|
||||
use crate::rpc::store::{BatchGetRequest, PutRequest};
|
||||
@@ -61,18 +59,14 @@ pub async fn prepare_kv_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>
|
||||
|
||||
pub async fn unprepare_kv(kv_backend: &impl KvBackend, prefix: &[u8]) {
|
||||
let range_end = util::get_prefix_end_key(prefix);
|
||||
assert!(
|
||||
kv_backend
|
||||
.delete_range(DeleteRangeRequest {
|
||||
key: prefix.to_vec(),
|
||||
range_end,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok(),
|
||||
"prefix: {:?}",
|
||||
std::str::from_utf8(prefix).unwrap()
|
||||
);
|
||||
assert!(kv_backend
|
||||
.delete_range(DeleteRangeRequest {
|
||||
key: prefix.to_vec(),
|
||||
range_end,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
pub async fn test_kv_put(kv_backend: &impl KvBackend) {
|
||||
@@ -174,11 +168,11 @@ pub async fn test_kv_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2(kv_backend: &impl KvBackend) {
|
||||
pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
|
||||
test_kv_range_2_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_range_2_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
let atest = [prefix.clone(), b"atest".to_vec()].concat();
|
||||
let test = [prefix.clone(), b"test".to_vec()].concat();
|
||||
|
||||
@@ -352,11 +346,11 @@ pub async fn test_kv_compare_and_put_with_prefix(
|
||||
assert!(resp.is_none());
|
||||
}
|
||||
|
||||
pub async fn test_kv_delete_range(kv_backend: &impl KvBackend) {
|
||||
pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
|
||||
test_kv_delete_range_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
let key3 = [prefix.clone(), b"key3".to_vec()].concat();
|
||||
let req = DeleteRangeRequest {
|
||||
key: key3.clone(),
|
||||
@@ -407,11 +401,11 @@ pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefi
|
||||
assert!(resp.kvs.is_empty());
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_delete(kv_backend: &impl KvBackend) {
|
||||
pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
|
||||
test_kv_batch_delete_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
let key1 = [prefix.clone(), b"key1".to_vec()].concat();
|
||||
let key100 = [prefix.clone(), b"key100".to_vec()].concat();
|
||||
assert!(kv_backend.get(&key1).await.unwrap().is_some());
|
||||
@@ -450,207 +444,3 @@ pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefi
|
||||
assert!(kv_backend.get(&key3).await.unwrap().is_none());
|
||||
assert!(kv_backend.get(&key11).await.unwrap().is_none());
|
||||
}
|
||||
|
||||
pub async fn test_txn_one_compare_op(kv_backend: &impl KvBackend) {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![11],
|
||||
value: vec![3],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
vec![11],
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
|
||||
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 1);
|
||||
}
|
||||
|
||||
pub async fn text_txn_multi_compare_op(kv_backend: &impl KvBackend) {
|
||||
for i in 1..3 {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![i],
|
||||
value: vec![i],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let when: Vec<_> = (1..3u8)
|
||||
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
|
||||
.collect();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(when)
|
||||
.and_then(vec![
|
||||
TxnOp::Put(vec![1], vec![10]),
|
||||
TxnOp::Put(vec![2], vec![20]),
|
||||
])
|
||||
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 2);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_equal(kv_backend: &impl KvBackend) {
|
||||
let key = vec![101u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Put(key, vec![4])]);
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_greater(kv_backend: &impl KvBackend) {
|
||||
let key = vec![102u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_less(kv_backend: &impl KvBackend) {
|
||||
let key = vec![103u8];
|
||||
kv_backend.delete(&[3], false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![2]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_not_equal(kv_backend: &impl KvBackend) {
|
||||
let key = vec![104u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::NotEqual,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
@@ -131,9 +131,9 @@ pub struct TxnResponse {
|
||||
pub struct Txn {
|
||||
// HACK - chroot would modify this field
|
||||
pub(super) req: TxnRequest,
|
||||
pub(super) c_when: bool,
|
||||
pub(super) c_then: bool,
|
||||
pub(super) c_else: bool,
|
||||
c_when: bool,
|
||||
c_then: bool,
|
||||
c_else: bool,
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
@@ -241,7 +241,14 @@ impl From<Txn> for TxnRequest {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
#[test]
|
||||
fn test_compare() {
|
||||
@@ -303,4 +310,232 @@ mod tests {
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_one_compare_op() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![11],
|
||||
value: vec![3],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
vec![11],
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
|
||||
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_multi_compare_op() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
for i in 1..3 {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![i],
|
||||
value: vec![i],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let when: Vec<_> = (1..3u8)
|
||||
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
|
||||
.collect();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(when)
|
||||
.and_then(vec![
|
||||
TxnOp::Put(vec![1], vec![10]),
|
||||
TxnOp::Put(vec![2], vec![20]),
|
||||
])
|
||||
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_equal() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![101u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Put(key, vec![4])]);
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_greater() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![102u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_less() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![103u8];
|
||||
kv_backend.delete(&[3], false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![2]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_not_equal() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![104u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::NotEqual,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
async fn create_kv_backend() -> KvBackendRef {
    Arc::new(MemoryKvBackend::<Error>::new())
    // TODO(jiachun): Add a feature to test against etcd in github CI
    //
    // The same test can be run against etcd by uncommenting the following line
    // crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
    //     .await
    //     .unwrap()
}
}
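One way to act on the TODO above, sketched here purely as an illustration (the environment-variable name and endpoint are assumptions, and the etcd constructor is taken from the commented-out line above), is to let CI pick the backend at runtime:

```rust
// Hypothetical helper: choose the kv backend from an environment variable so a
// CI job can exercise etcd while local runs keep the in-memory backend.
async fn create_kv_backend_from_env() -> KvBackendRef {
    match std::env::var("GT_TEST_KV_BACKEND").as_deref() {
        Ok("etcd") => {
            // Endpoint is an assumption; point it at the etcd service used in CI.
            crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
                .await
                .unwrap()
        }
        _ => Arc::new(MemoryKvBackend::<Error>::new()),
    }
}
```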
|
||||
|
||||
@@ -266,7 +266,7 @@ impl PutRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Default)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct PutResponse {
|
||||
pub prev_kv: Option<KeyValue>,
|
||||
}
|
||||
@@ -425,7 +425,7 @@ impl BatchPutRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BatchPutResponse {
|
||||
pub prev_kvs: Vec<KeyValue>,
|
||||
}
|
||||
@@ -509,7 +509,7 @@ impl BatchDeleteRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BatchDeleteResponse {
|
||||
pub prev_kvs: Vec<KeyValue>,
|
||||
}
|
||||
@@ -754,19 +754,6 @@ impl TryFrom<PbDeleteRangeResponse> for DeleteRangeResponse {
|
||||
}
|
||||
|
||||
impl DeleteRangeResponse {
|
||||
/// Creates a new [`DeleteRangeResponse`] with the given deleted count.
|
||||
pub fn new(deleted: i64) -> Self {
|
||||
Self {
|
||||
deleted,
|
||||
prev_kvs: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [`DeleteRangeResponse`] with the given deleted count and previous key-value pairs.
|
||||
pub fn with_prev_kvs(&mut self, prev_kvs: Vec<KeyValue>) {
|
||||
self.prev_kvs = prev_kvs;
|
||||
}
|
||||
|
||||
pub fn to_proto_resp(self, header: PbResponseHeader) -> PbDeleteRangeResponse {
|
||||
PbDeleteRangeResponse {
|
||||
header: Some(header),
|
||||
|
||||
@@ -13,7 +13,7 @@ workspace = true
|
||||
[dependencies]
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
backon.workspace = true
|
||||
backon = "1"
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -18,6 +18,7 @@ use arrow::error::ArrowError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_recordbatch::error::Error as RecordbatchError;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::arrow;
|
||||
use datatypes::arrow::datatypes::DataType as ArrowDatatype;
|
||||
@@ -30,6 +31,21 @@ use statrs::StatsError;
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to execute Python UDF: {}", msg))]
|
||||
PyUdf {
|
||||
// TODO(discord9): find a way that prevent circle depend(query<-script<-query) and can use script's error type
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create temporary recordbatch when eval Python UDF"))]
|
||||
UdfTempRecordBatch {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: RecordbatchError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute function"))]
|
||||
ExecuteFunction {
|
||||
#[snafu(source)]
|
||||
@@ -244,7 +260,9 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::CreateAccumulator { .. }
|
||||
Error::UdfTempRecordBatch { .. }
|
||||
| Error::PyUdf { .. }
|
||||
| Error::CreateAccumulator { .. }
|
||||
| Error::DowncastVector { .. }
|
||||
| Error::InvalidInputState { .. }
|
||||
| Error::InvalidInputCol { .. }
|
||||
|
||||
@@ -35,7 +35,7 @@ use crate::DfRecordBatch;
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct RecordBatch {
|
||||
pub schema: SchemaRef,
|
||||
pub columns: Vec<VectorRef>,
|
||||
columns: Vec<VectorRef>,
|
||||
df_record_batch: DfRecordBatch,
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ futures-util.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
rskafka.workspace = true
|
||||
rustls = { workspace = true, default-features = false, features = ["ring", "logging", "std", "tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] }
|
||||
rustls-native-certs = "0.7"
|
||||
rustls-pemfile = "2.1"
|
||||
serde.workspace = true
|
||||
|
||||
@@ -433,8 +433,8 @@ impl DatanodeBuilder {
|
||||
) -> Result<MitoEngine> {
|
||||
if opts.storage.is_object_storage() {
|
||||
// Enable the write cache when setting object storage
|
||||
config.enable_write_cache = true;
|
||||
info!("Configured 'enable_write_cache=true' for mito engine.");
|
||||
config.enable_experimental_write_cache = true;
|
||||
info!("Configured 'enable_experimental_write_cache=true' for mito engine.");
|
||||
}
|
||||
|
||||
let mito_engine = match &opts.wal {
|
||||
|
||||
@@ -164,15 +164,8 @@ impl ColumnSchema {
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
pub fn is_fulltext_indexed(&self) -> bool {
|
||||
self.fulltext_options()
|
||||
.unwrap_or_default()
|
||||
.map(|option| option.enable)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn is_skipping_indexed(&self) -> bool {
|
||||
self.skipping_index_options().unwrap_or_default().is_some()
|
||||
pub fn has_fulltext_index_key(&self) -> bool {
|
||||
self.metadata.contains_key(FULLTEXT_KEY)
|
||||
}
|
||||
|
||||
pub fn has_inverted_index_key(&self) -> bool {
|
||||
|
||||
@@ -40,6 +40,7 @@ datatypes.workspace = true
|
||||
enum-as-inner = "0.6.0"
|
||||
enum_dispatch = "0.3"
|
||||
futures = "0.3"
|
||||
get-size-derive2 = "0.1.2"
|
||||
get-size2 = "0.1.2"
|
||||
greptime-proto.workspace = true
|
||||
# This fork of hydroflow simply keeps our dependency in our org and pins the version
|
||||
|
||||
@@ -804,8 +804,6 @@ impl FlowWorkerManager {
|
||||
}
|
||||
}
|
||||
|
||||
node_ctx.add_flow_plan(flow_id, flow_plan.clone());
|
||||
|
||||
let _ = comment;
|
||||
let _ = flow_options;
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ use std::collections::{BTreeMap, BTreeSet, HashMap};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_recordbatch::RecordBatch;
|
||||
use common_telemetry::trace;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use session::context::QueryContext;
|
||||
@@ -32,7 +31,6 @@ use crate::error::{Error, EvalSnafu, TableNotFoundSnafu};
|
||||
use crate::expr::error::InternalSnafu;
|
||||
use crate::expr::{Batch, GlobalId};
|
||||
use crate::metrics::METRIC_FLOW_INPUT_BUF_SIZE;
|
||||
use crate::plan::TypedPlan;
|
||||
use crate::repr::{DiffRow, RelationDesc, BATCH_SIZE, BROADCAST_CAP, SEND_BUF_CAP};
|
||||
|
||||
/// A context that holds the information of the dataflow
|
||||
@@ -42,7 +40,6 @@ pub struct FlownodeContext {
|
||||
pub source_to_tasks: BTreeMap<TableId, BTreeSet<FlowId>>,
|
||||
/// mapping from task to sink table, useful for sending data back to the client when a task is done running
|
||||
pub flow_to_sink: BTreeMap<FlowId, TableName>,
|
||||
pub flow_plans: BTreeMap<FlowId, TypedPlan>,
|
||||
pub sink_to_flow: BTreeMap<TableName, FlowId>,
|
||||
/// broadcast sender for source table, any incoming write request will be sent to the source table's corresponding sender
|
||||
///
|
||||
@@ -66,7 +63,6 @@ impl FlownodeContext {
|
||||
Self {
|
||||
source_to_tasks: Default::default(),
|
||||
flow_to_sink: Default::default(),
|
||||
flow_plans: Default::default(),
|
||||
sink_to_flow: Default::default(),
|
||||
source_sender: Default::default(),
|
||||
sink_receiver: Default::default(),
|
||||
@@ -183,22 +179,6 @@ impl SourceSender {
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
/// send record batch
|
||||
pub async fn send_record_batch(&self, batch: RecordBatch) -> Result<usize, Error> {
|
||||
let row_cnt = batch.num_rows();
|
||||
let batch = Batch::from(batch);
|
||||
|
||||
self.send_buf_row_cnt.fetch_add(row_cnt, Ordering::SeqCst);
|
||||
|
||||
self.send_buf_tx.send(batch).await.map_err(|e| {
|
||||
crate::error::InternalSnafu {
|
||||
reason: format!("Failed to send batch, error = {:?}", e),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
Ok(row_cnt)
|
||||
}
|
||||
}
|
||||
|
||||
impl FlownodeContext {
|
||||
@@ -220,16 +200,6 @@ impl FlownodeContext {
|
||||
sender.send_rows(rows, batch_datatypes).await
|
||||
}
|
||||
|
||||
pub async fn send_rb(&self, table_id: TableId, batch: RecordBatch) -> Result<usize, Error> {
|
||||
let sender = self
|
||||
.source_sender
|
||||
.get(&table_id)
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: table_id.to_string(),
|
||||
})?;
|
||||
sender.send_record_batch(batch).await
|
||||
}
|
||||
|
||||
/// flush all senders' buffers
|
||||
///
|
||||
/// returns the number of rows sent
|
||||
@@ -265,15 +235,6 @@ impl FlownodeContext {
|
||||
self.sink_to_flow.insert(sink_table_name, task_id);
|
||||
}
|
||||
|
||||
/// add flow plan to worker context
|
||||
pub fn add_flow_plan(&mut self, task_id: FlowId, plan: TypedPlan) {
|
||||
self.flow_plans.insert(task_id, plan);
|
||||
}
|
||||
|
||||
pub fn get_flow_plan(&self, task_id: &FlowId) -> Option<TypedPlan> {
|
||||
self.flow_plans.get(task_id).cloned()
|
||||
}
|
||||
|
||||
/// remove flow from worker context
|
||||
pub fn remove_flow(&mut self, task_id: FlowId) {
|
||||
if let Some(sink_table_name) = self.flow_to_sink.remove(&task_id) {
|
||||
@@ -285,7 +246,6 @@ impl FlownodeContext {
|
||||
self.source_sender.remove(source_table_id);
|
||||
}
|
||||
}
|
||||
self.flow_plans.remove(&task_id);
|
||||
}
|
||||
|
||||
/// try to add a source sender; does nothing if one already exists
|
||||
|
||||
@@ -82,31 +82,6 @@ impl ManagedTableSource {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the time index column from table id
|
||||
pub async fn get_time_index_column_from_table_id(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<(usize, datatypes::schema::ColumnSchema), Error> {
|
||||
let info = self
|
||||
.table_info_manager
|
||||
.get(table_id)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Table id = {:?}, couldn't found table info", table_id),
|
||||
})?;
|
||||
let raw_schema = &info.table_info.meta.schema;
|
||||
let Some(ts_index) = raw_schema.timestamp_index else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Table id = {:?}, couldn't found timestamp index", table_id),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
let col_schema = raw_schema.column_schemas[ts_index].clone();
|
||||
Ok((ts_index, col_schema))
|
||||
}
|
||||
|
||||
pub async fn get_table_id_from_proto_name(
|
||||
&self,
|
||||
name: &greptime_proto::v1::TableName,
|
||||
@@ -193,14 +168,6 @@ impl ManagedTableSource {
|
||||
let desc = table_info_value_to_relation_desc(table_info_value)?;
|
||||
Ok((table_name, desc))
|
||||
}
|
||||
|
||||
pub async fn check_table_exist(&self, table_id: &TableId) -> Result<bool, Error> {
|
||||
self.table_info_manager
|
||||
.exists(*table_id)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ManagedTableSource {
|
||||
|
||||
@@ -21,7 +21,7 @@ use common_error::{define_into_tonic_status, from_err_code_msg_to_header};
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_telemetry::common_error::ext::ErrorExt;
|
||||
use common_telemetry::common_error::status_code::StatusCode;
|
||||
use snafu::{Location, ResultExt, Snafu};
|
||||
use snafu::{Location, Snafu};
|
||||
use tonic::metadata::MetadataMap;
|
||||
|
||||
use crate::adapter::FlowId;
|
||||
@@ -259,9 +259,3 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
define_into_tonic_status!(Error);
|
||||
|
||||
impl From<EvalError> for Error {
|
||||
fn from(e: EvalError) -> Self {
|
||||
Err::<(), _>(e).context(EvalSnafu).unwrap_err()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ mod linear;
|
||||
pub(crate) mod relation;
|
||||
mod scalar;
|
||||
mod signature;
|
||||
pub(crate) mod utils;
|
||||
|
||||
use arrow::compute::FilterBuilder;
|
||||
use datatypes::prelude::{ConcreteDataType, DataType};
|
||||
@@ -55,16 +54,6 @@ pub struct Batch {
|
||||
diffs: Option<VectorRef>,
|
||||
}
|
||||
|
||||
impl From<common_recordbatch::RecordBatch> for Batch {
|
||||
fn from(value: common_recordbatch::RecordBatch) -> Self {
|
||||
Self {
|
||||
row_count: value.num_rows(),
|
||||
batch: value.columns,
|
||||
diffs: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Batch {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
let mut batch_eq = true;
|
||||
|
||||
@@ -94,30 +94,6 @@ impl MapFilterProject {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_nth_expr(&self, n: usize) -> Option<ScalarExpr> {
|
||||
let idx = *self.projection.get(n)?;
|
||||
if idx < self.input_arity {
|
||||
Some(ScalarExpr::Column(idx))
|
||||
} else {
|
||||
// find direct ref to input's expr
|
||||
|
||||
let mut expr = self.expressions.get(idx - self.input_arity)?;
|
||||
loop {
|
||||
match expr {
|
||||
ScalarExpr::Column(prev) => {
|
||||
if *prev < self.input_arity {
|
||||
return Some(ScalarExpr::Column(*prev));
|
||||
} else {
|
||||
expr = self.expressions.get(*prev - self.input_arity)?;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
_ => return Some(expr.clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The number of columns expected in the output row.
|
||||
pub fn output_arity(&self) -> usize {
|
||||
self.projection.len()
|
||||
|
||||
@@ -311,9 +311,6 @@ impl ScalarExpr {
|
||||
}
|
||||
|
||||
/// Eval this expression with the given values.
|
||||
///
|
||||
/// TODO(discord9): add tests to make sure `eval_batch` is the same as `eval` in
|
||||
/// most cases
|
||||
pub fn eval(&self, values: &[Value]) -> Result<Value, EvalError> {
|
||||
match self {
|
||||
ScalarExpr::Column(index) => Ok(values[*index].clone()),
|
||||
|
||||
@@ -1,340 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! This module contains utility functions for expressions.
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use datatypes::value::Value;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::error::UnexpectedSnafu;
|
||||
use crate::expr::ScalarExpr;
|
||||
use crate::plan::TypedPlan;
|
||||
use crate::Result;
|
||||
|
||||
/// Find lower bound for time `current` in given `plan` for the time window expr.
|
||||
///
|
||||
/// i.e. for time window expr being `date_bin(INTERVAL '5 minutes', ts) as time_window` and `current="2021-07-01 00:01:01.000"`,
|
||||
/// return `Some("2021-07-01 00:00:00.000")`
|
||||
///
|
||||
/// if `plan` doesn't contain a `TIME INDEX` column, return `None`
|
||||
pub fn find_plan_time_window_expr_lower_bound(
|
||||
plan: &TypedPlan,
|
||||
current: common_time::Timestamp,
|
||||
) -> Result<Option<common_time::Timestamp>> {
|
||||
let typ = plan.schema.typ();
|
||||
let Some(mut time_index) = typ.time_index else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let mut cur_plan = plan;
|
||||
let mut expr_time_index;
|
||||
|
||||
loop {
|
||||
// follow upward and find deepest time index expr that is not a column ref
|
||||
expr_time_index = Some(cur_plan.plan.get_nth_expr(time_index).context(
|
||||
UnexpectedSnafu {
|
||||
reason: "Failed to find time index expr",
|
||||
},
|
||||
)?);
|
||||
|
||||
if let Some(ScalarExpr::Column(i)) = expr_time_index {
|
||||
time_index = i;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
if let Some(input) = cur_plan.plan.get_first_input_plan() {
|
||||
cur_plan = input;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let expr_time_index = expr_time_index.context(UnexpectedSnafu {
|
||||
reason: "Failed to find time index expr",
|
||||
})?;
|
||||
|
||||
let ts_col = expr_time_index
|
||||
.get_all_ref_columns()
|
||||
.first()
|
||||
.cloned()
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "Failed to find time index column",
|
||||
})?;
|
||||
|
||||
find_time_window_lower_bound(&expr_time_index, ts_col, current)
|
||||
}
|
||||
|
||||
/// Find the lower bound of time window in given `expr` and `current` timestamp.
|
||||
///
|
||||
/// i.e. for `current="2021-07-01 00:01:01.000"` and `expr=date_bin(INTERVAL '5 minutes', ts) as time_window` and `ts_col=ts`,
|
||||
/// return `Some("2021-07-01 00:00:00.000")` since it's the lower bound
|
||||
/// of current time window given the current timestamp
|
||||
///
|
||||
/// If this returns `None`, the time window has no lower bound.
|
||||
pub fn find_time_window_lower_bound(
|
||||
expr: &ScalarExpr,
|
||||
ts_col_idx: usize,
|
||||
current: common_time::Timestamp,
|
||||
) -> Result<Option<common_time::Timestamp>> {
|
||||
let all_ref_columns = expr.get_all_ref_columns();
|
||||
|
||||
ensure!(
|
||||
all_ref_columns.contains(&ts_col_idx),
|
||||
UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Expected column {} to be referenced in expression {expr:?}",
|
||||
ts_col_idx
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
ensure!(all_ref_columns.len() == 1, UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Expect only one column to be referenced in expression {expr:?}, found {all_ref_columns:?}"
|
||||
),
|
||||
});
|
||||
|
||||
let permute_map = BTreeMap::from([(ts_col_idx, 0usize)]);
|
||||
|
||||
let mut rewrote_expr = expr.clone();
|
||||
|
||||
rewrote_expr.permute_map(&permute_map)?;
|
||||
|
||||
fn eval_to_timestamp(expr: &ScalarExpr, values: &[Value]) -> Result<common_time::Timestamp> {
|
||||
let val = expr.eval(values)?;
|
||||
if let Value::Timestamp(ts) = val {
|
||||
Ok(ts)
|
||||
} else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Expected timestamp in expression {expr:?} but got {val:?}"),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
}
|
||||
|
||||
let cur_time_window = eval_to_timestamp(&rewrote_expr, &[current.into()])?;
|
||||
|
||||
// search to find the lower bound
|
||||
let mut offset: i64 = 1;
|
||||
let lower_bound;
|
||||
let mut upper_bound = Some(current);
|
||||
// first do an exponential probe to find a range for the binary search
|
||||
loop {
|
||||
let Some(next_val) = current.value().checked_sub(offset) else {
|
||||
// no lower bound
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let prev_time_probe = common_time::Timestamp::new(next_val, current.unit());
|
||||
|
||||
let prev_time_window = eval_to_timestamp(&rewrote_expr, &[prev_time_probe.into()])?;
|
||||
|
||||
match prev_time_window.cmp(&cur_time_window) {
|
||||
Ordering::Less => {
|
||||
lower_bound = Some(prev_time_probe);
|
||||
break;
|
||||
}
|
||||
Ordering::Equal => {
|
||||
upper_bound = Some(prev_time_probe);
|
||||
}
|
||||
Ordering::Greater => {
|
||||
UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Unsupported time window expression {rewrote_expr:?}, expect monotonic increasing for time window expression {expr:?}"
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
}
|
||||
|
||||
let Some(new_offset) = offset.checked_mul(2) else {
|
||||
// no lower bound
|
||||
return Ok(None);
|
||||
};
|
||||
offset = new_offset;
|
||||
}
|
||||
|
||||
// binary search for the lower bound
|
||||
|
||||
ensure!(lower_bound.map(|v|v.unit())==upper_bound.map(|v|v.unit()), UnexpectedSnafu{
|
||||
reason: format!(" unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}"),
|
||||
});
|
||||
|
||||
let output_unit = lower_bound.expect("should have lower bound").unit();
|
||||
|
||||
let mut low = lower_bound.expect("should have lower bound").value();
|
||||
let mut high = upper_bound.expect("should have upper bound").value();
|
||||
while low < high {
|
||||
let mid = (low + high) / 2;
|
||||
let mid_probe = common_time::Timestamp::new(mid, output_unit);
|
||||
let mid_time_window = eval_to_timestamp(&rewrote_expr, &[mid_probe.into()])?;
|
||||
|
||||
match mid_time_window.cmp(&cur_time_window) {
|
||||
Ordering::Less => low = mid + 1,
|
||||
Ordering::Equal => high = mid,
|
||||
Ordering::Greater => UnexpectedSnafu {
|
||||
reason: format!("Binary search failed for time window expression {expr:?}"),
|
||||
}
|
||||
.fail()?,
|
||||
}
|
||||
}
|
||||
|
||||
let final_lower_bound_for_time_window = common_time::Timestamp::new(low, output_unit);
|
||||
|
||||
Ok(Some(final_lower_bound_for_time_window))
|
||||
}
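The deleted helper above combines an exponential backward probe with a binary search. A minimal, self-contained sketch of the same idea (illustrative only: it uses a plain `i64` timestamp and a closure in place of the rewritten `ScalarExpr`):

```rust
/// Returns the smallest `t` that still falls into the same window as `current`,
/// assuming `bin` is a monotonic non-decreasing "window start" function.
fn window_lower_bound(bin: impl Fn(i64) -> i64, current: i64) -> Option<i64> {
    let target = bin(current);

    // Exponential probe backwards until we step out of the current window.
    let mut offset: i64 = 1;
    let mut low;
    loop {
        let probe = current.checked_sub(offset)?; // overflow => treat as "no lower bound"
        if bin(probe) < target {
            low = probe;
            break;
        }
        offset = offset.checked_mul(2)?;
    }
    let mut high = current;

    // Binary search for the first value that still maps to the current window.
    while low < high {
        let mid = low + (high - low) / 2;
        if bin(mid) < target {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    Some(low)
}

// e.g. 5-minute windows over seconds: window_lower_bound(|t| t - t.rem_euclid(300), 61) == Some(0)
```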
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
use crate::plan::{Plan, TypedPlan};
|
||||
use crate::test_utils::{create_test_ctx, create_test_query_engine, sql_to_substrait};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_plan_time_window_lower_bound() {
|
||||
let testcases = [
|
||||
// no time index
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",
|
||||
"2021-07-01 00:01:01.000",
|
||||
None,
|
||||
),
|
||||
// time index
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
"2021-07-01 00:01:01.000",
|
||||
Some("2021-07-01 00:00:00.000"),
|
||||
),
|
||||
// time index with other fields
|
||||
(
|
||||
"SELECT sum(number) as sum_up, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
"2021-07-01 00:01:01.000",
|
||||
Some("2021-07-01 00:00:00.000"),
|
||||
),
|
||||
// time index with other pks
|
||||
(
|
||||
"SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number;",
|
||||
"2021-07-01 00:01:01.000",
|
||||
Some("2021-07-01 00:00:00.000"),
|
||||
),
|
||||
];
|
||||
let engine = create_test_query_engine();
|
||||
|
||||
for (sql, current, expected) in &testcases {
|
||||
let plan = sql_to_substrait(engine.clone(), sql).await;
|
||||
let mut ctx = create_test_ctx();
|
||||
let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let current = common_time::Timestamp::from_str(current, None).unwrap();
|
||||
|
||||
let expected =
|
||||
expected.map(|expected| common_time::Timestamp::from_str(expected, None).unwrap());
|
||||
|
||||
assert_eq!(
|
||||
find_plan_time_window_expr_lower_bound(&flow_plan, current).unwrap(),
|
||||
expected
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timewindow_lower_bound() {
|
||||
let testcases = [
|
||||
(
|
||||
("'5 minutes'", "ts", Some("2021-07-01 00:00:00.000")),
|
||||
"2021-07-01 00:01:01.000",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:01:01.000",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:00:00.000",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
// test edge cases
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:05:00.000",
|
||||
"2021-07-01 00:05:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:04:59.999",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:04:59.999999999",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:04:59.999999999999",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
(
|
||||
("'5 minutes'", "ts", None),
|
||||
"2021-07-01 00:04:59.999999999999999",
|
||||
"2021-07-01 00:00:00.000",
|
||||
),
|
||||
];
|
||||
let engine = create_test_query_engine();
|
||||
|
||||
for (args, current, expected) in testcases {
|
||||
let sql = if let Some(origin) = args.2 {
|
||||
format!(
|
||||
"SELECT date_bin({}, {}, '{origin}') FROM numbers_with_ts;",
|
||||
args.0, args.1
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"SELECT date_bin({}, {}) FROM numbers_with_ts;",
|
||||
args.0, args.1
|
||||
)
|
||||
};
|
||||
let plan = sql_to_substrait(engine.clone(), &sql).await;
|
||||
let mut ctx = create_test_ctx();
|
||||
let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let expr = {
|
||||
let mfp = flow_plan.plan;
|
||||
let Plan::Mfp { mfp, .. } = mfp else {
|
||||
unreachable!()
|
||||
};
|
||||
mfp.expressions[0].clone()
|
||||
};
|
||||
|
||||
let current = common_time::Timestamp::from_str(current, None).unwrap();
|
||||
|
||||
let res = find_time_window_lower_bound(&expr, 1, current).unwrap();
|
||||
|
||||
let expected = Some(common_time::Timestamp::from_str(expected, None).unwrap());
|
||||
|
||||
assert_eq!(res, expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -38,9 +38,6 @@ mod server;
|
||||
mod transform;
|
||||
mod utils;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test_utils;
|
||||
|
||||
pub use adapter::{FlowWorkerManager, FlowWorkerManagerRef, FlownodeOptions};
|
||||
pub use error::{Error, Result};
|
||||
pub use server::{FlownodeBuilder, FlownodeInstance, FlownodeServer, FrontendInvoker};
|
||||
|
||||
@@ -18,10 +18,8 @@
|
||||
mod join;
|
||||
mod reduce;
|
||||
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::expr::{GlobalId, Id, LocalId, MapFilterProject, SafeMfpPlan, ScalarExpr, TypedExpr};
|
||||
use crate::expr::{Id, LocalId, MapFilterProject, SafeMfpPlan, TypedExpr};
|
||||
use crate::plan::join::JoinPlan;
|
||||
pub(crate) use crate::plan::reduce::{AccumulablePlan, AggrWithIndex, KeyValPlan, ReducePlan};
|
||||
use crate::repr::{DiffRow, RelationDesc};
|
||||
@@ -191,78 +189,3 @@ impl Plan {
|
||||
TypedPlan { schema, plan: self }
|
||||
}
|
||||
}
|
||||
|
||||
impl Plan {
|
||||
/// Get nth expr using column ref
|
||||
pub fn get_nth_expr(&self, n: usize) -> Option<ScalarExpr> {
|
||||
match self {
|
||||
Self::Mfp { mfp, .. } => mfp.get_nth_expr(n),
|
||||
Self::Reduce { key_val_plan, .. } => key_val_plan.get_nth_expr(n),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the first input plan if exists
|
||||
pub fn get_first_input_plan(&self) -> Option<&TypedPlan> {
|
||||
match self {
|
||||
Plan::Let { value, .. } => Some(value),
|
||||
Plan::Mfp { input, .. } => Some(input),
|
||||
Plan::Reduce { input, .. } => Some(input),
|
||||
Plan::Join { inputs, .. } => inputs.first(),
|
||||
Plan::Union { inputs, .. } => inputs.first(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get mutable ref to the first input plan if exists
|
||||
pub fn get_mut_first_input_plan(&mut self) -> Option<&mut TypedPlan> {
|
||||
match self {
|
||||
Plan::Let { value, .. } => Some(value),
|
||||
Plan::Mfp { input, .. } => Some(input),
|
||||
Plan::Reduce { input, .. } => Some(input),
|
||||
Plan::Join { inputs, .. } => inputs.first_mut(),
|
||||
Plan::Union { inputs, .. } => inputs.first_mut(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Find all the used collection in the plan
|
||||
pub fn find_used_collection(&self) -> BTreeSet<GlobalId> {
|
||||
fn recur_find_use(plan: &Plan, used: &mut BTreeSet<GlobalId>) {
|
||||
match plan {
|
||||
Plan::Get { id } => {
|
||||
match id {
|
||||
Id::Local(_) => (),
|
||||
Id::Global(g) => {
|
||||
used.insert(*g);
|
||||
}
|
||||
};
|
||||
}
|
||||
Plan::Let { value, body, .. } => {
|
||||
recur_find_use(&value.plan, used);
|
||||
recur_find_use(&body.plan, used);
|
||||
}
|
||||
Plan::Mfp { input, .. } => {
|
||||
recur_find_use(&input.plan, used);
|
||||
}
|
||||
Plan::Reduce { input, .. } => {
|
||||
recur_find_use(&input.plan, used);
|
||||
}
|
||||
Plan::Join { inputs, .. } => {
|
||||
for input in inputs {
|
||||
recur_find_use(&input.plan, used);
|
||||
}
|
||||
}
|
||||
Plan::Union { inputs, .. } => {
|
||||
for input in inputs {
|
||||
recur_find_use(&input.plan, used);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
let mut ret = Default::default();
|
||||
recur_find_use(self, &mut ret);
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::expr::{AggregateExpr, SafeMfpPlan, ScalarExpr};
|
||||
use crate::expr::{AggregateExpr, SafeMfpPlan};
|
||||
|
||||
/// Describe how to extract key-value pair from a `Row`
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
|
||||
@@ -23,16 +23,6 @@ pub struct KeyValPlan {
|
||||
pub val_plan: SafeMfpPlan,
|
||||
}
|
||||
|
||||
impl KeyValPlan {
|
||||
/// Get nth expr using column ref
|
||||
pub fn get_nth_expr(&self, n: usize) -> Option<ScalarExpr> {
|
||||
self.key_plan.get_nth_expr(n).or_else(|| {
|
||||
self.val_plan
|
||||
.get_nth_expr(n - self.key_plan.projection.len())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO(discord9): def&impl of Hierarchical aggregates(for min/max with support to deletion) and
|
||||
/// basic aggregates(for other aggregate functions) and mixed aggregate
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
|
||||
|
||||
@@ -440,7 +440,6 @@ impl FlownodeBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct FrontendInvoker {
|
||||
inserter: Arc<Inserter>,
|
||||
deleter: Arc<Deleter>,
|
||||
@@ -550,42 +549,3 @@ impl FrontendInvoker {
|
||||
self.statement_executor.clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// get all flow ids in this flownode
|
||||
pub(crate) async fn get_all_flow_ids(
|
||||
flow_metadata_manager: &FlowMetadataManagerRef,
|
||||
catalog_manager: &CatalogManagerRef,
|
||||
nodeid: Option<u64>,
|
||||
) -> Result<Vec<u32>, Error> {
|
||||
let ret = if let Some(nodeid) = nodeid {
|
||||
let flow_ids_one_node = flow_metadata_manager
|
||||
.flownode_flow_manager()
|
||||
.flows(nodeid)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(ListFlowsSnafu { id: Some(nodeid) })?;
|
||||
flow_ids_one_node.into_iter().map(|(id, _)| id).collect()
|
||||
} else {
|
||||
let all_catalogs = catalog_manager
|
||||
.catalog_names()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let mut all_flow_ids = vec![];
|
||||
for catalog in all_catalogs {
|
||||
let flows = flow_metadata_manager
|
||||
.flow_name_manager()
|
||||
.flow_names(&catalog)
|
||||
.await
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
all_flow_ids.extend(flows.into_iter().map(|(_, id)| id.flow_id()));
|
||||
}
|
||||
all_flow_ids
|
||||
};
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::RegisterTableRequest;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, NUMBERS_TABLE_ID};
|
||||
use datatypes::data_type::ConcreteDataType as CDT;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::schema::Schema;
|
||||
use datatypes::timestamp::TimestampMillisecond;
|
||||
use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef};
|
||||
use itertools::Itertools;
|
||||
use prost::Message;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use query::QueryEngine;
|
||||
use session::context::QueryContext;
|
||||
/// note here we are using the `substrait_proto_df` crate from the `substrait` module and
|
||||
/// renaming it to `substrait_proto`
|
||||
use substrait::substrait_proto_df as substrait_proto;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use substrait_proto::proto;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::test_util::MemTable;
|
||||
|
||||
use crate::adapter::node_context::IdToNameMap;
|
||||
use crate::adapter::table_source::test::FlowDummyTableSource;
|
||||
use crate::adapter::FlownodeContext;
|
||||
use crate::df_optimizer::apply_df_optimizer;
|
||||
use crate::expr::GlobalId;
|
||||
use crate::transform::register_function_to_query_engine;
|
||||
|
||||
pub fn create_test_ctx() -> FlownodeContext {
|
||||
let mut tri_map = IdToNameMap::new();
|
||||
{
|
||||
let gid = GlobalId::User(0);
|
||||
let name = [
|
||||
"greptime".to_string(),
|
||||
"public".to_string(),
|
||||
"numbers".to_string(),
|
||||
];
|
||||
tri_map.insert(Some(name.clone()), Some(1024), gid);
|
||||
}
|
||||
|
||||
{
|
||||
let gid = GlobalId::User(1);
|
||||
let name = [
|
||||
"greptime".to_string(),
|
||||
"public".to_string(),
|
||||
"numbers_with_ts".to_string(),
|
||||
];
|
||||
tri_map.insert(Some(name.clone()), Some(1025), gid);
|
||||
}
|
||||
|
||||
let dummy_source = FlowDummyTableSource::default();
|
||||
|
||||
let mut ctx = FlownodeContext::new(Box::new(dummy_source));
|
||||
ctx.table_repr = tri_map;
|
||||
ctx.query_context = Some(Arc::new(QueryContext::with("greptime", "public")));
|
||||
|
||||
ctx
|
||||
}
|
||||
|
||||
pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
|
||||
let catalog_list = catalog::memory::new_memory_catalog_manager().unwrap();
|
||||
let req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: NUMBERS_TABLE_NAME.to_string(),
|
||||
table_id: NUMBERS_TABLE_ID,
|
||||
table: NumbersTable::table(NUMBERS_TABLE_ID),
|
||||
};
|
||||
catalog_list.register_table_sync(req).unwrap();
|
||||
|
||||
let schema = vec![
|
||||
datatypes::schema::ColumnSchema::new("number", CDT::uint32_datatype(), false),
|
||||
datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false),
|
||||
];
|
||||
let mut columns = vec![];
|
||||
let numbers = (1..=10).collect_vec();
|
||||
let column: VectorRef = Arc::new(<u32 as Scalar>::VectorType::from_vec(numbers));
|
||||
columns.push(column);
|
||||
|
||||
let ts = (1..=10).collect_vec();
|
||||
let mut builder = TimestampMillisecondVectorBuilder::with_capacity(10);
|
||||
ts.into_iter()
|
||||
.map(|v| builder.push(Some(TimestampMillisecond::new(v))))
|
||||
.count();
|
||||
let column: VectorRef = builder.to_vector_cloned();
|
||||
columns.push(column);
|
||||
|
||||
let schema = Arc::new(Schema::new(schema));
|
||||
let recordbatch = common_recordbatch::RecordBatch::new(schema, columns).unwrap();
|
||||
let table = MemTable::table("numbers_with_ts", recordbatch);
|
||||
|
||||
let req_with_ts = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "numbers_with_ts".to_string(),
|
||||
table_id: 1024,
|
||||
table,
|
||||
};
|
||||
catalog_list.register_table_sync(req_with_ts).unwrap();
|
||||
|
||||
let factory = query::QueryEngineFactory::new(catalog_list, None, None, None, None, false);
|
||||
|
||||
let engine = factory.query_engine();
|
||||
register_function_to_query_engine(&engine);
|
||||
|
||||
assert_eq!("datafusion", engine.name());
|
||||
engine
|
||||
}
|
||||
|
||||
pub async fn sql_to_substrait(engine: Arc<dyn QueryEngine>, sql: &str) -> proto::Plan {
|
||||
// let engine = create_test_query_engine();
|
||||
let stmt = QueryLanguageParser::parse_sql(sql, &QueryContext::arc()).unwrap();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(&stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let plan = apply_df_optimizer(plan).await.unwrap();
|
||||
|
||||
// encode then decode so as to rely on the conversion from logical plan to substrait plan
|
||||
let bytes = DFLogicalSubstraitConvertor {}
|
||||
.encode(&plan, DefaultSerializer)
|
||||
.unwrap();
|
||||
|
||||
proto::Plan::decode(bytes).unwrap()
|
||||
}
|
||||
@@ -5,6 +5,8 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["python"]
|
||||
python = ["dep:script"]
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
@@ -50,6 +52,7 @@ prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
query.workspace = true
|
||||
raft-engine.workspace = true
|
||||
script = { workspace = true, features = ["python"], optional = true }
|
||||
serde.workspace = true
|
||||
servers.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -238,6 +238,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "python")]
|
||||
#[snafu(display("Failed to start script manager"))]
|
||||
StartScriptManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: script::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert value into table: {}", table_name))]
|
||||
Insert {
|
||||
table_name: String,
|
||||
@@ -386,6 +394,9 @@ impl ErrorExt for Error {
|
||||
}
|
||||
Error::FindTableRoute { source, .. } => source.status_code(),
|
||||
|
||||
#[cfg(feature = "python")]
|
||||
Error::StartScriptManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::TableOperation { source, .. } => source.status_code(),
|
||||
|
||||
Error::InFlightWriteBytesExceeded { .. } => StatusCode::RateLimited,
|
||||
|
||||
@@ -21,6 +21,7 @@ mod opentsdb;
|
||||
mod otlp;
|
||||
mod prom_store;
|
||||
mod region_query;
|
||||
mod script;
|
||||
pub mod standalone;
|
||||
|
||||
use std::sync::Arc;
|
||||
@@ -65,7 +66,7 @@ use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::{
|
||||
InfluxdbLineProtocolHandler, LogQueryHandler, OpenTelemetryProtocolHandler,
|
||||
OpentsdbProtocolHandler, PipelineHandler, PromStoreProtocolHandler,
|
||||
OpentsdbProtocolHandler, PipelineHandler, PromStoreProtocolHandler, ScriptHandler,
|
||||
};
|
||||
use servers::server::ServerHandlers;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -87,6 +88,7 @@ use crate::error::{
|
||||
use crate::frontend::FrontendOptions;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::limiter::LimiterRef;
|
||||
use crate::script::ScriptExecutor;
|
||||
|
||||
#[async_trait]
|
||||
pub trait FrontendInstance:
|
||||
@@ -96,6 +98,7 @@ pub trait FrontendInstance:
|
||||
+ InfluxdbLineProtocolHandler
|
||||
+ PromStoreProtocolHandler
|
||||
+ OpenTelemetryProtocolHandler
|
||||
+ ScriptHandler
|
||||
+ PrometheusHandler
|
||||
+ PipelineHandler
|
||||
+ LogQueryHandler
|
||||
@@ -112,6 +115,7 @@ pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
|
||||
pub struct Instance {
|
||||
options: FrontendOptions,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
script_executor: Arc<ScriptExecutor>,
|
||||
pipeline_operator: Arc<PipelineOperator>,
|
||||
statement_executor: Arc<StatementExecutor>,
|
||||
query_engine: QueryEngineRef,
|
||||
@@ -201,6 +205,8 @@ impl FrontendInstance for Instance {
|
||||
heartbeat_task.start().await?;
|
||||
}
|
||||
|
||||
self.script_executor.start(self)?;
|
||||
|
||||
if let Some(t) = self.export_metrics_task.as_ref() {
|
||||
if t.send_by_handler {
|
||||
let handler = ExportMetricHandler::new_handler(
|
||||
@@ -489,8 +495,7 @@ pub fn check_permission(
|
||||
| Statement::Explain(_)
|
||||
| Statement::Tql(_)
|
||||
| Statement::Delete(_)
|
||||
| Statement::DeclareCursor(_)
|
||||
| Statement::Copy(sql::statements::copy::Copy::CopyQueryTo(_)) => {}
|
||||
| Statement::DeclareCursor(_) => {}
|
||||
// database ops won't be checked
|
||||
Statement::CreateDatabase(_)
|
||||
| Statement::ShowDatabases(_)
|
||||
@@ -565,7 +570,6 @@ pub fn check_permission(
|
||||
validate_db_permission!(stmt, query_ctx);
|
||||
}
|
||||
Statement::ShowStatus(_stmt) => {}
|
||||
Statement::ShowSearchPath(_stmt) => {}
|
||||
Statement::DescribeTable(stmt) => {
|
||||
validate_param(stmt.name(), query_ctx)?;
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ use crate::heartbeat::HeartbeatTask;
|
||||
use crate::instance::region_query::FrontendRegionQueryHandler;
|
||||
use crate::instance::Instance;
|
||||
use crate::limiter::Limiter;
|
||||
use crate::script::ScriptExecutor;
|
||||
|
||||
/// The frontend [`Instance`] builder.
|
||||
pub struct FrontendBuilder {
|
||||
@@ -173,6 +174,10 @@ impl FrontendBuilder {
|
||||
)
|
||||
.query_engine();
|
||||
|
||||
let script_executor = Arc::new(
|
||||
ScriptExecutor::new(self.catalog_manager.clone(), query_engine.clone()).await?,
|
||||
);
|
||||
|
||||
let statement_executor = Arc::new(StatementExecutor::new(
|
||||
self.catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
@@ -203,6 +208,7 @@ impl FrontendBuilder {
|
||||
Ok(Instance {
|
||||
options: self.options,
|
||||
catalog_manager: self.catalog_manager,
|
||||
script_executor,
|
||||
pipeline_operator,
|
||||
statement_executor,
|
||||
query_engine,
|
||||
|
||||
src/frontend/src/instance/script.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_query::Output;
|
||||
use servers::error::Error;
|
||||
use servers::interceptor::{ScriptInterceptor, ScriptInterceptorRef};
|
||||
use servers::query_handler::ScriptHandler;
|
||||
use session::context::QueryContextRef;
|
||||
|
||||
use crate::instance::Instance;
|
||||
use crate::metrics;
|
||||
|
||||
#[async_trait]
|
||||
impl ScriptHandler for Instance {
|
||||
async fn insert_script(
|
||||
&self,
|
||||
query_ctx: QueryContextRef,
|
||||
name: &str,
|
||||
script: &str,
|
||||
) -> servers::error::Result<()> {
|
||||
let interceptor_ref = self.plugins.get::<ScriptInterceptorRef<Error>>();
|
||||
interceptor_ref.pre_execute(name, query_ctx.clone())?;
|
||||
|
||||
let _timer = metrics::INSERT_SCRIPTS_ELAPSED.start_timer();
|
||||
self.script_executor
|
||||
.insert_script(query_ctx, name, script)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn execute_script(
|
||||
&self,
|
||||
query_ctx: QueryContextRef,
|
||||
name: &str,
|
||||
params: HashMap<String, String>,
|
||||
) -> servers::error::Result<Output> {
|
||||
let interceptor_ref = self.plugins.get::<ScriptInterceptorRef<Error>>();
|
||||
interceptor_ref.pre_execute(name, query_ctx.clone())?;
|
||||
|
||||
let _timer = metrics::EXECUTE_SCRIPT_ELAPSED.start_timer();
|
||||
self.script_executor
|
||||
.execute_script(query_ctx, name, params)
|
||||
.await
|
||||
}
|
||||
}
|
||||
@@ -20,5 +20,6 @@ pub mod heartbeat;
|
||||
pub mod instance;
|
||||
pub(crate) mod limiter;
|
||||
pub(crate) mod metrics;
|
||||
mod script;
|
||||
pub mod server;
|
||||
pub mod service_config;
|
||||
|
||||
@@ -29,6 +29,19 @@ lazy_static! {
|
||||
pub static ref GRPC_HANDLE_PROMQL_ELAPSED: Histogram = GRPC_HANDLE_QUERY_ELAPSED
|
||||
.with_label_values(&["promql"]);
|
||||
|
||||
/// Timer of handling scripts in the script handler.
|
||||
pub static ref HANDLE_SCRIPT_ELAPSED: HistogramVec = register_histogram_vec!(
|
||||
"greptime_frontend_handle_script_elapsed",
|
||||
"Elapsed time of handling scripts in the script handler",
|
||||
&["type"],
|
||||
vec![0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 60.0, 300.0]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref INSERT_SCRIPTS_ELAPSED: Histogram = HANDLE_SCRIPT_ELAPSED
|
||||
.with_label_values(&["insert"]);
|
||||
pub static ref EXECUTE_SCRIPT_ELAPSED: Histogram = HANDLE_SCRIPT_ELAPSED
|
||||
.with_label_values(&["execute"]);
|
||||
|
||||
/// The number of OpenTelemetry metrics send by frontend node.
|
||||
pub static ref OTLP_METRICS_ROWS: IntCounter = register_int_counter!(
|
||||
"greptime_frontend_otlp_metrics_rows",
|
||||
|
||||
src/frontend/src/script.rs (new file, 294 lines)
@@ -0,0 +1,294 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_query::Output;
|
||||
use query::QueryEngineRef;
|
||||
use session::context::QueryContextRef;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::instance::Instance;
|
||||
|
||||
#[cfg(not(feature = "python"))]
|
||||
mod dummy {
|
||||
use super::*;
|
||||
|
||||
pub struct ScriptExecutor;
|
||||
|
||||
impl ScriptExecutor {
|
||||
pub async fn new(
|
||||
_catalog_manager: CatalogManagerRef,
|
||||
_query_engine: QueryEngineRef,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {})
|
||||
}
|
||||
|
||||
pub fn start(&self, _instance: &Instance) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn insert_script(
|
||||
&self,
|
||||
_query_ctx: QueryContextRef,
|
||||
_name: &str,
|
||||
_script: &str,
|
||||
) -> servers::error::Result<()> {
|
||||
servers::error::NotSupportedSnafu { feat: "script" }.fail()
|
||||
}
|
||||
|
||||
pub async fn execute_script(
|
||||
&self,
|
||||
_query_ctx: QueryContextRef,
|
||||
_name: &str,
|
||||
_params: HashMap<String, String>,
|
||||
) -> servers::error::Result<Output> {
|
||||
servers::error::NotSupportedSnafu { feat: "script" }.fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "python")]
|
||||
mod python {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::ddl_request::Expr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::DdlRequest;
|
||||
use arc_swap::ArcSwap;
|
||||
use catalog::RegisterSystemTableRequest;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_telemetry::{error, info};
|
||||
use script::manager::ScriptManager;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::*;
|
||||
use crate::error::{CatalogSnafu, Error, TableNotFoundSnafu};
|
||||
|
||||
type FrontendGrpcQueryHandlerRef = Arc<dyn GrpcQueryHandler<Error = Error> + Send + Sync>;
|
||||
|
||||
/// A placeholder for the real gRPC handler.
|
||||
/// It is temporary and will be replaced soon.
|
||||
struct DummyHandler;
|
||||
|
||||
impl DummyHandler {
|
||||
fn arc() -> Arc<Self> {
|
||||
Arc::new(Self {})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl GrpcQueryHandler for DummyHandler {
|
||||
type Error = Error;
|
||||
|
||||
async fn do_query(
|
||||
&self,
|
||||
_query: Request,
|
||||
_ctx: QueryContextRef,
|
||||
) -> std::result::Result<Output, Self::Error> {
|
||||
unreachable!();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ScriptExecutor {
|
||||
script_manager: ScriptManager<Error>,
|
||||
grpc_handler: ArcSwap<FrontendGrpcQueryHandlerRef>,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl ScriptExecutor {
|
||||
pub async fn new(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
) -> Result<Self> {
|
||||
let grpc_handler = DummyHandler::arc();
|
||||
Ok(Self {
|
||||
grpc_handler: ArcSwap::new(Arc::new(grpc_handler.clone() as _)),
|
||||
script_manager: ScriptManager::new(grpc_handler as _, query_engine)
|
||||
.await
|
||||
.context(crate::error::StartScriptManagerSnafu)?,
|
||||
catalog_manager,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn start(&self, instance: &Instance) -> Result<()> {
|
||||
let handler = Arc::new(instance.clone());
|
||||
self.grpc_handler.store(Arc::new(handler.clone() as _));
|
||||
self.script_manager
|
||||
.start(handler)
|
||||
.context(crate::error::StartScriptManagerSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create the scripts table for the given catalog if it does not exist.
|
||||
/// The function is idempotent and safe to call more than once for the same catalog.
|
||||
async fn create_scripts_table_if_need(&self, catalog: &str) -> Result<()> {
|
||||
let scripts_table = self.script_manager.get_scripts_table(catalog);
|
||||
|
||||
if scripts_table.is_some() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let RegisterSystemTableRequest {
|
||||
create_table_expr: expr,
|
||||
open_hook,
|
||||
} = self.script_manager.create_table_request(catalog);
|
||||
|
||||
if let Some(table) = self
|
||||
.catalog_manager
|
||||
.table(
|
||||
&expr.catalog_name,
|
||||
&expr.schema_name,
|
||||
&expr.table_name,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context(CatalogSnafu)?
|
||||
{
|
||||
if let Some(open_hook) = open_hook {
|
||||
(open_hook)(table.clone()).await.context(CatalogSnafu)?;
|
||||
}
|
||||
|
||||
self.script_manager.insert_scripts_table(catalog, table);
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
|
||||
let _ = self
|
||||
.grpc_handler
|
||||
.load()
|
||||
.do_query(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(Expr::CreateTable(expr)),
|
||||
}),
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context(CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: table_name.to_string(),
|
||||
})?;
|
||||
|
||||
if let Some(open_hook) = open_hook {
|
||||
(open_hook)(table.clone()).await.context(CatalogSnafu)?;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Created scripts table {}.",
|
||||
table.table_info().full_table_name()
|
||||
);
|
||||
|
||||
self.script_manager.insert_scripts_table(catalog, table);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn insert_script(
|
||||
&self,
|
||||
query_ctx: QueryContextRef,
|
||||
name: &str,
|
||||
script: &str,
|
||||
) -> servers::error::Result<()> {
|
||||
self.create_scripts_table_if_need(query_ctx.current_catalog())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.status_code().should_log_error() {
|
||||
error!(e; "Failed to create scripts table");
|
||||
}
|
||||
|
||||
servers::error::InternalSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
let _s = self
|
||||
.script_manager
|
||||
.insert_and_compile(
|
||||
query_ctx.current_catalog(),
|
||||
&query_ctx.current_schema(),
|
||||
name,
|
||||
script,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.status_code().should_log_error() {
|
||||
error!(e; "Failed to insert script");
|
||||
}
|
||||
|
||||
BoxedError::new(e)
|
||||
})
|
||||
.context(servers::error::InsertScriptSnafu { name })?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn execute_script(
|
||||
&self,
|
||||
query_ctx: QueryContextRef,
|
||||
name: &str,
|
||||
params: HashMap<String, String>,
|
||||
) -> servers::error::Result<Output> {
|
||||
self.create_scripts_table_if_need(query_ctx.current_catalog())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(e; "Failed to create scripts table");
|
||||
servers::error::InternalSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
self.script_manager
|
||||
.execute(
|
||||
query_ctx.current_catalog(),
|
||||
&query_ctx.current_schema(),
|
||||
name,
|
||||
params,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.status_code().should_log_error() {
|
||||
error!(e; "Failed to execute script");
|
||||
}
|
||||
|
||||
BoxedError::new(e)
|
||||
})
|
||||
.context(servers::error::ExecuteScriptSnafu { name })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "python"))]
|
||||
pub use self::dummy::*;
|
||||
#[cfg(feature = "python")]
|
||||
pub use self::python::*;
|
||||
@@ -78,8 +78,10 @@ where
|
||||
}
|
||||
|
||||
pub fn http_server_builder(&self, opts: &FrontendOptions) -> HttpServerBuilder {
|
||||
let mut builder = HttpServerBuilder::new(opts.http.clone())
|
||||
.with_sql_handler(ServerSqlQueryHandlerAdapter::arc(self.instance.clone()));
|
||||
let mut builder = HttpServerBuilder::new(opts.http.clone()).with_sql_handler(
|
||||
ServerSqlQueryHandlerAdapter::arc(self.instance.clone()),
|
||||
Some(self.instance.clone()),
|
||||
);
|
||||
|
||||
let validator = self.plugins.get::<LogValidatorRef>();
|
||||
let ingest_interceptor = self.plugins.get::<LogIngestInterceptorRef<ServerError>>();
|
||||
|
||||
@@ -22,6 +22,7 @@ fst.workspace = true
|
||||
futures.workspace = true
|
||||
greptime-proto.workspace = true
|
||||
mockall.workspace = true
|
||||
parquet.workspace = true
|
||||
pin-project.workspace = true
|
||||
prost.workspace = true
|
||||
regex.workspace = true
|
||||
|
||||
@@ -45,7 +45,7 @@ pub struct BloomFilterMeta {
}

/// The location of the bloom filter segment in the file.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, Hash, PartialEq, Eq)]
#[derive(Debug, Serialize, Deserialize, Clone, Hash, PartialEq, Eq)]
pub struct BloomFilterSegmentLocation {
/// The offset of the bloom filter segment in the file.
pub offset: u64,
@@ -12,12 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::ops::Range;
use std::collections::{BTreeMap, HashSet};

use parquet::arrow::arrow_reader::RowSelection;
use parquet::file::metadata::RowGroupMetaData;

use crate::bloom_filter::error::Result;
use crate::bloom_filter::reader::BloomFilterReader;
use crate::bloom_filter::{BloomFilterMeta, Bytes};
use crate::bloom_filter::{BloomFilterMeta, BloomFilterSegmentLocation, Bytes};

/// Enumerates types of predicates for value filtering.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Predicate {
/// Predicate for matching values in a list.
InList(InListPredicate),
}

/// `InListPredicate` contains a list of acceptable values. A value needs to match at least
/// one of the elements (logical OR semantic) for the predicate to be satisfied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InListPredicate {
/// List of acceptable values.
pub list: HashSet<Bytes>,
}

pub struct BloomFilterApplier {
reader: Box<dyn BloomFilterReader + Send>,
@@ -25,132 +42,92 @@ pub struct BloomFilterApplier {
}

impl BloomFilterApplier {
pub async fn new(reader: Box<dyn BloomFilterReader + Send>) -> Result<Self> {
pub async fn new(mut reader: Box<dyn BloomFilterReader + Send>) -> Result<Self> {
let meta = reader.metadata().await?;

Ok(Self { reader, meta })
}

/// Searches ranges of rows that match the given probes in the given search range.
/// Searches for matching row groups using bloom filters.
///
/// This method applies bloom filter index to eliminate row groups that definitely
/// don't contain the searched values. It works by:
///
/// 1. Computing prefix sums for row counts
/// 2. Calculating bloom filter segment locations for each row group
/// 1. A row group may span multiple bloom filter segments
/// 3. Probing bloom filter segments
/// 4. Removing non-matching row groups from the basement
/// 1. If a row group doesn't match any bloom filter segment with any probe, it is removed
///
/// # Note
/// The method modifies the `basement` map in-place by removing row groups that
/// don't match the bloom filter criteria.
pub async fn search(
&mut self,
probes: &HashSet<Bytes>,
search_range: Range<usize>,
) -> Result<Vec<Range<usize>>> {
let rows_per_segment = self.meta.rows_per_segment;
let start_seg = search_range.start / rows_per_segment;
let end_seg = search_range.end.div_ceil(rows_per_segment);
row_group_metas: &[RowGroupMetaData],
basement: &mut BTreeMap<usize, Option<RowSelection>>,
) -> Result<()> {
// 0. Fast path - if basement is empty return empty vec
if basement.is_empty() {
return Ok(());
}

let locs = &self.meta.bloom_filter_segments[start_seg..end_seg];
let bfs = self.reader.bloom_filter_vec(locs).await?;
// 1. Compute prefix sum for row counts
let mut sum = 0usize;
let mut prefix_sum = Vec::with_capacity(row_group_metas.len() + 1);
prefix_sum.push(0usize);
for meta in row_group_metas {
sum += meta.num_rows() as usize;
prefix_sum.push(sum);
}

let mut ranges: Vec<Range<usize>> = Vec::with_capacity(end_seg - start_seg);
for (seg_id, bloom) in (start_seg..end_seg).zip(bfs) {
let start = seg_id * rows_per_segment;
for probe in probes {
if bloom.contains(probe) {
let end = (start + rows_per_segment).min(search_range.end);
let start = start.max(search_range.start);
// 2. Calculate bloom filter segment locations
let mut row_groups_to_remove = HashSet::new();
for &row_group_idx in basement.keys() {
// TODO(ruihang): support further filter over row selection

match ranges.last_mut() {
Some(last) if last.end == start => {
last.end = end;
}
_ => {
ranges.push(start..end);
}
// todo: dedup & overlap
let rows_range_start = prefix_sum[row_group_idx] / self.meta.rows_per_segment;
let rows_range_end = (prefix_sum[row_group_idx + 1] as f64
/ self.meta.rows_per_segment as f64)
.ceil() as usize;

let mut is_any_range_hit = false;
for i in rows_range_start..rows_range_end {
// 3. Probe each bloom filter segment
let loc = BloomFilterSegmentLocation {
offset: self.meta.bloom_filter_segments[i].offset,
size: self.meta.bloom_filter_segments[i].size,
elem_count: self.meta.bloom_filter_segments[i].elem_count,
};
let bloom = self.reader.bloom_filter(&loc).await?;

// Check if any probe exists in bloom filter
let mut matches = false;
for probe in probes {
if bloom.contains(probe) {
matches = true;
break;
}
}

is_any_range_hit |= matches;
if matches {
break;
}
}
if !is_any_range_hit {
row_groups_to_remove.insert(row_group_idx);
}
}

Ok(ranges)
}
}

#[cfg(test)]
mod tests {
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;

use futures::io::Cursor;

use super::*;
use crate::bloom_filter::creator::BloomFilterCreator;
use crate::bloom_filter::reader::BloomFilterReaderImpl;
use crate::external_provider::MockExternalTempFileProvider;

#[tokio::test]
#[allow(clippy::single_range_in_vec_init)]
async fn test_appliter() {
let mut writer = Cursor::new(Vec::new());
let mut creator = BloomFilterCreator::new(
4,
Arc::new(MockExternalTempFileProvider::new()),
Arc::new(AtomicUsize::new(0)),
None,
);

let rows = vec![
vec![b"row00".to_vec(), b"seg00".to_vec(), b"overl".to_vec()],
vec![b"row01".to_vec(), b"seg00".to_vec(), b"overl".to_vec()],
vec![b"row02".to_vec(), b"seg00".to_vec(), b"overl".to_vec()],
vec![b"row03".to_vec(), b"seg00".to_vec(), b"overl".to_vec()],
vec![b"row04".to_vec(), b"seg01".to_vec(), b"overl".to_vec()],
vec![b"row05".to_vec(), b"seg01".to_vec(), b"overl".to_vec()],
vec![b"row06".to_vec(), b"seg01".to_vec(), b"overp".to_vec()],
vec![b"row07".to_vec(), b"seg01".to_vec(), b"overp".to_vec()],
vec![b"row08".to_vec(), b"seg02".to_vec(), b"overp".to_vec()],
vec![b"row09".to_vec(), b"seg02".to_vec(), b"overp".to_vec()],
vec![b"row10".to_vec(), b"seg02".to_vec(), b"overp".to_vec()],
vec![b"row11".to_vec(), b"seg02".to_vec(), b"overp".to_vec()],
];

let cases = vec![
(vec![b"row00".to_vec()], 0..12, vec![0..4]), // search one row in full range
(vec![b"row05".to_vec()], 4..8, vec![4..8]), // search one row in partial range
(vec![b"row03".to_vec()], 4..8, vec![]), // search for a row that doesn't exist in the partial range
(
vec![b"row01".to_vec(), b"row06".to_vec()],
0..12,
vec![0..8],
), // search multiple rows in multiple ranges
(
vec![b"row01".to_vec(), b"row11".to_vec()],
0..12,
vec![0..4, 8..12],
), // search multiple rows in multiple ranges
(vec![b"row99".to_vec()], 0..12, vec![]), // search for a row that doesn't exist in the full range
(vec![b"row00".to_vec()], 12..12, vec![]), // search in an empty range
(
vec![b"row04".to_vec(), b"row05".to_vec()],
0..12,
vec![4..8],
), // search multiple rows in same segment
(vec![b"seg01".to_vec()], 0..12, vec![4..8]), // search rows in a segment
(vec![b"seg01".to_vec()], 6..12, vec![6..8]), // search rows in a segment in partial range
(vec![b"overl".to_vec()], 0..12, vec![0..8]), // search rows in multiple segments
(vec![b"overl".to_vec()], 2..12, vec![2..8]), // search range starts from the middle of a segment
(vec![b"overp".to_vec()], 0..10, vec![4..10]), // search range ends at the middle of a segment
];

for row in rows {
creator.push_row_elems(row).await.unwrap();
}

creator.finish(&mut writer).await.unwrap();

let bytes = writer.into_inner();

let reader = BloomFilterReaderImpl::new(bytes);

let mut applier = BloomFilterApplier::new(Box::new(reader)).await.unwrap();

for (probes, search_range, expected) in cases {
let probes: HashSet<Bytes> = probes.into_iter().collect();
let ranges = applier.search(&probes, search_range).await.unwrap();
assert_eq!(ranges, expected);
}
// 4. Remove row groups that do not match any bloom filter segment
for row_group_idx in row_groups_to_remove {
basement.remove(&row_group_idx);
}

Ok(())
}
}
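The new `search` above replaces the range-based probing with a row-group pass: it builds a prefix sum of row counts, maps each row group onto the bloom filter segments it overlaps, and removes groups whose segments match no probe from `basement`. Below is a minimal, standalone sketch of that prefix-sum-to-segment mapping; the function name and the demo values are illustrative only, not the crate's API.

/// Sketch: given per-row-group row counts and the index's rows_per_segment,
/// return the half-open range of bloom filter segment indices that each row
/// group overlaps (mirrors the prefix-sum step in the hunk above).
fn segments_for_row_groups(row_counts: &[usize], rows_per_segment: usize) -> Vec<std::ops::Range<usize>> {
    // prefix_sum[i] is the index of the first row of row group i.
    let mut prefix_sum = Vec::with_capacity(row_counts.len() + 1);
    prefix_sum.push(0usize);
    for count in row_counts {
        prefix_sum.push(prefix_sum.last().unwrap() + count);
    }

    // A row group spanning rows [start, end) overlaps segments
    // [start / rows_per_segment, ceil(end / rows_per_segment)).
    (0..row_counts.len())
        .map(|i| {
            let first_seg = prefix_sum[i] / rows_per_segment;
            let last_seg = prefix_sum[i + 1].div_ceil(rows_per_segment);
            first_seg..last_seg
        })
        .collect()
}

fn main() {
    // Three row groups of 4 rows each, 4 rows per segment: one segment per group.
    assert_eq!(segments_for_row_groups(&[4, 4, 4], 4), vec![0..1, 1..2, 2..3]);
    // A 6-row group straddles two segments.
    assert_eq!(segments_for_row_groups(&[6, 2], 4), vec![0..2, 1..2]);
}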
@@ -29,16 +29,16 @@ use crate::bloom_filter::{BloomFilterMeta, BloomFilterSegmentLocation, SEED};
const BLOOM_META_LEN_SIZE: u64 = 4;

/// Default prefetch size of bloom filter meta.
pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB

/// `BloomFilterReader` reads the bloom filter from the file.
#[async_trait]
pub trait BloomFilterReader: Sync {
pub trait BloomFilterReader {
/// Reads range of bytes from the file.
async fn range_read(&self, offset: u64, size: u32) -> Result<Bytes>;
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes>;

/// Reads bunch of ranges from the file.
async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
let mut results = Vec::with_capacity(ranges.len());
for range in ranges {
let size = (range.end - range.start) as u32;
@@ -49,10 +49,10 @@ pub trait BloomFilterReader: Sync {
}

/// Reads the meta information of the bloom filter.
async fn metadata(&self) -> Result<BloomFilterMeta>;
async fn metadata(&mut self) -> Result<BloomFilterMeta>;

/// Reads a bloom filter with the given location.
async fn bloom_filter(&self, loc: &BloomFilterSegmentLocation) -> Result<BloomFilter> {
async fn bloom_filter(&mut self, loc: &BloomFilterSegmentLocation) -> Result<BloomFilter> {
let bytes = self.range_read(loc.offset, loc.size as _).await?;
let vec = bytes
.chunks_exact(std::mem::size_of::<u64>())
@@ -63,31 +63,6 @@ pub trait BloomFilterReader: Sync {
.expected_items(loc.elem_count);
Ok(bm)
}

async fn bloom_filter_vec(
&self,
locs: &[BloomFilterSegmentLocation],
) -> Result<Vec<BloomFilter>> {
let ranges = locs
.iter()
.map(|l| l.offset..l.offset + l.size)
.collect::<Vec<_>>();
let bss = self.read_vec(&ranges).await?;

let mut result = Vec::with_capacity(bss.len());
for (bs, loc) in bss.into_iter().zip(locs.iter()) {
let vec = bs
.chunks_exact(std::mem::size_of::<u64>())
.map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()))
.collect();
let bm = BloomFilter::from_vec(vec)
.seed(&SEED)
.expected_items(loc.elem_count);
result.push(bm);
}

Ok(result)
}
}

/// `BloomFilterReaderImpl` reads the bloom filter from the file.
@@ -105,23 +80,23 @@ impl<R: RangeReader> BloomFilterReaderImpl<R> {

#[async_trait]
impl<R: RangeReader> BloomFilterReader for BloomFilterReaderImpl<R> {
async fn range_read(&self, offset: u64, size: u32) -> Result<Bytes> {
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes> {
self.reader
.read(offset..offset + size as u64)
.await
.context(IoSnafu)
}

async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
self.reader.read_vec(ranges).await.context(IoSnafu)
}

async fn metadata(&self) -> Result<BloomFilterMeta> {
async fn metadata(&mut self) -> Result<BloomFilterMeta> {
let metadata = self.reader.metadata().await.context(IoSnafu)?;
let file_size = metadata.content_length;

let mut meta_reader =
BloomFilterMetaReader::new(&self.reader, file_size, Some(DEFAULT_PREFETCH_SIZE));
BloomFilterMetaReader::new(&mut self.reader, file_size, Some(DEFAULT_PREFETCH_SIZE));
meta_reader.metadata().await
}
}
@@ -275,7 +250,7 @@ mod tests {
async fn test_bloom_filter_reader() {
let bytes = mock_bloom_filter_bytes().await;

let reader = BloomFilterReaderImpl::new(bytes);
let mut reader = BloomFilterReaderImpl::new(bytes);
let meta = reader.metadata().await.unwrap();

assert_eq!(meta.bloom_filter_segments.len(), 2);
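The `bloom_filter` and `bloom_filter_vec` default methods above decode each fetched byte range as little-endian u64 words before handing them to `BloomFilter::from_vec`. A std-only sketch of that decoding step follows; the helper name is illustrative, not part of the crate.

/// Sketch: decode a little-endian byte buffer into the Vec<u64> word form
/// that the bloom filter expects (same chunks_exact + from_le_bytes pattern
/// used in the trait's default methods).
fn decode_u64_words(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(std::mem::size_of::<u64>())
        .map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()))
        .collect()
}

fn main() {
    let words = decode_u64_words(&[1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]);
    assert_eq!(words, vec![1, 2]);
}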
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::VecDeque;
use std::ops::Range;
use std::sync::Arc;

@@ -34,10 +33,10 @@ mod footer;
#[async_trait]
pub trait InvertedIndexReader: Send + Sync {
/// Seeks to given offset and reads data with exact size as provided.
async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>>;
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>;

/// Reads the bytes in the given ranges.
async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
let data = self
@@ -49,35 +48,16 @@ pub trait InvertedIndexReader: Send + Sync {
}

/// Retrieves metadata of all inverted indices stored within the blob.
async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>>;
async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>>;

/// Retrieves the finite state transducer (FST) map from the given offset and size.
async fn fst(&self, offset: u64, size: u32) -> Result<FstMap> {
async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap> {
let fst_data = self.range_read(offset, size).await?;
FstMap::new(fst_data).context(DecodeFstSnafu)
}

/// Retrieves the multiple finite state transducer (FST) maps from the given ranges.
async fn fst_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<FstMap>> {
self.read_vec(ranges)
.await?
.into_iter()
.map(|bytes| FstMap::new(bytes.to_vec()).context(DecodeFstSnafu))
.collect::<Result<Vec<_>>>()
}

/// Retrieves the bitmap from the given offset and size.
async fn bitmap(&self, offset: u64, size: u32) -> Result<BitVec> {
async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec> {
self.range_read(offset, size).await.map(BitVec::from_vec)
}

/// Retrieves the multiple bitmaps from the given ranges.
async fn bitmap_deque(&mut self, ranges: &[Range<u64>]) -> Result<VecDeque<BitVec>> {
Ok(self
.read_vec(ranges)
.await?
.into_iter()
.map(|bytes| BitVec::from_slice(bytes.as_ref()))
.collect::<VecDeque<_>>())
}
}
@@ -52,7 +52,7 @@ impl<R> InvertedIndexBlobReader<R> {

#[async_trait]
impl<R: RangeReader + Sync> InvertedIndexReader for InvertedIndexBlobReader<R> {
async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>> {
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
let buf = self
.source
.read(offset..offset + size as u64)
@@ -61,16 +61,16 @@ impl<R: RangeReader + Sync> InvertedIndexReader for InvertedIndexBlobReader<R> {
Ok(buf.into())
}

async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
self.source.read_vec(ranges).await.context(CommonIoSnafu)
}

async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>> {
async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> {
let metadata = self.source.metadata().await.context(CommonIoSnafu)?;
let blob_size = metadata.content_length;
Self::validate_blob_size(blob_size)?;

let mut footer_reader = InvertedIndexFooterReader::new(&self.source, blob_size)
let mut footer_reader = InvertedIndexFooterReader::new(&mut self.source, blob_size)
.with_prefetch_size(DEFAULT_PREFETCH_SIZE);
footer_reader.metadata().await.map(Arc::new)
}
@@ -160,7 +160,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_metadata() {
let blob = create_inverted_index_blob();
let blob_reader = InvertedIndexBlobReader::new(blob);
let mut blob_reader = InvertedIndexBlobReader::new(blob);

let metas = blob_reader.metadata().await.unwrap();
assert_eq!(metas.metas.len(), 2);
@@ -187,7 +187,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_fst() {
let blob = create_inverted_index_blob();
let blob_reader = InvertedIndexBlobReader::new(blob);
let mut blob_reader = InvertedIndexBlobReader::new(blob);

let metas = blob_reader.metadata().await.unwrap();
let meta = metas.metas.get("tag0").unwrap();
@@ -219,7 +219,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_bitmap() {
let blob = create_inverted_index_blob();
let blob_reader = InvertedIndexBlobReader::new(blob);
let mut blob_reader = InvertedIndexBlobReader::new(blob);

let metas = blob_reader.metadata().await.unwrap();
let meta = metas.metas.get("tag0").unwrap();
@@ -24,7 +24,7 @@ use crate::inverted_index::error::{
};
use crate::inverted_index::format::FOOTER_PAYLOAD_SIZE_SIZE;

pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB

/// InvertedIndexFooterReader is for reading the footer section of the blob.
pub struct InvertedIndexFooterReader<R> {
@@ -177,11 +177,11 @@ mod tests {
..Default::default()
};

let payload_buf = create_test_payload(meta);
let mut payload_buf = create_test_payload(meta);
let blob_size = payload_buf.len() as u64;

for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}
@@ -205,7 +205,7 @@ mod tests {

for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
let blob_size = payload_buf.len() as u64;
let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}
@@ -224,11 +224,11 @@ mod tests {
..Default::default()
};

let payload_buf = create_test_payload(meta);
let mut payload_buf = create_test_payload(meta);
let blob_size = payload_buf.len() as u64;

for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}

@@ -118,7 +118,7 @@ mod tests {
.await
.unwrap();

let reader = InvertedIndexBlobReader::new(blob);
let mut reader = InvertedIndexBlobReader::new(blob);
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.total_row_count, 8);
assert_eq!(metadata.segment_row_count, 1);
@@ -158,7 +158,7 @@ mod tests {
.await
.unwrap();

let reader = InvertedIndexBlobReader::new(blob);
let mut reader = InvertedIndexBlobReader::new(blob);
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.total_row_count, 8);
assert_eq!(metadata.segment_row_count, 1);
@@ -18,75 +18,55 @@ use greptime_proto::v1::index::InvertedIndexMeta;
use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;

/// `ParallelFstValuesMapper` enables parallel mapping of multiple FST value groups to their
/// corresponding bitmaps within an inverted index.
/// `FstValuesMapper` maps FST-encoded u64 values to their corresponding bitmaps
/// within an inverted index.
///
/// This mapper processes multiple groups of FST values in parallel, where each group is associated
/// with its own metadata. It optimizes bitmap retrieval by batching requests across all groups
/// before combining them into separate result bitmaps.
pub struct ParallelFstValuesMapper<'a> {
/// The higher 32 bits of each u64 value represent the
/// bitmap offset and the lower 32 bits represent its size. This mapper uses these
/// combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
pub struct FstValuesMapper<'a> {
/// `reader` retrieves bitmap data using offsets and sizes from FST values.
reader: &'a mut dyn InvertedIndexReader,

/// `metadata` provides context for interpreting the index structures.
metadata: &'a InvertedIndexMeta,
}

impl<'a> ParallelFstValuesMapper<'a> {
pub fn new(reader: &'a mut dyn InvertedIndexReader) -> Self {
Self { reader }
impl<'a> FstValuesMapper<'a> {
pub fn new(
reader: &'a mut dyn InvertedIndexReader,
metadata: &'a InvertedIndexMeta,
) -> FstValuesMapper<'a> {
FstValuesMapper { reader, metadata }
}

pub async fn map_values_vec(
&mut self,
value_and_meta_vec: &[(Vec<u64>, &'a InvertedIndexMeta)],
) -> Result<Vec<BitVec>> {
let groups = value_and_meta_vec
.iter()
.map(|(values, _)| values.len())
.collect::<Vec<_>>();
let len = groups.iter().sum::<usize>();
let mut fetch_ranges = Vec::with_capacity(len);
/// Maps an array of FST values to a `BitVec` by retrieving and combining bitmaps.
pub async fn map_values(&mut self, values: &[u64]) -> Result<BitVec> {
let mut bitmap = BitVec::new();

for (values, meta) in value_and_meta_vec {
for value in values {
// The higher 32 bits of each u64 value represent the
// bitmap offset and the lower 32 bits represent its size. This mapper uses these
// combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);
fetch_ranges.push(
meta.base_offset + relative_offset as u64
..meta.base_offset + relative_offset as u64 + size as u64,
);
for value in values {
// relative_offset (higher 32 bits), size (lower 32 bits)
let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);

let bm = self
.reader
.bitmap(self.metadata.base_offset + relative_offset as u64, size)
.await?;

// Ensure the longest BitVec is the left operand to prevent truncation during OR.
if bm.len() > bitmap.len() {
bitmap = bm | bitmap
} else {
bitmap |= bm
}
}

if fetch_ranges.is_empty() {
return Ok(vec![BitVec::new()]);
}

common_telemetry::debug!("fetch ranges: {:?}", fetch_ranges);
let mut bitmaps = self.reader.bitmap_deque(&fetch_ranges).await?;
let mut output = Vec::with_capacity(groups.len());

for counter in groups {
let mut bitmap = BitVec::new();
for _ in 0..counter {
let bm = bitmaps.pop_front().unwrap();
if bm.len() > bitmap.len() {
bitmap = bm | bitmap
} else {
bitmap |= bm
}
}

output.push(bitmap);
}

Ok(output)
Ok(bitmap)
}
}

#[cfg(test)]
mod tests {
use std::collections::VecDeque;

use common_base::bit_vec::prelude::*;

use super::*;
@@ -97,70 +77,38 @@ mod tests {
}

#[tokio::test]
async fn test_map_values_vec() {
async fn test_map_values() {
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader.expect_bitmap_deque().returning(|ranges| {
let mut output = VecDeque::new();
for range in ranges {
let offset = range.start;
let size = range.end - range.start;
match (offset, size) {
(1, 1) => output.push_back(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
(2, 1) => output.push_back(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
_ => unreachable!(),
}
}
Ok(output)
});
mock_reader
.expect_bitmap()
.returning(|offset, size| match (offset, size) {
(1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
(2, 1) => Ok(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
_ => unreachable!(),
});

let meta = InvertedIndexMeta::default();
let mut values_mapper = ParallelFstValuesMapper::new(&mut mock_reader);
let mut values_mapper = FstValuesMapper::new(&mut mock_reader, &meta);

let result = values_mapper.map_values(&[]).await.unwrap();
assert_eq!(result.count_ones(), 0);

let result = values_mapper.map_values(&[value(1, 1)]).await.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);

let result = values_mapper.map_values(&[value(2, 1)]).await.unwrap();
assert_eq!(result, bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);

let result = values_mapper
.map_values_vec(&[(vec![], &meta)])
.map_values(&[value(1, 1), value(2, 1)])
.await
.unwrap();
assert_eq!(result[0].count_ones(), 0);
assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);

let result = values_mapper
.map_values_vec(&[(vec![value(1, 1)], &meta)])
.map_values(&[value(2, 1), value(1, 1)])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);

let result = values_mapper
.map_values_vec(&[(vec![value(2, 1)], &meta)])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);

let result = values_mapper
.map_values_vec(&[(vec![value(1, 1), value(2, 1)], &meta)])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);

let result = values_mapper
.map_values_vec(&[(vec![value(2, 1), value(1, 1)], &meta)])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);

let result = values_mapper
.map_values_vec(&[(vec![value(2, 1)], &meta), (vec![value(1, 1)], &meta)])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);
assert_eq!(result[1], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);

let result = values_mapper
.map_values_vec(&[
(vec![value(2, 1), value(1, 1)], &meta),
(vec![value(1, 1)], &meta),
])
.await
.unwrap();
assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
assert_eq!(result[1], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
}
}
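Both the old `FstValuesMapper` and the new `ParallelFstValuesMapper` treat each FST value as a packed (relative offset, size) pair for a bitmap; the crate unpacks it with `bytemuck::cast::<u64, [u32; 2]>`. The shift-based sketch below illustrates the packing idea as the doc comment describes it (offset in the high 32 bits, size in the low 32 bits); the exact lane order produced by `bytemuck::cast` is an implementation detail of the crate, so treat this as an assumption-laden illustration rather than the crate's encoding.

/// Sketch only: pack a bitmap's relative offset and size into one u64,
/// following the layout described in the doc comment (offset high, size low).
fn pack(relative_offset: u32, size: u32) -> u64 {
    ((relative_offset as u64) << 32) | size as u64
}

/// Sketch only: recover (relative_offset, size) from the packed value.
fn unpack(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, value as u32)
}

fn main() {
    let v = pack(2, 1);
    assert_eq!(unpack(v), (2, 1));
    // The mapper then reads `size` bytes at `base_offset + relative_offset`
    // and ORs the fetched bitmaps together, keeping the longer operand on the
    // left so no trailing bits are truncated.
}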
@@ -23,7 +23,7 @@ use crate::inverted_index::format::reader::InvertedIndexReader;
use crate::inverted_index::search::fst_apply::{
FstApplier, IntersectionFstApplier, KeysFstApplier,
};
use crate::inverted_index::search::fst_values_mapper::ParallelFstValuesMapper;
use crate::inverted_index::search::fst_values_mapper::FstValuesMapper;
use crate::inverted_index::search::index_apply::{
ApplyOutput, IndexApplier, IndexNotFoundStrategy, SearchContext,
};
@@ -57,10 +57,11 @@ impl IndexApplier for PredicatesIndexApplier {

let mut bitmap = Self::bitmap_full_range(&metadata);
// TODO(zhongzc): optimize the order of applying to make it quicker to return empty.
let mut appliers = Vec::with_capacity(self.fst_appliers.len());
let mut fst_ranges = Vec::with_capacity(self.fst_appliers.len());

for (name, fst_applier) in &self.fst_appliers {
if bitmap.count_ones() == 0 {
break;
}

let Some(meta) = metadata.metas.get(name) else {
match context.index_not_found_strategy {
IndexNotFoundStrategy::ReturnEmpty => {
@@ -74,31 +75,14 @@ impl IndexApplier for PredicatesIndexApplier {
}
}
};

let fst_offset = meta.base_offset + meta.relative_fst_offset as u64;
let fst_size = meta.fst_size as u64;
appliers.push((fst_applier, meta));
fst_ranges.push(fst_offset..fst_offset + fst_size);
}
let fst_size = meta.fst_size;
let fst = reader.fst(fst_offset, fst_size).await?;
let values = fst_applier.apply(&fst);

if fst_ranges.is_empty() {
output.matched_segment_ids = bitmap;
return Ok(output);
}

let fsts = reader.fst_vec(&fst_ranges).await?;
let value_and_meta_vec = fsts
.into_iter()
.zip(appliers)
.map(|(fst, (fst_applier, meta))| (fst_applier.apply(&fst), meta))
.collect::<Vec<_>>();

let mut mapper = ParallelFstValuesMapper::new(reader);
let bm_vec = mapper.map_values_vec(&value_and_meta_vec).await?;

for bm in bm_vec {
if bitmap.count_ones() == 0 {
break;
}
let mut mapper = FstValuesMapper::new(&mut *reader, meta);
let bm = mapper.map_values(&values).await?;

bitmap &= bm;
}
@@ -164,7 +148,6 @@ impl TryFrom<Vec<(String, Vec<Predicate>)>> for PredicatesIndexApplier {

#[cfg(test)]
mod tests {
use std::collections::VecDeque;
use std::sync::Arc;

use common_base::bit_vec::prelude::*;
@@ -221,19 +204,15 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0)])));
mock_reader.expect_fst_vec().returning(|_ranges| {
Ok(vec![FstMap::from_iter([(
b"tag-0_value-0",
fst_value(2, 1),
)])
.unwrap()])
});

mock_reader.expect_bitmap_deque().returning(|range| {
assert_eq!(range.len(), 1);
assert_eq!(range[0], 2..3);
Ok(VecDeque::from([bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]]))
mock_reader.expect_fst().returning(|_offset, _size| {
Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(2, 1))]).unwrap())
});
mock_reader
.expect_bitmap()
.returning(|offset, size| match (offset, size) {
(2, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
_ => unreachable!(),
});
let output = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
@@ -248,12 +227,8 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0)])));
mock_reader.expect_fst_vec().returning(|_range| {
Ok(vec![FstMap::from_iter([(
b"tag-0_value-1",
fst_value(2, 1),
)])
.unwrap()])
mock_reader.expect_fst().returning(|_offset, _size| {
Ok(FstMap::from_iter([(b"tag-0_value-1", fst_value(2, 1))]).unwrap())
});
let output = applier
.apply(SearchContext::default(), &mut mock_reader)
@@ -277,33 +252,20 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0), ("tag-1", 1)])));
mock_reader.expect_fst_vec().returning(|ranges| {
let mut output = vec![];
for range in ranges {
match range.start {
0 => output
.push(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
1 => output
.push(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
_ => unreachable!(),
}
}
Ok(output)
});
mock_reader.expect_bitmap_deque().returning(|ranges| {
let mut output = VecDeque::new();
for range in ranges {
let offset = range.start;
let size = range.end - range.start;
match (offset, size) {
(1, 1) => output.push_back(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
(2, 1) => output.push_back(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
_ => unreachable!(),
}
}

Ok(output)
});
mock_reader
.expect_fst()
.returning(|offset, _size| match offset {
0 => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
1 => Ok(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
_ => unreachable!(),
});
mock_reader
.expect_bitmap()
.returning(|offset, size| match (offset, size) {
(1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
(2, 1) => Ok(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
_ => unreachable!(),
});

let output = applier
.apply(SearchContext::default(), &mut mock_reader)
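The reworked applier gathers every predicate's FST range first, fetches the FSTs and bitmaps in batches, and then ANDs each per-tag bitmap into the running segment bitmap, bailing out as soon as it becomes empty. A simplified sketch of that intersection loop follows, using plain Vec<bool> in place of the crate's BitVec; names and types are illustrative only.

/// Sketch: intersect per-tag match bitmaps into the final segment bitmap,
/// short-circuiting once nothing can match any more (the applier's
/// `bitmap.count_ones() == 0` check). Bitmaps are assumed to have equal length.
fn intersect_all(mut bitmap: Vec<bool>, per_tag: impl IntoIterator<Item = Vec<bool>>) -> Vec<bool> {
    for tag_bitmap in per_tag {
        if bitmap.iter().all(|b| !b) {
            break; // already empty, no tag can add matches back
        }
        for (dst, src) in bitmap.iter_mut().zip(tag_bitmap) {
            *dst &= src;
        }
    }
    bitmap
}

fn main() {
    // Mirrors the two-tag test case above: 1,0,1,0,1,0,1,0 AND 1,1,0,1,1,0,1,1.
    let full = vec![true; 8];
    let tag0 = vec![true, false, true, false, true, false, true, false];
    let tag1 = vec![true, true, false, true, true, false, true, true];
    assert_eq!(
        intersect_all(full, [tag0, tag1]),
        vec![true, false, false, false, true, false, true, false]
    );
}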
@@ -644,7 +644,7 @@ mod tests {
let dir = create_temp_dir("range2");
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());

test_kv_range_2(&backend).await;
test_kv_range_2(backend).await;
}

#[tokio::test]
@@ -671,7 +671,7 @@ mod tests {
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());
prepare_kv(&backend).await;

test_kv_batch_delete(&backend).await;
test_kv_batch_delete(backend).await;
}

#[tokio::test]
@@ -680,7 +680,7 @@ mod tests {
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());
prepare_kv(&backend).await;

test_kv_delete_range(&backend).await;
test_kv_delete_range(backend).await;
}

#[tokio::test(flavor = "multi_thread")]
@@ -11,9 +11,6 @@ pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend"]
[lints]
workspace = true

[target.'cfg(not(target_os = "android"))'.dependencies]
local-ip-address.workspace = true

[dependencies]
api.workspace = true
async-trait = "0.1"
@@ -37,8 +34,6 @@ common-version.workspace = true
common-wal.workspace = true
dashmap.workspace = true
datatypes.workspace = true
deadpool.workspace = true
deadpool-postgres.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true
@@ -68,7 +63,6 @@ tonic.workspace = true
tower.workspace = true
typetag.workspace = true
url = "2.3"
uuid.workspace = true

[dev-dependencies]
chrono.workspace = true
@@ -29,8 +29,6 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
#[cfg(feature = "pg_kvbackend")]
use common_telemetry::error;
use common_telemetry::info;
#[cfg(feature = "pg_kvbackend")]
use deadpool_postgres::{Config, Runtime};
use etcd_client::Client;
use futures::future;
use servers::configurator::ConfiguratorRef;
@@ -50,9 +48,8 @@ use tonic::transport::server::{Router, TcpIncoming};

use crate::election::etcd::EtcdElection;
#[cfg(feature = "pg_kvbackend")]
use crate::election::postgres::PgElection;
#[cfg(feature = "pg_kvbackend")]
use crate::election::CANDIDATE_LEASE_SECS;
use crate::error::InvalidArgumentsSnafu;
use crate::error::{InitExportMetricsTaskSnafu, TomlFormatSnafu};
use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
@@ -85,14 +82,14 @@ impl MetasrvInstance {
let httpsrv = Arc::new(
HttpServerBuilder::new(opts.http.clone())
.with_metrics_handler(MetricsHandler)
.with_greptime_config_options(opts.to_toml().context(error::TomlFormatSnafu)?)
.with_greptime_config_options(opts.to_toml().context(TomlFormatSnafu)?)
.build(),
);
let metasrv = Arc::new(metasrv);
// put metasrv into plugins for later use
plugins.insert::<Arc<Metasrv>>(metasrv.clone());
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(error::InitExportMetricsTaskSnafu)?;
.context(InitExportMetricsTaskSnafu)?;
Ok(MetasrvInstance {
metasrv,
httpsrv,
@@ -107,7 +104,7 @@ impl MetasrvInstance {
self.metasrv.try_start().await?;

if let Some(t) = self.export_metrics_task.as_ref() {
t.start(None).context(error::InitExportMetricsTaskSnafu)?
t.start(None).context(InitExportMetricsTaskSnafu)?
}

let (tx, rx) = mpsc::channel::<()>(1);
@@ -228,23 +225,11 @@ pub async fn metasrv_builder(
}
#[cfg(feature = "pg_kvbackend")]
(None, BackendImpl::PostgresStore) => {
let pool = create_postgres_pool(opts).await?;
// TODO(CookiePie): use table name from config.
let kv_backend = PgStore::with_pg_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
let pg_client = create_postgres_client(opts).await?;
let kv_backend = PgStore::with_pg_client(pg_client)
.await
.context(error::KvBackendSnafu)?;
// Client for election should be created separately since we need a different session keep-alive idle time.
let election_client = create_postgres_client(opts).await?;
let election = PgElection::with_pg_client(
opts.server_addr.clone(),
election_client,
opts.store_key_prefix.clone(),
CANDIDATE_LEASE_SECS,
&opts.meta_table_name,
opts.meta_election_lock_id,
)
.await?;
(kv_backend, Some(election))
(kv_backend, None)
}
};

@@ -290,12 +275,9 @@ async fn create_etcd_client(opts: &MetasrvOptions) -> Result<Client> {

#[cfg(feature = "pg_kvbackend")]
async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres::Client> {
let postgres_url = opts
.store_addrs
.first()
.context(error::InvalidArgumentsSnafu {
err_msg: "empty store addrs",
})?;
let postgres_url = opts.store_addrs.first().context(InvalidArgumentsSnafu {
err_msg: "empty store addrs",
})?;
let (client, connection) = tokio_postgres::connect(postgres_url, NoTls)
.await
.context(error::ConnectPostgresSnafu)?;
@@ -307,19 +289,3 @@ async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres:
});
Ok(client)
}

#[cfg(feature = "pg_kvbackend")]
async fn create_postgres_pool(opts: &MetasrvOptions) -> Result<deadpool_postgres::Pool> {
let postgres_url = opts
.store_addrs
.first()
.context(error::InvalidArgumentsSnafu {
err_msg: "empty store addrs",
})?;
let mut cfg = Config::new();
cfg.url = Some(postgres_url.to_string());
let pool = cfg
.create_pool(Some(Runtime::Tokio1), NoTls)
.context(error::CreatePostgresPoolSnafu)?;
Ok(pool)
}
@@ -19,9 +19,7 @@ pub mod postgres;
use std::fmt::{self, Debug};
use std::sync::Arc;

use common_telemetry::{info, warn};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::broadcast::Receiver;

use crate::error::Result;
use crate::metasrv::MetasrvNodeInfo;
@@ -77,37 +75,6 @@ impl fmt::Display for LeaderChangeMessage {
}
}

fn listen_leader_change(leader_value: String) -> Sender<LeaderChangeMessage> {
let (tx, mut rx) = broadcast::channel(100);
let _handle = common_runtime::spawn_global(async move {
loop {
match rx.recv().await {
Ok(msg) => match msg {
LeaderChangeMessage::Elected(key) => {
info!(
"[{leader_value}] is elected as leader: {:?}, lease: {}",
String::from_utf8_lossy(key.name()),
key.lease_id()
);
}
LeaderChangeMessage::StepDown(key) => {
warn!(
"[{leader_value}] is stepping down: {:?}, lease: {}",
String::from_utf8_lossy(key.name()),
key.lease_id()
);
}
},
Err(RecvError::Lagged(_)) => {
warn!("Log printing is too slow or leader changed too fast!");
}
Err(RecvError::Closed) => break,
}
}
});
tx
}

#[async_trait::async_trait]
pub trait Election: Send + Sync {
type Leader;
@@ -23,12 +23,13 @@ use etcd_client::{
};
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::broadcast::Receiver;
use tokio::time::{timeout, MissedTickBehavior};

use crate::election::{
listen_leader_change, Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT,
CANDIDATE_LEASE_SECS, ELECTION_KEY, KEEP_ALIVE_INTERVAL_SECS,
Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT, CANDIDATE_LEASE_SECS, ELECTION_KEY,
KEEP_ALIVE_INTERVAL_SECS,
};
use crate::error;
use crate::error::Result;
@@ -87,7 +88,36 @@ impl EtcdElection {
E: AsRef<str>,
{
let leader_value: String = leader_value.as_ref().into();
let tx = listen_leader_change(leader_value.clone());

let leader_ident = leader_value.clone();
let (tx, mut rx) = broadcast::channel(100);
let _handle = common_runtime::spawn_global(async move {
loop {
match rx.recv().await {
Ok(msg) => match msg {
LeaderChangeMessage::Elected(key) => {
info!(
"[{leader_ident}] is elected as leader: {:?}, lease: {}",
String::from_utf8_lossy(key.name()),
key.lease_id()
);
}
LeaderChangeMessage::StepDown(key) => {
warn!(
"[{leader_ident}] is stepping down: {:?}, lease: {}",
String::from_utf8_lossy(key.name()),
key.lease_id()
);
}
},
Err(RecvError::Lagged(_)) => {
warn!("Log printing is too slow or leader changed too fast!");
}
Err(RecvError::Closed) => break,
}
}
});

Ok(Arc::new(Self {
leader_value,
client,
File diff suppressed because it is too large
@@ -704,7 +704,7 @@ pub enum Error {
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to connect to Postgres"))]
#[snafu(display("Failed to connect to PostgresSQL"))]
ConnectPostgres {
#[snafu(source)]
error: tokio_postgres::Error,
@@ -712,23 +712,6 @@ pub enum Error {
location: Location,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to create connection pool for Postgres"))]
CreatePostgresPool {
#[snafu(source)]
error: deadpool_postgres::CreatePoolError,
#[snafu(implicit)]
location: Location,
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to get connection from Postgres pool: {}", reason))]
GetPostgresConnection {
reason: String,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Handler not found: {}", name))]
HandlerNotFound {
name: String,
@@ -860,10 +843,9 @@ impl ErrorExt for Error {
Error::Other { source, .. } => source.status_code(),
Error::LookupPeer { source, .. } => source.status_code(),
#[cfg(feature = "pg_kvbackend")]
Error::CreatePostgresPool { .. }
| Error::GetPostgresConnection { .. }
| Error::PostgresExecution { .. }
| Error::ConnectPostgres { .. } => StatusCode::Internal,
Error::ConnectPostgres { .. } => StatusCode::Internal,
#[cfg(feature = "pg_kvbackend")]
Error::PostgresExecution { .. } => StatusCode::Internal,
}
}
Some files were not shown because too many files have changed in this diff.