mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-06 05:12:54 +00:00
Compare commits
133 Commits
v0.12.0-ni
...
v0.11.3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ea0a347edc | ||
|
|
4d70589488 | ||
|
|
428f646fa3 | ||
|
|
1d1bb83a9f | ||
|
|
27918686d6 | ||
|
|
0f55afd167 | ||
|
|
ea02ddcde1 | ||
|
|
0404e2a132 | ||
|
|
7deb559a81 | ||
|
|
c470c6a172 | ||
|
|
efee2480d2 | ||
|
|
42aaf86c26 | ||
|
|
a952ebb2ff | ||
|
|
9a5b904db3 | ||
|
|
2e1a5d811a | ||
|
|
2d5824b3a5 | ||
|
|
5f67f2b58e | ||
|
|
c12fbcda9f | ||
|
|
7fe735009c | ||
|
|
f0298afaf0 | ||
|
|
5175dea6b3 | ||
|
|
7caa88abc7 | ||
|
|
eafb01dfff | ||
|
|
b0de816d3d | ||
|
|
5c6161a95e | ||
|
|
5e3c5945c4 | ||
|
|
f6feac26f5 | ||
|
|
4b2c59e626 | ||
|
|
cf605ecccc | ||
|
|
ab3f9c42f1 | ||
|
|
258fc6f31b | ||
|
|
e2dccc1d1a | ||
|
|
78c5707642 | ||
|
|
204b5e474f | ||
|
|
e9f1fa0b7d | ||
|
|
a988ff5acf | ||
|
|
ef0fca9388 | ||
|
|
b704e7f703 | ||
|
|
3a4c636e29 | ||
|
|
a22e8b421c | ||
|
|
5b42546204 | ||
|
|
0678a31ab1 | ||
|
|
589cc84048 | ||
|
|
ed8c072a5e | ||
|
|
9d172f1cae | ||
|
|
236888313d | ||
|
|
0b97ef0e4f | ||
|
|
316e6a83eb | ||
|
|
6dc57b7a6c | ||
|
|
1f5c2b32e5 | ||
|
|
01e907be40 | ||
|
|
e4dc5ea243 | ||
|
|
3ff5754b5a | ||
|
|
c22ca3ebd5 | ||
|
|
327d165ad9 | ||
|
|
fe63a620ef | ||
|
|
be81f0db5a | ||
|
|
6ca7a305ae | ||
|
|
1111a8bd57 | ||
|
|
66b21b29b5 | ||
|
|
31cfab81ad | ||
|
|
dd3a509607 | ||
|
|
d4cae6af1e | ||
|
|
3fec71b5c0 | ||
|
|
9e31a6478b | ||
|
|
bce291a8e1 | ||
|
|
c788eb67e2 | ||
|
|
0c32dcf46c | ||
|
|
68a05b38bd | ||
|
|
ee72ae8bd0 | ||
|
|
556bd796d8 | ||
|
|
1327e8809f | ||
|
|
17d75c767c | ||
|
|
a1ed450c0c | ||
|
|
ea4ce9d1e3 | ||
|
|
1f7d9666b7 | ||
|
|
9f1a0d78b2 | ||
|
|
ed8e418716 | ||
|
|
9e7121c1bb | ||
|
|
94a49ed4f0 | ||
|
|
f5e743379f | ||
|
|
6735e5867e | ||
|
|
925525726b | ||
|
|
6427682a9a | ||
|
|
55b0022676 | ||
|
|
2d84cc8d87 | ||
|
|
c030705b17 | ||
|
|
443c600bd0 | ||
|
|
39cadfe10b | ||
|
|
9b5e4e80f7 | ||
|
|
041a276b66 | ||
|
|
614a25ddc5 | ||
|
|
4337e20010 | ||
|
|
65c52cc698 | ||
|
|
50f31fd681 | ||
|
|
b5af5aaf8d | ||
|
|
27693c7f1e | ||
|
|
a59fef9ffb | ||
|
|
bcecd8ce52 | ||
|
|
ffdcb8c1ac | ||
|
|
554121ad79 | ||
|
|
43c12b4f2c | ||
|
|
7aa8c28fe4 | ||
|
|
34fbe7739e | ||
|
|
06d7bd99dd | ||
|
|
b71d842615 | ||
|
|
7f71693b8e | ||
|
|
615ea1a171 | ||
|
|
4e725d259d | ||
|
|
dc2252eb6d | ||
|
|
6d4cc2e070 | ||
|
|
6066ce2c4a | ||
|
|
b90d8f7dbd | ||
|
|
fdccf4ff84 | ||
|
|
8b1484c064 | ||
|
|
576e20ac78 | ||
|
|
10b3e3da0f | ||
|
|
4a3ef2d718 | ||
|
|
65eabb2a05 | ||
|
|
bc5a57f51f | ||
|
|
f24b9d8814 | ||
|
|
dd4d0a88ce | ||
|
|
3d2096fe9d | ||
|
|
35715bb710 | ||
|
|
08a3befa67 | ||
|
|
ca1758d4e7 | ||
|
|
42bf818167 | ||
|
|
2c9b117224 | ||
|
|
3edf2317e1 | ||
|
|
85d72a3cd0 | ||
|
|
928172bd82 | ||
|
|
e9f5bddeff | ||
|
|
486755d795 |
3
.github/workflows/dependency-check.yml
vendored
3
.github/workflows/dependency-check.yml
vendored
@@ -1,9 +1,6 @@
|
||||
name: Check Dependencies
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
2
.github/workflows/dev-build.yml
vendored
2
.github/workflows/dev-build.yml
vendored
@@ -29,7 +29,7 @@ on:
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
default: ec2-c6g.8xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
|
||||
138
.github/workflows/develop.yml
vendored
138
.github/workflows/develop.yml
vendored
@@ -43,7 +43,7 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-2022, ubuntu-20.04 ]
|
||||
os: [ ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -57,6 +57,8 @@ jobs:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Clippy` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo check
|
||||
run: cargo check --locked --workspace --all-targets
|
||||
|
||||
@@ -67,11 +69,6 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-toml"
|
||||
- name: Install taplo
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
||||
- name: Run taplo
|
||||
@@ -94,13 +91,15 @@ jobs:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-binaries"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
|
||||
- name: Pack greptime binaries
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -142,11 +141,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -200,11 +194,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -255,13 +244,15 @@ jobs:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-greptime-ci"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime bianry
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -317,11 +308,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -466,11 +452,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -578,8 +559,8 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait kafka
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -609,11 +590,6 @@ jobs:
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-rust-fmt"
|
||||
- name: Check format
|
||||
run: make fmt-check
|
||||
|
||||
@@ -635,55 +611,99 @@ jobs:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Check` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo clippy
|
||||
run: make clippy
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
conflict-check:
|
||||
name: Check for conflict
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Merge Conflict Finder
|
||||
uses: olivernybroe/action-conflict-finder@v4.0
|
||||
|
||||
test:
|
||||
if: github.event_name != 'merge_group'
|
||||
runs-on: ubuntu-24.04-arm
|
||||
timeout-minutes: 60
|
||||
needs: [clippy, fmt]
|
||||
needs: [conflict-check, clippy, fmt]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
version: "14.0"
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: llvm-tools-preview
|
||||
cache: false
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
- name: Docker Cache
|
||||
uses: ScribeMD/docker-cache@0.3.7
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
coverage:
|
||||
if: github.event_name == 'merge_group'
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
key: docker-${{ runner.os }}-coverage
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: llvm-tools
|
||||
cache: false
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Setup etcd server
|
||||
working-directory: tests-integration/fixtures/etcd
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup minio
|
||||
working-directory: tests-integration/fixtures/minio
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup postgres server
|
||||
working-directory: tests-integration/fixtures/postgres
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
|
||||
2
.github/workflows/nightly-build.yml
vendored
2
.github/workflows/nightly-build.yml
vendored
@@ -27,7 +27,7 @@ on:
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
default: ec2-c6g.8xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
|
||||
46
.github/workflows/nightly-ci.yml
vendored
46
.github/workflows/nightly-ci.yml
vendored
@@ -108,7 +108,53 @@ jobs:
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
## this is designed for generating cache that usable for pull requests
|
||||
test-on-linux:
|
||||
name: Run tests on Linux
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install Rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
cleanbuild-linux-nix:
|
||||
name: Run clean build on Linux
|
||||
runs-on: ubuntu-latest-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
|
||||
18
.github/workflows/release.yml
vendored
18
.github/workflows/release.yml
vendored
@@ -91,7 +91,7 @@ env:
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.12.0
|
||||
NEXT_RELEASE_VERSION: v0.11.0
|
||||
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
@@ -436,6 +436,22 @@ jobs:
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
bump-doc-version:
|
||||
name: Bump doc version
|
||||
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [allocate-runners]
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Bump doc version
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/bump-doc-version.ts
|
||||
env:
|
||||
VERSION: ${{ needs.allocate-runners.outputs.version }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
|
||||
name: Send notification to Greptime team
|
||||
|
||||
182
Cargo.lock
generated
182
Cargo.lock
generated
@@ -188,7 +188,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -773,7 +773,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1314,7 +1314,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1348,7 +1348,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -1684,7 +1684,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
|
||||
|
||||
[[package]]
|
||||
name = "cli"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1727,7 +1727,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -1736,7 +1736,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1763,7 +1763,7 @@ dependencies = [
|
||||
"rand",
|
||||
"serde_json",
|
||||
"snafu 0.8.5",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"substrait 0.37.3",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -1804,7 +1804,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1864,7 +1864,7 @@ dependencies = [
|
||||
"similar-asserts",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -1916,7 +1916,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"async-trait",
|
||||
@@ -1938,11 +1938,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -1965,7 +1965,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -2001,7 +2001,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"bigdecimal 0.4.5",
|
||||
"common-error",
|
||||
@@ -2014,7 +2014,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"http 0.2.12",
|
||||
"snafu 0.8.5",
|
||||
@@ -2024,7 +2024,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2034,7 +2034,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"approx 0.5.1",
|
||||
@@ -2078,7 +2078,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -2095,7 +2095,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -2121,7 +2121,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -2140,7 +2140,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -2154,7 +2154,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2167,7 +2167,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -2192,8 +2192,6 @@ dependencies = [
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"datatypes",
|
||||
"deadpool",
|
||||
"deadpool-postgres",
|
||||
"derive_builder 0.12.0",
|
||||
"etcd-client",
|
||||
"futures",
|
||||
@@ -2226,7 +2224,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-options"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-grpc",
|
||||
"humantime-serde",
|
||||
@@ -2235,11 +2233,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
|
||||
[[package]]
|
||||
name = "common-pprof"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2251,7 +2249,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2278,7 +2276,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2286,7 +2284,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -2312,7 +2310,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2331,7 +2329,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -2361,7 +2359,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"backtrace",
|
||||
@@ -2389,7 +2387,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-query",
|
||||
@@ -2401,7 +2399,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2419,7 +2417,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"const_format",
|
||||
@@ -2429,7 +2427,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -3228,7 +3226,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3279,7 +3277,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.19",
|
||||
@@ -3288,7 +3286,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -3315,39 +3313,6 @@ dependencies = [
|
||||
"sqlparser_derive 0.1.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb84100978c1c7b37f09ed3ce3e5f843af02c2a2c431bae5b19230dad2c1b490"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"deadpool-runtime",
|
||||
"num_cpus",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool-postgres"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bda39fa1cfff190d8924d447ad04fd22772c250438ca5ce1dfb3c80621c05aaa"
|
||||
dependencies = [
|
||||
"deadpool",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool-runtime"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b"
|
||||
dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "debugid"
|
||||
version = "0.8.0"
|
||||
@@ -3945,7 +3910,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -4061,7 +4026,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -4120,7 +4085,7 @@ dependencies = [
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"strum 0.25.0",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.11.0",
|
||||
@@ -4158,7 +4123,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -5308,7 +5273,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -6158,7 +6123,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
|
||||
|
||||
[[package]]
|
||||
name = "log-query"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -6170,7 +6135,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -6514,7 +6479,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6541,7 +6506,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6566,8 +6531,6 @@ dependencies = [
|
||||
"common-wal",
|
||||
"dashmap",
|
||||
"datatypes",
|
||||
"deadpool",
|
||||
"deadpool-postgres",
|
||||
"derive_builder 0.12.0",
|
||||
"etcd-client",
|
||||
"futures",
|
||||
@@ -6622,7 +6585,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6716,7 +6679,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -7453,7 +7416,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
@@ -7706,7 +7669,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -7754,7 +7717,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -8004,7 +7967,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -8290,7 +8253,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pipeline"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -8452,7 +8415,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"clap 4.5.19",
|
||||
@@ -8740,7 +8703,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-trait",
|
||||
@@ -8975,7 +8938,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-compression 0.4.13",
|
||||
"async-trait",
|
||||
@@ -9100,7 +9063,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -9165,7 +9128,7 @@ dependencies = [
|
||||
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
|
||||
"statrs",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -10649,7 +10612,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "script"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -10941,7 +10904,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -11053,7 +11016,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -11407,7 +11370,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -11471,7 +11434,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -11689,7 +11652,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -11851,7 +11814,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -12050,7 +12013,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -12327,7 +12290,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -12370,7 +12333,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -12410,6 +12373,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"hex",
|
||||
"itertools 0.10.5",
|
||||
"log-query",
|
||||
"loki-api",
|
||||
"meta-client",
|
||||
"meta-srv",
|
||||
@@ -12434,7 +12398,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.11.3",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
|
||||
@@ -68,7 +68,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.12.0"
|
||||
version = "0.11.3"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
@@ -118,8 +118,6 @@ datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion
|
||||
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
deadpool = "0.10"
|
||||
deadpool-postgres = "0.12"
|
||||
derive_builder = "0.12"
|
||||
dotenv = "0.15"
|
||||
etcd-client = "0.13"
|
||||
|
||||
@@ -94,7 +94,7 @@
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
@@ -132,10 +132,10 @@
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
@@ -466,10 +466,10 @@
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
|
||||
@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_write_cache = false
|
||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
write_cache_path = ""
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
write_cache_size = "5GiB"
|
||||
experimental_write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -337,7 +337,7 @@ data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
@@ -518,18 +518,18 @@ auto_flush_interval = "1h"
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_write_cache = false
|
||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
write_cache_path = ""
|
||||
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
write_cache_size = "5GiB"
|
||||
experimental_write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
75
cyborg/bin/bump-doc-version.ts
Normal file
75
cyborg/bin/bump-doc-version.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
async function triggerWorkflow(workflowId: string, version: string) {
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
try {
|
||||
await docsClient.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: "docs",
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function determineWorkflow(version: string): [string, string] {
|
||||
// Check if it's a nightly version
|
||||
if (version.includes('nightly')) {
|
||||
return ['bump-nightly-version.yml', version];
|
||||
}
|
||||
|
||||
const parts = version.split('.');
|
||||
|
||||
if (parts.length !== 3) {
|
||||
throw new Error('Invalid version format');
|
||||
}
|
||||
|
||||
// If patch version (last number) is 0, it's a major version
|
||||
// Return only major.minor version
|
||||
if (parts[2] === '0') {
|
||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||
}
|
||||
|
||||
// Otherwise it's a patch version, use full version
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
try {
|
||||
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
|
||||
triggerWorkflow(workflowId, apiVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing version: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -1,3 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-10-19"
|
||||
components = ["rust-analyzer"]
|
||||
components = ["rust-analyzer", "llvm-tools"]
|
||||
|
||||
@@ -11,11 +11,13 @@ pkgs.mkShell rec {
|
||||
clang
|
||||
gcc
|
||||
protobuf
|
||||
gnumake
|
||||
mold
|
||||
(fenix.fromToolchainFile {
|
||||
dir = ./.;
|
||||
})
|
||||
cargo-nextest
|
||||
cargo-llvm-cov
|
||||
taplo
|
||||
curl
|
||||
];
|
||||
|
||||
@@ -62,11 +62,6 @@ impl Instance {
|
||||
pub fn datanode(&self) -> &Datanode {
|
||||
&self.datanode
|
||||
}
|
||||
|
||||
/// allow customizing datanode for downstream projects
|
||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||
&mut self.datanode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
@@ -66,11 +66,6 @@ impl Instance {
|
||||
pub fn flownode(&self) -> &FlownodeInstance {
|
||||
&self.flownode
|
||||
}
|
||||
|
||||
/// allow customizing flownode for downstream projects
|
||||
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
||||
&mut self.flownode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -69,7 +69,7 @@ fn test_load_datanode_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
@@ -203,7 +203,7 @@ fn test_load_standalone_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
|
||||
@@ -725,7 +725,8 @@ struct Tokenizer {
|
||||
impl Tokenizer {
|
||||
pub fn tokenize(mut self, pattern: &str) -> Result<Vec<Token>> {
|
||||
let mut tokens = vec![];
|
||||
while self.cursor < pattern.len() {
|
||||
let char_len = pattern.chars().count();
|
||||
while self.cursor < char_len {
|
||||
// TODO: collect pattern into Vec<char> if this tokenizer is bottleneck in the future
|
||||
let c = pattern.chars().nth(self.cursor).unwrap();
|
||||
match c {
|
||||
@@ -794,7 +795,8 @@ impl Tokenizer {
|
||||
let mut phase = String::new();
|
||||
let mut is_quote_present = false;
|
||||
|
||||
while self.cursor < pattern.len() {
|
||||
let char_len = pattern.chars().count();
|
||||
while self.cursor < char_len {
|
||||
let mut c = pattern.chars().nth(self.cursor).unwrap();
|
||||
|
||||
match c {
|
||||
@@ -899,6 +901,26 @@ mod test {
|
||||
Phase("c".to_string()),
|
||||
],
|
||||
),
|
||||
(
|
||||
r#"中文 测试"#,
|
||||
vec![Phase("中文".to_string()), Phase("测试".to_string())],
|
||||
),
|
||||
(
|
||||
r#"中文 AND 测试"#,
|
||||
vec![Phase("中文".to_string()), And, Phase("测试".to_string())],
|
||||
),
|
||||
(
|
||||
r#"中文 +测试"#,
|
||||
vec![Phase("中文".to_string()), Must, Phase("测试".to_string())],
|
||||
),
|
||||
(
|
||||
r#"中文 -测试"#,
|
||||
vec![
|
||||
Phase("中文".to_string()),
|
||||
Negative,
|
||||
Phase("测试".to_string()),
|
||||
],
|
||||
),
|
||||
];
|
||||
|
||||
for (query, expected) in cases {
|
||||
@@ -1030,6 +1052,61 @@ mod test {
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
r#"中文 测试"#,
|
||||
PatternAst::Binary {
|
||||
op: BinaryOp::Or,
|
||||
children: vec![
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Optional,
|
||||
pattern: "中文".to_string(),
|
||||
},
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Optional,
|
||||
pattern: "测试".to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
r#"中文 AND 测试"#,
|
||||
PatternAst::Binary {
|
||||
op: BinaryOp::And,
|
||||
children: vec![
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Optional,
|
||||
pattern: "中文".to_string(),
|
||||
},
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Optional,
|
||||
pattern: "测试".to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
r#"中文 +测试"#,
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Must,
|
||||
pattern: "测试".to_string(),
|
||||
},
|
||||
),
|
||||
(
|
||||
r#"中文 -测试"#,
|
||||
PatternAst::Binary {
|
||||
op: BinaryOp::And,
|
||||
children: vec![
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Negative,
|
||||
pattern: "测试".to_string(),
|
||||
},
|
||||
PatternAst::Literal {
|
||||
op: UnaryOp::Optional,
|
||||
pattern: "中文".to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for (query, expected) in cases {
|
||||
|
||||
@@ -35,8 +35,6 @@ common-wal.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datatypes.workspace = true
|
||||
deadpool.workspace = true
|
||||
deadpool-postgres.workspace = true
|
||||
derive_builder.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
|
||||
@@ -667,18 +667,10 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to create connection pool for Postgres"))]
|
||||
CreatePostgresPool {
|
||||
#[snafu(display("Failed to connect to Postgres"))]
|
||||
ConnectPostgres {
|
||||
#[snafu(source)]
|
||||
error: deadpool_postgres::CreatePoolError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to get Postgres connection from pool: {}", reason))]
|
||||
GetPostgresConnection {
|
||||
reason: String,
|
||||
error: tokio_postgres::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
@@ -794,9 +786,9 @@ impl ErrorExt for Error {
|
||||
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
PostgresExecution { .. } | CreatePostgresPool { .. } | GetPostgresConnection { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
PostgresExecution { .. } => StatusCode::Internal,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
ConnectPostgres { .. } => StatusCode::Internal,
|
||||
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,17 +16,15 @@ use std::any::Any;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deadpool_postgres::{Config, Pool, Runtime};
|
||||
use common_telemetry::error;
|
||||
use snafu::ResultExt;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::NoTls;
|
||||
use tokio_postgres::{Client, NoTls};
|
||||
|
||||
use crate::error::{
|
||||
CreatePostgresPoolSnafu, Error, GetPostgresConnectionSnafu, PostgresExecutionSnafu, Result,
|
||||
StrFromUtf8Snafu,
|
||||
};
|
||||
use super::{KvBackend, TxnService};
|
||||
use crate::error::{ConnectPostgresSnafu, Error, PostgresExecutionSnafu, Result, StrFromUtf8Snafu};
|
||||
use crate::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
|
||||
use crate::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
@@ -36,7 +34,8 @@ use crate::rpc::KeyValue;
|
||||
|
||||
/// Posgres backend store for metasrv
|
||||
pub struct PgStore {
|
||||
pool: Pool,
|
||||
// TODO: Consider using sqlx crate.
|
||||
client: Client,
|
||||
}
|
||||
|
||||
const EMPTY: &[u8] = &[0];
|
||||
@@ -95,49 +94,33 @@ SELECT k, v FROM prev;"#;
impl PgStore {
/// Create pgstore impl of KvBackendRef from url.
pub async fn with_url(url: &str) -> Result<KvBackendRef> {
let mut cfg = Config::new();
cfg.url = Some(url.to_string());
let pool = cfg
.create_pool(Some(Runtime::Tokio1), NoTls)
.context(CreatePostgresPoolSnafu)?;
Self::with_pg_pool(pool).await
// TODO: support tls.
let (client, conn) = tokio_postgres::connect(url, NoTls)
.await
.context(ConnectPostgresSnafu)?;
tokio::spawn(async move {
if let Err(e) = conn.await {
error!(e; "connection error");
}
});
Self::with_pg_client(client).await
}

/// Create pgstore impl of KvBackendRef from tokio-postgres client.
pub async fn with_pg_pool(pool: Pool) -> Result<KvBackendRef> {
pub async fn with_pg_client(client: Client) -> Result<KvBackendRef> {
// This step ensures the postgres metadata backend is ready to use.
// We check if greptime_metakv table exists, and we will create a new table
// if it does not exist.
let client = match pool.get().await {
Ok(client) => client,
Err(e) => {
return GetPostgresConnectionSnafu {
reason: e.to_string(),
}
.fail();
}
};
client
.execute(METADKV_CREATION, &[])
.await
.context(PostgresExecutionSnafu)?;
Ok(Arc::new(Self { pool }))
}

async fn get_client(&self) -> Result<deadpool::managed::Object<deadpool_postgres::Manager>> {
match self.pool.get().await {
Ok(client) => Ok(client),
Err(e) => GetPostgresConnectionSnafu {
reason: e.to_string(),
}
.fail(),
}
Ok(Arc::new(Self { client }))
}

async fn put_if_not_exists(&self, key: &str, value: &str) -> Result<bool> {
let res = self
.get_client()
.await?
.client
.query(PUT_IF_NOT_EXISTS, &[&key, &value])
.await
.context(PostgresExecutionSnafu)?;
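The new `with_url` path drops the deadpool pool entirely: it opens one `tokio_postgres` connection, spawns the connection driver on a background task, and hands the resulting `Client` to `with_pg_client`. A minimal standalone sketch of that pattern (the store type and the `demo_metakv` table are placeholders, not the real `PgStore` or `METADKV_CREATION` statement):

use tokio_postgres::{Client, NoTls};

struct DemoStore {
    client: Client,
}

async fn demo_connect(url: &str) -> Result<DemoStore, tokio_postgres::Error> {
    // `connect` returns the client plus a `Connection` future that must be polled
    // for the client to make progress, hence the spawned driver task.
    let (client, connection) = tokio_postgres::connect(url, NoTls).await?;
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("postgres connection error: {e}");
        }
    });
    // Ensure the metadata table exists before handing the store out.
    client
        .execute(
            "CREATE TABLE IF NOT EXISTS demo_metakv (k bytea PRIMARY KEY, v bytea)",
            &[],
        )
        .await?;
    Ok(DemoStore { client })
}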
@@ -276,8 +259,7 @@ impl KvBackend for PgStore {
|
||||
})
|
||||
.collect();
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(&template, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -345,10 +327,8 @@ impl KvBackend for PgStore {
|
||||
in_params.iter().map(|x| x as &(dyn ToSql + Sync)).collect();
|
||||
|
||||
let query = generate_batch_upsert_query(req.kvs.len());
|
||||
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -385,10 +365,8 @@ impl KvBackend for PgStore {
|
||||
.iter()
|
||||
.map(|x| x as &(dyn ToSql + Sync))
|
||||
.collect();
|
||||
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -431,8 +409,7 @@ impl KvBackend for PgStore {
|
||||
.collect();
|
||||
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(template, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -476,10 +453,8 @@ impl KvBackend for PgStore {
|
||||
.iter()
|
||||
.map(|x| x as &(dyn ToSql + Sync))
|
||||
.collect();
|
||||
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(&query, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -513,8 +488,7 @@ impl KvBackend for PgStore {
|
||||
let expect = process_bytes(&req.expect, "CASExpect")?;
|
||||
|
||||
let res = self
|
||||
.get_client()
|
||||
.await?
|
||||
.client
|
||||
.query(CAS, &[&key, &value, &expect])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -586,19 +560,10 @@ mod tests {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut cfg = Config::new();
|
||||
cfg.url = Some(endpoints);
|
||||
let pool = cfg
|
||||
.create_pool(Some(Runtime::Tokio1), NoTls)
|
||||
.context(CreatePostgresPoolSnafu)
|
||||
.unwrap();
|
||||
let client = pool.get().await.unwrap();
|
||||
client
|
||||
.execute(METADKV_CREATION, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)
|
||||
.unwrap();
|
||||
Some(PgStore { pool })
|
||||
let (client, connection) = tokio_postgres::connect(&endpoints, NoTls).await.unwrap();
|
||||
tokio::spawn(connection);
|
||||
let _ = client.execute(METADKV_CREATION, &[]).await;
|
||||
Some(PgStore { client })
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -433,8 +433,8 @@ impl DatanodeBuilder {
) -> Result<MitoEngine> {
if opts.storage.is_object_storage() {
// Enable the write cache when setting object storage
config.enable_write_cache = true;
info!("Configured 'enable_write_cache=true' for mito engine.");
config.enable_experimental_write_cache = true;
info!("Configured 'enable_experimental_write_cache=true' for mito engine.");
}

let mito_engine = match &opts.wal {

@@ -123,6 +123,14 @@ impl ColumnSchema {
self.default_constraint.as_ref()
}

/// Check if the default constraint is an impure function.
pub fn is_default_impure(&self) -> bool {
self.default_constraint
.as_ref()
.map(|c| c.is_function())
.unwrap_or(false)
}

#[inline]
pub fn metadata(&self) -> &Metadata {
&self.metadata
@@ -283,6 +291,15 @@ impl ColumnSchema {
}
}

/// Creates an impure default value for this column, only if it has an impure default constraint.
/// Otherwise, returns `Ok(None)`.
pub fn create_impure_default(&self) -> Result<Option<Value>> {
match &self.default_constraint {
Some(c) => c.create_impure_default(&self.data_type),
None => Ok(None),
}
}

/// Retrieves the fulltext options for the column.
pub fn fulltext_options(&self) -> Result<Option<FulltextOptions>> {
match self.metadata.get(FULLTEXT_KEY) {

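`is_default_impure` and `create_impure_default` let callers distinguish constant defaults (which can safely be filled in later) from function defaults such as `now()`, which have to be evaluated once so every consumer sees the same value. A rough usage sketch, assuming the usual `ColumnSchema::new` / `with_default_constraint` constructors from this crate and that the `now()` spelling matches the `NOW_FN` constant referenced in the hunks below:

use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};

fn demo() -> datatypes::error::Result<()> {
    // `now()` is a function default, so it is "impure": its value depends on when it runs.
    let ts_col = ColumnSchema::new(
        "ts",
        ConcreteDataType::timestamp_millisecond_datatype(),
        false,
    )
    .with_default_constraint(Some(ColumnDefaultConstraint::Function("now()".to_string())))?;

    assert!(ts_col.is_default_impure());
    // Evaluates the function default once; a constant default would return Ok(None) here.
    let v = ts_col.create_impure_default()?;
    assert!(v.is_some());
    Ok(())
}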
@@ -178,12 +178,63 @@ impl ColumnDefaultConstraint {
|
||||
}
|
||||
}
|
||||
|
||||
/// Only create default vector if it's impure, i.e., it's a function.
|
||||
///
|
||||
/// This helps defer creating constant default values to the mito engine, while also keeping impure default values consistent.
|
||||
pub fn create_impure_default_vector(
|
||||
&self,
|
||||
data_type: &ConcreteDataType,
|
||||
num_rows: usize,
|
||||
) -> Result<Option<VectorRef>> {
|
||||
assert!(num_rows > 0);
|
||||
|
||||
match self {
|
||||
ColumnDefaultConstraint::Function(expr) => {
|
||||
// Functions should also ensure its return value is not null when
|
||||
// is_nullable is true.
|
||||
match &expr[..] {
|
||||
// TODO(dennis): we only supports current_timestamp right now,
|
||||
// it's better to use a expression framework in future.
|
||||
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
|
||||
create_current_timestamp_vector(data_type, num_rows).map(Some)
|
||||
}
|
||||
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
|
||||
}
|
||||
}
|
||||
ColumnDefaultConstraint::Value(_) => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Only create default value if it's impure, i.e., it's a function.
|
||||
///
|
||||
/// This helps defer creating constant default values to the mito engine, while also keeping impure default values consistent.
|
||||
pub fn create_impure_default(&self, data_type: &ConcreteDataType) -> Result<Option<Value>> {
|
||||
match self {
|
||||
ColumnDefaultConstraint::Function(expr) => {
|
||||
// Functions should also ensure its return value is not null when
|
||||
// is_nullable is true.
|
||||
match &expr[..] {
|
||||
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
|
||||
create_current_timestamp(data_type).map(Some)
|
||||
}
|
||||
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
|
||||
}
|
||||
}
|
||||
ColumnDefaultConstraint::Value(_) => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this constraint might creates NULL.
|
||||
fn maybe_null(&self) -> bool {
|
||||
// Once we support more functions, we may return true if given function
|
||||
// could return null.
|
||||
matches!(self, ColumnDefaultConstraint::Value(Value::Null))
|
||||
}
|
||||
|
||||
/// Returns true if this constraint is a function.
|
||||
pub fn is_function(&self) -> bool {
|
||||
matches!(self, ColumnDefaultConstraint::Function(_))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_current_timestamp(data_type: &ConcreteDataType) -> Result<Value> {
|
||||
|
||||
@@ -24,6 +24,7 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
|
||||
use common_meta::node_manager::Flownode;
|
||||
use common_telemetry::{debug, trace};
|
||||
use datatypes::value::Value;
|
||||
use itertools::Itertools;
|
||||
use snafu::{IntoError, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
@@ -178,14 +179,32 @@ impl Flownode for FlowWorkerManager {
|
||||
.table_from_id(&table_id)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let default_vals = table_schema
|
||||
.default_values
|
||||
.iter()
|
||||
.zip(table_schema.relation_desc.typ().column_types.iter())
|
||||
.map(|(v, ty)| {
|
||||
v.as_ref().and_then(|v| {
|
||||
match v.create_default(ty.scalar_type(), ty.nullable()) {
|
||||
Ok(v) => Some(v),
|
||||
Err(err) => {
|
||||
common_telemetry::error!(err; "Failed to create default value");
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect_vec();
|
||||
|
||||
let table_types = table_schema
|
||||
.relation_desc
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|t| t.scalar_type)
|
||||
.collect_vec();
|
||||
let table_col_names = table_schema.names;
|
||||
let table_col_names = table_schema.relation_desc.names;
|
||||
let table_col_names = table_col_names
|
||||
.iter().enumerate()
|
||||
.map(|(idx,name)| match name {
|
||||
@@ -202,31 +221,35 @@ impl Flownode for FlowWorkerManager {
|
||||
.enumerate()
|
||||
.map(|(i, name)| (&name.column_name, i)),
|
||||
);
|
||||
let fetch_order: Vec<usize> = table_col_names
|
||||
|
||||
let fetch_order: Vec<FetchFromRow> = table_col_names
|
||||
.iter()
|
||||
.map(|col_name| {
|
||||
.zip(default_vals.into_iter())
|
||||
.map(|(col_name, col_default_val)| {
|
||||
name_to_col
|
||||
.get(col_name)
|
||||
.copied()
|
||||
.map(FetchFromRow::Idx)
|
||||
.or_else(|| col_default_val.clone().map(FetchFromRow::Default))
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
err_msg: format!("Column not found: {}", col_name),
|
||||
err_msg: format!(
|
||||
"Column not found: {}, default_value: {:?}",
|
||||
col_name, col_default_val
|
||||
),
|
||||
})
|
||||
})
|
||||
.try_collect()?;
|
||||
if !fetch_order.iter().enumerate().all(|(i, &v)| i == v) {
|
||||
trace!("Reordering columns: {:?}", fetch_order)
|
||||
}
|
||||
|
||||
trace!("Reordering columns: {:?}", fetch_order);
|
||||
(table_types, fetch_order)
|
||||
};
|
||||
|
||||
// TODO(discord9): use column instead of row
|
||||
let rows: Vec<DiffRow> = rows_proto
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let r = repr::Row::from(r);
|
||||
let reordered = fetch_order
|
||||
.iter()
|
||||
.map(|&i| r.inner[i].clone())
|
||||
.collect_vec();
|
||||
let reordered = fetch_order.iter().map(|i| i.fetch(&r)).collect_vec();
|
||||
repr::Row::new(reordered)
|
||||
})
|
||||
.map(|r| (r, now, 1))
|
||||
@@ -258,3 +281,20 @@ impl Flownode for FlowWorkerManager {
Ok(Default::default())
}
}

/// Simple helper enum for fetching a value from a row or falling back to a default value.
#[derive(Debug, Clone)]
enum FetchFromRow {
Idx(usize),
Default(Value),
}

impl FetchFromRow {
/// Panics if the index is out of bounds.
fn fetch(&self, row: &repr::Row) -> Value {
match self {
FetchFromRow::Idx(idx) => row.get(*idx).unwrap().clone(),
FetchFromRow::Default(v) => v.clone(),
}
}
}

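`FetchFromRow` lets the reordering step either copy a column from the incoming row by index or substitute a pre-computed default when the insert did not carry that column. A small standalone illustration of the same idea, using a toy value type and a plain slice instead of the real `repr::Row`:

#[derive(Debug, Clone, PartialEq)]
enum Val {
    Int(i64),
    Null,
}

enum FetchFromRow {
    Idx(usize),
    Default(Val),
}

impl FetchFromRow {
    fn fetch(&self, row: &[Val]) -> Val {
        match self {
            // Panics if the index is out of bounds, mirroring the behaviour in the diff.
            FetchFromRow::Idx(idx) => row[*idx].clone(),
            FetchFromRow::Default(v) => v.clone(),
        }
    }
}

fn main() {
    let row = vec![Val::Int(42)];
    // Column 0 comes from the row, column 1 falls back to its default.
    let order = vec![FetchFromRow::Idx(0), FetchFromRow::Default(Val::Null)];
    let reordered: Vec<Val> = order.iter().map(|f| f.fetch(&row)).collect();
    assert_eq!(reordered, vec![Val::Int(42), Val::Null]);
}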
@@ -313,7 +313,7 @@ impl FlownodeContext {
|
||||
name: name.join("."),
|
||||
})?;
|
||||
let schema = self.table_source.table(name).await?;
|
||||
Ok((id, schema))
|
||||
Ok((id, schema.relation_desc))
|
||||
}
|
||||
|
||||
/// Assign a global id to a table, if already assigned, return the existing global id
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
|
||||
use common_meta::key::table_name::{TableNameKey, TableNameManager};
|
||||
use datatypes::schema::ColumnDefaultConstraint;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
|
||||
@@ -27,6 +29,32 @@ use crate::error::{
|
||||
};
|
||||
use crate::repr::RelationDesc;
|
||||
|
||||
/// Table description, including the relation desc and default values; this is the minimal table information the flow engine needs.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TableDesc {
pub relation_desc: RelationDesc,
pub default_values: Vec<Option<ColumnDefaultConstraint>>,
}

impl TableDesc {
pub fn new(
relation_desc: RelationDesc,
default_values: Vec<Option<ColumnDefaultConstraint>>,
) -> Self {
Self {
relation_desc,
default_values,
}
}

pub fn new_no_default(relation_desc: RelationDesc) -> Self {
Self {
relation_desc,
default_values: vec![],
}
}
}

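`TableDesc` carries one optional default constraint per column, in column order, so downstream code can zip the defaults with the column list (as the insert-handling hunk above does) instead of performing another metadata lookup. A toy illustration of that zip, with strings standing in for the real constraint type:

fn main() {
    let column_names = vec!["host".to_string(), "ts".to_string()];
    // One entry per column; None means "no default declared".
    let default_values: Vec<Option<String>> = vec![None, Some("now()".to_string())];

    for (name, default) in column_names.iter().zip(default_values.iter()) {
        match default {
            Some(d) => println!("column {name} falls back to default {d}"),
            None => println!("column {name} must be provided by the insert"),
        }
    }
}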
/// Table source but for flow, provide table schema by table name/id
|
||||
#[async_trait::async_trait]
|
||||
pub trait FlowTableSource: Send + Sync + std::fmt::Debug {
|
||||
@@ -34,11 +62,11 @@ pub trait FlowTableSource: Send + Sync + std::fmt::Debug {
|
||||
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error>;
|
||||
|
||||
/// Get the table schema by table name
|
||||
async fn table(&self, name: &TableName) -> Result<RelationDesc, Error> {
|
||||
async fn table(&self, name: &TableName) -> Result<TableDesc, Error> {
|
||||
let id = self.table_id_from_name(name).await?;
|
||||
self.table_from_id(&id).await
|
||||
}
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error>;
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error>;
|
||||
}
|
||||
|
||||
/// managed table source information, query from table info manager and table name manager
|
||||
@@ -51,7 +79,7 @@ pub struct ManagedTableSource {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl FlowTableSource for ManagedTableSource {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error> {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error> {
|
||||
let table_info_value = self
|
||||
.get_table_info_value(table_id)
|
||||
.await?
|
||||
@@ -150,7 +178,7 @@ impl ManagedTableSource {
|
||||
pub async fn get_table_name_schema(
|
||||
&self,
|
||||
table_id: &TableId,
|
||||
) -> Result<(TableName, RelationDesc), Error> {
|
||||
) -> Result<(TableName, TableDesc), Error> {
|
||||
let table_info_value = self
|
||||
.get_table_info_value(table_id)
|
||||
.await?
|
||||
@@ -186,7 +214,7 @@ pub(crate) mod test {
|
||||
use crate::repr::{ColumnType, RelationType};
|
||||
|
||||
pub struct FlowDummyTableSource {
|
||||
pub id_names_to_desc: Vec<(TableId, TableName, RelationDesc)>,
|
||||
pub id_names_to_desc: Vec<(TableId, TableName, TableDesc)>,
|
||||
id_to_idx: HashMap<TableId, usize>,
|
||||
name_to_idx: HashMap<TableName, usize>,
|
||||
}
|
||||
@@ -201,8 +229,10 @@ pub(crate) mod test {
|
||||
"public".to_string(),
|
||||
"numbers".to_string(),
|
||||
],
|
||||
RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)])
|
||||
.into_named(vec![Some("number".to_string())]),
|
||||
TableDesc::new_no_default(
|
||||
RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)])
|
||||
.into_named(vec![Some("number".to_string())]),
|
||||
),
|
||||
),
|
||||
(
|
||||
1025,
|
||||
@@ -211,11 +241,13 @@ pub(crate) mod test {
|
||||
"public".to_string(),
|
||||
"numbers_with_ts".to_string(),
|
||||
],
|
||||
RelationType::new(vec![
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
|
||||
])
|
||||
.into_named(vec![Some("number".to_string()), Some("ts".to_string())]),
|
||||
TableDesc::new_no_default(
|
||||
RelationType::new(vec![
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
|
||||
])
|
||||
.into_named(vec![Some("number".to_string()), Some("ts".to_string())]),
|
||||
),
|
||||
),
|
||||
];
|
||||
let id_to_idx = id_names_to_desc
|
||||
@@ -238,7 +270,7 @@ pub(crate) mod test {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl FlowTableSource for FlowDummyTableSource {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<RelationDesc, Error> {
|
||||
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error> {
|
||||
let idx = self.id_to_idx.get(table_id).context(TableNotFoundSnafu {
|
||||
name: format!("Table id = {:?}, couldn't find table desc", table_id),
|
||||
})?;
|
||||
|
||||
@@ -27,6 +27,7 @@ use session::context::QueryContextBuilder;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::adapter::table_source::TableDesc;
|
||||
use crate::adapter::{TableName, AUTO_CREATED_PLACEHOLDER_TS_COL};
|
||||
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::repr::{ColumnType, RelationDesc, RelationType};
|
||||
@@ -126,7 +127,7 @@ impl FlowWorkerManager {
|
||||
|
||||
pub fn table_info_value_to_relation_desc(
|
||||
table_info_value: TableInfoValue,
|
||||
) -> Result<RelationDesc, Error> {
|
||||
) -> Result<TableDesc, Error> {
|
||||
let raw_schema = table_info_value.table_info.meta.schema;
|
||||
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
|
||||
.column_schemas
|
||||
@@ -147,8 +148,7 @@ pub fn table_info_value_to_relation_desc(
|
||||
let keys = vec![crate::repr::Key::from(key)];
|
||||
|
||||
let time_index = raw_schema.timestamp_index;
|
||||
|
||||
Ok(RelationDesc {
|
||||
let relation_desc = RelationDesc {
|
||||
typ: RelationType {
|
||||
column_types,
|
||||
keys,
|
||||
@@ -157,7 +157,14 @@ pub fn table_info_value_to_relation_desc(
|
||||
auto_columns: vec![],
|
||||
},
|
||||
names: col_names,
|
||||
})
|
||||
};
|
||||
let default_values = raw_schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.map(|c| c.default_constraint().cloned())
|
||||
.collect_vec();
|
||||
|
||||
Ok(TableDesc::new(relation_desc, default_values))
|
||||
}
|
||||
|
||||
pub fn from_proto_to_data_type(
|
||||
|
||||
@@ -34,8 +34,6 @@ common-version.workspace = true
|
||||
common-wal.workspace = true
|
||||
dashmap.workspace = true
|
||||
datatypes.workspace = true
|
||||
deadpool.workspace = true
|
||||
deadpool-postgres.workspace = true
|
||||
derive_builder.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
|
||||
@@ -29,8 +29,6 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_telemetry::error;
|
||||
use common_telemetry::info;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use deadpool_postgres::{Config, Runtime};
|
||||
use etcd_client::Client;
|
||||
use futures::future;
|
||||
use servers::configurator::ConfiguratorRef;
|
||||
@@ -50,9 +48,8 @@ use tonic::transport::server::{Router, TcpIncoming};
|
||||
|
||||
use crate::election::etcd::EtcdElection;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use crate::election::postgres::PgElection;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use crate::election::CANDIDATE_LEASE_SECS;
|
||||
use crate::error::InvalidArgumentsSnafu;
|
||||
use crate::error::{InitExportMetricsTaskSnafu, TomlFormatSnafu};
|
||||
use crate::metasrv::builder::MetasrvBuilder;
|
||||
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
|
||||
use crate::selector::lease_based::LeaseBasedSelector;
|
||||
@@ -85,14 +82,14 @@ impl MetasrvInstance {
|
||||
let httpsrv = Arc::new(
|
||||
HttpServerBuilder::new(opts.http.clone())
|
||||
.with_metrics_handler(MetricsHandler)
|
||||
.with_greptime_config_options(opts.to_toml().context(error::TomlFormatSnafu)?)
|
||||
.with_greptime_config_options(opts.to_toml().context(TomlFormatSnafu)?)
|
||||
.build(),
|
||||
);
|
||||
let metasrv = Arc::new(metasrv);
|
||||
// put metasrv into plugins for later use
|
||||
plugins.insert::<Arc<Metasrv>>(metasrv.clone());
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::InitExportMetricsTaskSnafu)?;
|
||||
.context(InitExportMetricsTaskSnafu)?;
|
||||
Ok(MetasrvInstance {
|
||||
metasrv,
|
||||
httpsrv,
|
||||
@@ -107,7 +104,7 @@ impl MetasrvInstance {
|
||||
self.metasrv.try_start().await?;
|
||||
|
||||
if let Some(t) = self.export_metrics_task.as_ref() {
|
||||
t.start(None).context(error::InitExportMetricsTaskSnafu)?
|
||||
t.start(None).context(InitExportMetricsTaskSnafu)?
|
||||
}
|
||||
|
||||
let (tx, rx) = mpsc::channel::<()>(1);
|
||||
@@ -228,20 +225,11 @@ pub async fn metasrv_builder(
|
||||
}
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
(None, BackendImpl::PostgresStore) => {
|
||||
let pool = create_postgres_pool(opts).await?;
|
||||
let kv_backend = PgStore::with_pg_pool(pool)
|
||||
let pg_client = create_postgres_client(opts).await?;
|
||||
let kv_backend = PgStore::with_pg_client(pg_client)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
// Client for election should be created separately since we need a different session keep-alive idle time.
|
||||
let election_client = create_postgres_client(opts).await?;
|
||||
let election = PgElection::with_pg_client(
|
||||
opts.server_addr.clone(),
|
||||
election_client,
|
||||
opts.store_key_prefix.clone(),
|
||||
CANDIDATE_LEASE_SECS,
|
||||
)
|
||||
.await?;
|
||||
(kv_backend, Some(election))
|
||||
(kv_backend, None)
|
||||
}
|
||||
};
|
||||
|
||||
@@ -287,12 +275,9 @@ async fn create_etcd_client(opts: &MetasrvOptions) -> Result<Client> {
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres::Client> {
|
||||
let postgres_url = opts
|
||||
.store_addrs
|
||||
.first()
|
||||
.context(error::InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let postgres_url = opts.store_addrs.first().context(InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let (client, connection) = tokio_postgres::connect(postgres_url, NoTls)
|
||||
.await
|
||||
.context(error::ConnectPostgresSnafu)?;
|
||||
@@ -304,19 +289,3 @@ async fn create_postgres_client(opts: &MetasrvOptions) -> Result<tokio_postgres:
|
||||
});
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
async fn create_postgres_pool(opts: &MetasrvOptions) -> Result<deadpool_postgres::Pool> {
|
||||
let postgres_url = opts
|
||||
.store_addrs
|
||||
.first()
|
||||
.context(error::InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
let mut cfg = Config::new();
|
||||
cfg.url = Some(postgres_url.to_string());
|
||||
let pool = cfg
|
||||
.create_pool(Some(Runtime::Tokio1), NoTls)
|
||||
.context(error::CreatePostgresPoolSnafu)?;
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
@@ -19,9 +19,7 @@ pub mod postgres;
|
||||
use std::fmt::{self, Debug};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::{info, warn};
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::broadcast::{self, Receiver, Sender};
|
||||
use tokio::sync::broadcast::Receiver;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::metasrv::MetasrvNodeInfo;
|
||||
@@ -77,37 +75,6 @@ impl fmt::Display for LeaderChangeMessage {
|
||||
}
|
||||
}
|
||||
|
||||
fn listen_leader_change(leader_value: String) -> Sender<LeaderChangeMessage> {
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let _handle = common_runtime::spawn_global(async move {
|
||||
loop {
|
||||
match rx.recv().await {
|
||||
Ok(msg) => match msg {
|
||||
LeaderChangeMessage::Elected(key) => {
|
||||
info!(
|
||||
"[{leader_value}] is elected as leader: {:?}, lease: {}",
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
LeaderChangeMessage::StepDown(key) => {
|
||||
warn!(
|
||||
"[{leader_value}] is stepping down: {:?}, lease: {}",
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
},
|
||||
Err(RecvError::Lagged(_)) => {
|
||||
warn!("Log printing is too slow or leader changed too fast!");
|
||||
}
|
||||
Err(RecvError::Closed) => break,
|
||||
}
|
||||
}
|
||||
});
|
||||
tx
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait Election: Send + Sync {
|
||||
type Leader;
|
||||
|
||||
@@ -23,12 +23,13 @@ use etcd_client::{
|
||||
};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::broadcast::Receiver;
|
||||
use tokio::time::{timeout, MissedTickBehavior};
|
||||
|
||||
use crate::election::{
|
||||
listen_leader_change, Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT,
|
||||
CANDIDATE_LEASE_SECS, ELECTION_KEY, KEEP_ALIVE_INTERVAL_SECS,
|
||||
Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT, CANDIDATE_LEASE_SECS, ELECTION_KEY,
|
||||
KEEP_ALIVE_INTERVAL_SECS,
|
||||
};
|
||||
use crate::error;
|
||||
use crate::error::Result;
|
||||
@@ -87,7 +88,36 @@ impl EtcdElection {
|
||||
E: AsRef<str>,
|
||||
{
|
||||
let leader_value: String = leader_value.as_ref().into();
|
||||
let tx = listen_leader_change(leader_value.clone());
|
||||
|
||||
let leader_ident = leader_value.clone();
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let _handle = common_runtime::spawn_global(async move {
|
||||
loop {
|
||||
match rx.recv().await {
|
||||
Ok(msg) => match msg {
|
||||
LeaderChangeMessage::Elected(key) => {
|
||||
info!(
|
||||
"[{leader_ident}] is elected as leader: {:?}, lease: {}",
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
LeaderChangeMessage::StepDown(key) => {
|
||||
warn!(
|
||||
"[{leader_ident}] is stepping down: {:?}, lease: {}",
|
||||
String::from_utf8_lossy(key.name()),
|
||||
key.lease_id()
|
||||
);
|
||||
}
|
||||
},
|
||||
Err(RecvError::Lagged(_)) => {
|
||||
warn!("Log printing is too slow or leader changed too fast!");
|
||||
}
|
||||
Err(RecvError::Closed) => break,
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Arc::new(Self {
|
||||
leader_value,
|
||||
client,
|
||||
|
||||
@@ -16,32 +16,18 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::distributed_time_constants::{META_KEEP_ALIVE_INTERVAL_SECS, META_LEASE_SECS};
|
||||
use common_telemetry::{error, warn};
|
||||
use common_time::Timestamp;
|
||||
use itertools::Itertools;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::time::MissedTickBehavior;
|
||||
use tokio_postgres::Client;
|
||||
|
||||
use crate::election::{
|
||||
listen_leader_change, Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT, ELECTION_KEY,
|
||||
};
|
||||
use crate::election::{Election, LeaderChangeMessage, CANDIDATES_ROOT, ELECTION_KEY};
|
||||
use crate::error::{
|
||||
DeserializeFromJsonSnafu, NoLeaderSnafu, PostgresExecutionSnafu, Result, SerializeToJsonSnafu,
|
||||
UnexpectedSnafu,
|
||||
DeserializeFromJsonSnafu, PostgresExecutionSnafu, Result, SerializeToJsonSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::metasrv::{ElectionRef, LeaderValue, MetasrvNodeInfo};
|
||||
|
||||
// TODO(CookiePie): The lock id should be configurable.
const CAMPAIGN: &str = "SELECT pg_try_advisory_lock(28319)";
const STEP_DOWN: &str = "SELECT pg_advisory_unlock(28319)";
const SET_IDLE_SESSION_TIMEOUT: &str = "SET idle_in_transaction_session_timeout = $1";
// Currently the session timeout is longer than the leader lease time, so the leader lease may expire while the session is still alive.
// Either the leader reconnects and steps down, or the session expires and the lock is released.
const IDLE_SESSION_TIMEOUT: &str = "10s";

// Separator between value and expire time.
const LEASE_SEP: &str = r#"||__metadata_lease_sep||"#;

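The election is built on a PostgreSQL session-level advisory lock: `pg_try_advisory_lock(28319)` returns true for exactly one session at a time, and the lock is released either by `pg_advisory_unlock(28319)` or when the session dies, which is why the idle session timeout above matters. A minimal standalone check of that behaviour with `tokio_postgres` (the connection URL is a placeholder):

use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let url = "postgresql://user:pass@localhost/postgres"; // placeholder
    let (client, conn) = tokio_postgres::connect(url, NoTls).await?;
    tokio::spawn(conn);

    // Only one session can hold advisory lock 28319 at a time.
    let row = client
        .query_one("SELECT pg_try_advisory_lock(28319)", &[])
        .await?;
    let acquired: bool = row.get(0);
    println!("campaign won: {acquired}");

    if acquired {
        // Explicit release; closing the session would release it as well.
        client
            .query_one("SELECT pg_advisory_unlock(28319)", &[])
            .await?;
    }
    Ok(())
}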
@@ -95,33 +81,8 @@ fn parse_value_and_expire_time(value: &str) -> Result<(String, Timestamp)> {
|
||||
Ok((value.to_string(), expire_time))
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
struct PgLeaderKey {
|
||||
name: Vec<u8>,
|
||||
key: Vec<u8>,
|
||||
rev: i64,
|
||||
lease: i64,
|
||||
}
|
||||
|
||||
impl LeaderKey for PgLeaderKey {
|
||||
fn name(&self) -> &[u8] {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn key(&self) -> &[u8] {
|
||||
&self.key
|
||||
}
|
||||
|
||||
fn revision(&self) -> i64 {
|
||||
self.rev
|
||||
}
|
||||
|
||||
fn lease_id(&self) -> i64 {
|
||||
self.lease
|
||||
}
|
||||
}
|
||||
|
||||
/// PostgreSql implementation of Election.
|
||||
/// TODO(CookiePie): Currently only support candidate registration. Add election logic.
|
||||
pub struct PgElection {
|
||||
leader_value: String,
|
||||
client: Client,
|
||||
@@ -139,13 +100,7 @@ impl PgElection {
|
||||
store_key_prefix: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
) -> Result<ElectionRef> {
|
||||
// Set idle session timeout to IDLE_SESSION_TIMEOUT to avoid dead advisory lock.
|
||||
client
|
||||
.execute(SET_IDLE_SESSION_TIMEOUT, &[&IDLE_SESSION_TIMEOUT])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
|
||||
let tx = listen_leader_change(leader_value.clone());
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
Ok(Arc::new(Self {
|
||||
leader_value,
|
||||
client,
|
||||
@@ -157,7 +112,7 @@ impl PgElection {
|
||||
}))
|
||||
}
|
||||
|
||||
fn election_key(&self) -> String {
|
||||
fn _election_key(&self) -> String {
|
||||
format!("{}{}", self.store_key_prefix, ELECTION_KEY)
|
||||
}
|
||||
|
||||
@@ -191,14 +146,11 @@ impl Election for PgElection {
|
||||
serde_json::to_string(node_info).with_context(|_| SerializeToJsonSnafu {
|
||||
input: format!("{node_info:?}"),
|
||||
})?;
|
||||
let res = self
|
||||
.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl_secs)
|
||||
.await?;
|
||||
let res = self.put_value_with_lease(&key, &node_info).await?;
|
||||
// May registered before, just update the lease.
|
||||
if !res {
|
||||
self.delete_value(&key).await?;
|
||||
self.put_value_with_lease(&key, &node_info, self.candidate_lease_ttl_secs)
|
||||
.await?;
|
||||
self.put_value_with_lease(&key, &node_info).await?;
|
||||
}
|
||||
|
||||
// Check if the current lease has expired and renew the lease.
|
||||
@@ -245,65 +197,12 @@ impl Election for PgElection {
|
||||
Ok(valid_candidates)
|
||||
}
|
||||
|
||||
/// Attempts to acquire leadership by executing a campaign. This function continuously checks
|
||||
/// if the current instance can become the leader by acquiring an advisory lock in the PostgreSQL database.
|
||||
///
|
||||
/// The function operates in a loop, where it:
|
||||
///
|
||||
/// 1. Waits for a predefined interval before attempting to acquire the lock again.
|
||||
/// 2. Executes the `CAMPAIGN` SQL query to try to acquire the advisory lock.
|
||||
/// 3. Checks the result of the query:
|
||||
/// - If the lock is successfully acquired (result is true), it calls the `leader_action` method
|
||||
/// to perform actions as the leader.
|
||||
/// - If the lock is not acquired (result is false), it calls the `follower_action` method
|
||||
/// to perform actions as a follower.
|
||||
async fn campaign(&self) -> Result<()> {
|
||||
let mut keep_alive_interval =
|
||||
tokio::time::interval(Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS));
|
||||
keep_alive_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
||||
|
||||
loop {
|
||||
let res = self
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
if let Some(row) = res.first() {
|
||||
match row.try_get(0) {
|
||||
Ok(true) => self.leader_action().await?,
|
||||
Ok(false) => self.follower_action().await?,
|
||||
Err(_) => {
|
||||
return UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock"
|
||||
.to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock".to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let _ = keep_alive_interval.tick().await;
|
||||
}
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn leader(&self) -> Result<Self::Leader> {
|
||||
if self.is_leader.load(Ordering::Relaxed) {
|
||||
Ok(self.leader_value.as_bytes().into())
|
||||
} else {
|
||||
let key = self.election_key();
|
||||
if let Some((leader, expire_time, current, _)) =
|
||||
self.get_value_with_lease(&key, false).await?
|
||||
{
|
||||
ensure!(expire_time > current, NoLeaderSnafu);
|
||||
Ok(leader.as_bytes().into())
|
||||
} else {
|
||||
NoLeaderSnafu.fail()
|
||||
}
|
||||
}
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn resign(&self) -> Result<()> {
|
||||
@@ -416,17 +315,17 @@ impl PgElection {
|
||||
}
|
||||
|
||||
/// Returns `true` if the insertion is successful
|
||||
async fn put_value_with_lease(
|
||||
&self,
|
||||
key: &str,
|
||||
value: &str,
|
||||
lease_ttl_secs: u64,
|
||||
) -> Result<bool> {
|
||||
async fn put_value_with_lease(&self, key: &str, value: &str) -> Result<bool> {
|
||||
let res = self
|
||||
.client
|
||||
.query(
|
||||
PUT_IF_NOT_EXISTS_WITH_EXPIRE_TIME,
|
||||
&[&key, &value, &LEASE_SEP, &(lease_ttl_secs as f64)],
|
||||
&[
|
||||
&key,
|
||||
&value,
|
||||
&LEASE_SEP,
|
||||
&(self.candidate_lease_ttl_secs as f64),
|
||||
],
|
||||
)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
@@ -444,177 +343,6 @@ impl PgElection {
|
||||
|
||||
Ok(res.len() == 1)
|
||||
}
|
||||
|
||||
/// Handles the actions of a leader in the election process.
|
||||
///
|
||||
/// This function performs the following checks and actions:
|
||||
///
|
||||
/// - **Case 1**: If the current instance believes it is the leader from the previous term,
|
||||
/// it attempts to renew the lease. It checks if the lease is still valid and either renews it
|
||||
/// or steps down if it has expired.
|
||||
///
|
||||
/// - **Case 1.1**: If the instance is still the leader and the lease is valid, it renews the lease
|
||||
/// by updating the value associated with the election key.
|
||||
/// - **Case 1.2**: If the instance is still the leader but the lease has expired, it logs a warning
|
||||
/// and steps down, initiating a new campaign for leadership.
|
||||
/// - **Case 1.3**: If the instance is not the leader (which is a rare scenario), it logs a warning
|
||||
/// indicating that it still holds the lock and steps down to re-initiate the campaign. This may
|
||||
/// happen if the leader has failed to renew the lease and the session has expired, and recovery
|
||||
/// after a period of time during which other leaders have been elected and stepped down.
|
||||
/// - **Case 1.4**: If no lease information is found, it also steps down and re-initiates the campaign.
|
||||
///
|
||||
/// - **Case 2**: If the current instance is not leader previously, it calls the
|
||||
/// `elected` method as a newly elected leader.
|
||||
async fn leader_action(&self) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
// Case 1
|
||||
if self.is_leader() {
|
||||
match self.get_value_with_lease(&key, true).await? {
|
||||
Some((prev_leader, expire_time, current, prev)) => {
|
||||
match (prev_leader == self.leader_value, expire_time > current) {
|
||||
// Case 1.1
|
||||
(true, true) => {
|
||||
// Safety: prev is Some since we are using `get_value_with_lease` with `true`.
|
||||
let prev = prev.unwrap();
|
||||
self.update_value_with_lease(&key, &prev, &self.leader_value)
|
||||
.await?;
|
||||
}
|
||||
// Case 1.2
|
||||
(true, false) => {
|
||||
warn!("Leader lease expired, now stepping down.");
|
||||
self.step_down().await?;
|
||||
}
|
||||
// Case 1.3
|
||||
(false, _) => {
|
||||
warn!("Leader lease not found, but still hold the lock. Now stepping down.");
|
||||
self.step_down().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Case 1.4
|
||||
None => {
|
||||
warn!("Leader lease not found, but still hold the lock. Now stepping down.");
|
||||
self.step_down().await?;
|
||||
}
|
||||
}
|
||||
// Case 2
|
||||
} else {
|
||||
self.elected().await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles the actions of a follower in the election process.
|
||||
///
|
||||
/// This function performs the following checks and actions:
|
||||
///
|
||||
/// - **Case 1**: If the current instance believes it is the leader from the previous term,
|
||||
/// it steps down without deleting the key.
|
||||
/// - **Case 2**: If the current instance is not the leader but the lease has expired, it raises an error
|
||||
/// to re-initiate the campaign. If the leader failed to renew the lease, its session will expire and the lock
|
||||
/// will be released.
|
||||
/// - **Case 3**: If all checks pass, the function returns without performing any actions.
|
||||
async fn follower_action(&self) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
// Case 1
|
||||
if self.is_leader() {
|
||||
self.step_down_without_lock().await?;
|
||||
}
|
||||
let (_, expire_time, current, _) = self
|
||||
.get_value_with_lease(&key, false)
|
||||
.await?
|
||||
.context(NoLeaderSnafu)?;
|
||||
// Case 2
|
||||
ensure!(expire_time > current, NoLeaderSnafu);
|
||||
// Case 3
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Step down the leader. The leader should delete the key and notify the leader watcher.
|
||||
///
|
||||
/// __DO NOT__ check if the deletion is successful, since the key may be deleted by others elected.
|
||||
///
|
||||
/// ## Caution:
|
||||
/// Should only step down while holding the advisory lock.
|
||||
async fn step_down(&self) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
let leader_key = PgLeaderKey {
|
||||
name: self.leader_value.clone().into_bytes(),
|
||||
key: key.clone().into_bytes(),
|
||||
..Default::default()
|
||||
};
|
||||
if self
|
||||
.is_leader
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
self.delete_value(&key).await?;
|
||||
self.client
|
||||
.query(STEP_DOWN, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::StepDown(Arc::new(leader_key)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Still consider itself as the leader locally but failed to acquire the lock. Step down without deleting the key.
|
||||
async fn step_down_without_lock(&self) -> Result<()> {
|
||||
let key = self.election_key().into_bytes();
|
||||
let leader_key = PgLeaderKey {
|
||||
name: self.leader_value.clone().into_bytes(),
|
||||
key: key.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
if self
|
||||
.is_leader
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::StepDown(Arc::new(leader_key)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Elected as leader. The leader should put the key and notify the leader watcher.
|
||||
/// Caution: Should only elected while holding the advisory lock.
|
||||
async fn elected(&self) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
let leader_key = PgLeaderKey {
|
||||
name: self.leader_value.clone().into_bytes(),
|
||||
key: key.clone().into_bytes(),
|
||||
..Default::default()
|
||||
};
|
||||
self.delete_value(&key).await?;
|
||||
self.put_value_with_lease(&key, &self.leader_value, META_LEASE_SECS)
|
||||
.await?;
|
||||
|
||||
if self
|
||||
.is_leader
|
||||
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
self.leader_infancy.store(true, Ordering::Relaxed);
|
||||
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::Elected(Arc::new(leader_key)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -662,7 +390,7 @@ mod tests {
|
||||
};
|
||||
|
||||
let res = pg_election
|
||||
.put_value_with_lease(&key, &value, 10)
|
||||
.put_value_with_lease(&key, &value)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res);
|
||||
@@ -690,7 +418,7 @@ mod tests {
|
||||
let key = format!("test_key_{}", i);
|
||||
let value = format!("test_value_{}", i);
|
||||
pg_election
|
||||
.put_value_with_lease(&key, &value, 10)
|
||||
.put_value_with_lease(&key, &value)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -750,7 +478,7 @@ mod tests {
|
||||
handles.push(handle);
|
||||
}
|
||||
// Wait for candidates to register themselves and renew their leases at least once.
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
tokio::time::sleep(Duration::from_secs(6)).await;
|
||||
|
||||
let client = create_postgres_client().await.unwrap();
|
||||
|
||||
@@ -788,402 +516,4 @@ mod tests {
|
||||
assert!(res);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_elected_and_step_down() {
|
||||
let leader_value = "test_leader".to_string();
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let client = create_postgres_client().await.unwrap();
|
||||
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: leader_value.clone(),
|
||||
client,
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: "test_prefix".to_string(),
|
||||
candidate_lease_ttl_secs,
|
||||
};
|
||||
|
||||
leader_pg_election.elected().await.unwrap();
|
||||
let (leader, expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(expire_time > current);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::Elected(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::Elected"),
|
||||
}
|
||||
|
||||
leader_pg_election.step_down_without_lock().await.unwrap();
|
||||
let (leader, _, _, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(!leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
|
||||
leader_pg_election.elected().await.unwrap();
|
||||
let (leader, expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(expire_time > current);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::Elected(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::Elected"),
|
||||
}
|
||||
|
||||
leader_pg_election.step_down().await.unwrap();
|
||||
let res = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res.is_none());
|
||||
assert!(!leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_leader_action() {
|
||||
let leader_value = "test_leader".to_string();
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
let client = create_postgres_client().await.unwrap();
|
||||
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: leader_value.clone(),
|
||||
client,
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: "test_prefix".to_string(),
|
||||
candidate_lease_ttl_secs,
|
||||
};
|
||||
|
||||
// Step 1: No leader exists, campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let (leader, expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(expire_time > current);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::Elected(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::Elected"),
|
||||
}
|
||||
|
||||
// Step 2: As a leader, renew the lease.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let (leader, new_expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(new_expire_time > current && new_expire_time > expire_time);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
// Step 3: Something wrong, the leader lease expired.
|
||||
tokio::time::sleep(Duration::from_secs(META_LEASE_SECS)).await;
|
||||
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let res = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res.is_none());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
|
||||
// Step 4: Re-campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let (leader, expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(expire_time > current);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::Elected(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::Elected"),
|
||||
}
|
||||
|
||||
// Step 5: Something wrong, the leader key is deleted by other followers.
|
||||
leader_pg_election
|
||||
.delete_value(&leader_pg_election.election_key())
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let res = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res.is_none());
|
||||
assert!(!leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
|
||||
// Step 6: Re-campaign and elected.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let (leader, expire_time, current, _) = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(leader == leader_value);
|
||||
assert!(expire_time > current);
|
||||
assert!(leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::Elected(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::Elected"),
|
||||
}
|
||||
|
||||
// Step 7: Something wrong, the leader key changed by others.
|
||||
let res = leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
let res: bool = res[0].get(0);
|
||||
assert!(res);
|
||||
leader_pg_election
|
||||
.delete_value(&leader_pg_election.election_key())
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election
|
||||
.put_value_with_lease(&leader_pg_election.election_key(), "test", 10)
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election.leader_action().await.unwrap();
|
||||
let res = leader_pg_election
|
||||
.get_value_with_lease(&leader_pg_election.election_key(), false)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(res.is_none());
|
||||
assert!(!leader_pg_election.is_leader());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), leader_value);
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
leader_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_follower_action() {
|
||||
let candidate_lease_ttl_secs = 5;
|
||||
|
||||
let follower_client = create_postgres_client().await.unwrap();
|
||||
let (tx, mut rx) = broadcast::channel(100);
|
||||
let follower_pg_election = PgElection {
|
||||
leader_value: "test_follower".to_string(),
|
||||
client: follower_client,
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: "test_prefix".to_string(),
|
||||
candidate_lease_ttl_secs,
|
||||
};
|
||||
|
||||
let leader_client = create_postgres_client().await.unwrap();
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
let leader_pg_election = PgElection {
|
||||
leader_value: "test_leader".to_string(),
|
||||
client: leader_client,
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(true),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix: "test_prefix".to_string(),
|
||||
candidate_lease_ttl_secs,
|
||||
};
|
||||
|
||||
leader_pg_election
|
||||
.client
|
||||
.query(CAMPAIGN, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
leader_pg_election.elected().await.unwrap();
|
||||
|
||||
// Step 1: As a follower, the leader exists and the lease is not expired.
|
||||
follower_pg_election.follower_action().await.unwrap();
|
||||
|
||||
// Step 2: As a follower, the leader exists but the lease expired.
|
||||
tokio::time::sleep(Duration::from_secs(META_LEASE_SECS)).await;
|
||||
assert!(follower_pg_election.follower_action().await.is_err());
|
||||
|
||||
// Step 3: As a follower, the leader does not exist.
|
||||
leader_pg_election
|
||||
.delete_value(&leader_pg_election.election_key())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(follower_pg_election.follower_action().await.is_err());
|
||||
|
||||
// Step 4: Follower thinks it's the leader but failed to acquire the lock.
|
||||
follower_pg_election
|
||||
.is_leader
|
||||
.store(true, Ordering::Relaxed);
|
||||
assert!(follower_pg_election.follower_action().await.is_err());
|
||||
|
||||
match rx.recv().await {
|
||||
Ok(LeaderChangeMessage::StepDown(key)) => {
|
||||
assert_eq!(String::from_utf8_lossy(key.name()), "test_follower");
|
||||
assert_eq!(
|
||||
String::from_utf8_lossy(key.key()),
|
||||
follower_pg_election.election_key()
|
||||
);
|
||||
assert_eq!(key.lease_id(), i64::default());
|
||||
assert_eq!(key.revision(), i64::default());
|
||||
}
|
||||
_ => panic!("Expected LeaderChangeMessage::StepDown"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -704,7 +704,7 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to connect to Postgres"))]
|
||||
#[snafu(display("Failed to connect to PostgresSQL"))]
|
||||
ConnectPostgres {
|
||||
#[snafu(source)]
|
||||
error: tokio_postgres::Error,
|
||||
@@ -712,23 +712,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to create connection pool for Postgres"))]
|
||||
CreatePostgresPool {
|
||||
#[snafu(source)]
|
||||
error: deadpool_postgres::CreatePoolError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to get connection from Postgres pool: {}", reason))]
|
||||
GetPostgresConnection {
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Handler not found: {}", name))]
|
||||
HandlerNotFound {
|
||||
name: String,
|
||||
@@ -860,10 +843,9 @@ impl ErrorExt for Error {
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
Error::LookupPeer { source, .. } => source.status_code(),
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
Error::CreatePostgresPool { .. }
|
||||
| Error::GetPostgresConnection { .. }
|
||||
| Error::PostgresExecution { .. }
|
||||
| Error::ConnectPostgres { .. } => StatusCode::Internal,
|
||||
Error::ConnectPostgres { .. } => StatusCode::Internal,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
Error::PostgresExecution { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use api::v1::region::compact_request;
|
||||
use api::v1::region::compact_request::Options;
|
||||
use common_base::Plugins;
|
||||
use common_meta::key::SchemaMetadataManagerRef;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
@@ -40,6 +41,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use table::predicate::Predicate;
|
||||
use task::MAX_PARALLEL_COMPACTION;
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
|
||||
use crate::access_layer::AccessLayerRef;
|
||||
@@ -49,9 +51,9 @@ use crate::compaction::picker::{new_picker, CompactionTask};
|
||||
use crate::compaction::task::CompactionTaskImpl;
|
||||
use crate::config::MitoConfig;
|
||||
use crate::error::{
|
||||
CompactRegionSnafu, Error, GetSchemaMetadataSnafu, RegionClosedSnafu, RegionDroppedSnafu,
|
||||
RegionTruncatedSnafu, RemoteCompactionSnafu, Result, TimeRangePredicateOverflowSnafu,
|
||||
TimeoutSnafu,
|
||||
CompactRegionSnafu, Error, GetSchemaMetadataSnafu, ManualCompactionOverrideSnafu,
|
||||
RegionClosedSnafu, RegionDroppedSnafu, RegionTruncatedSnafu, RemoteCompactionSnafu, Result,
|
||||
TimeRangePredicateOverflowSnafu, TimeoutSnafu,
|
||||
};
|
||||
use crate::metrics::{COMPACTION_STAGE_ELAPSED, INFLIGHT_COMPACTION_COUNT};
|
||||
use crate::read::projection::ProjectionMapper;
|
||||
@@ -85,19 +87,13 @@ pub struct CompactionRequest {
|
||||
pub(crate) manifest_ctx: ManifestContextRef,
|
||||
pub(crate) listener: WorkerListener,
|
||||
pub(crate) schema_metadata_manager: SchemaMetadataManagerRef,
|
||||
pub(crate) max_parallelism: usize,
|
||||
}
|
||||
|
||||
impl CompactionRequest {
|
||||
pub(crate) fn region_id(&self) -> RegionId {
|
||||
self.current_version.metadata.region_id
|
||||
}
|
||||
|
||||
/// Push waiter to the request.
|
||||
pub(crate) fn push_waiter(&mut self, mut waiter: OptionOutputTx) {
|
||||
if let Some(waiter) = waiter.take_inner() {
|
||||
self.waiters.push(waiter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compaction scheduler tracks and manages compaction tasks.
|
||||
@@ -145,10 +141,27 @@ impl CompactionScheduler {
|
||||
waiter: OptionOutputTx,
|
||||
manifest_ctx: &ManifestContextRef,
|
||||
schema_metadata_manager: SchemaMetadataManagerRef,
|
||||
max_parallelism: usize,
|
||||
) -> Result<()> {
|
||||
if let Some(status) = self.region_status.get_mut(®ion_id) {
|
||||
// Region is compacting. Add the waiter to pending list.
|
||||
status.merge_waiter(waiter);
|
||||
match compact_options {
|
||||
Options::Regular(_) => {
|
||||
// Region is compacting. Add the waiter to pending list.
|
||||
status.merge_waiter(waiter);
|
||||
}
|
||||
options @ Options::StrictWindow(_) => {
|
||||
// Incoming compaction request is manually triggered.
|
||||
status.set_pending_request(PendingCompaction {
|
||||
options,
|
||||
waiter,
|
||||
max_parallelism,
|
||||
});
|
||||
info!(
|
||||
"Region {} is compacting, manually compaction will be re-scheduled.",
|
||||
region_id
|
||||
);
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
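To summarize the branch added above: while a region already has a compaction task running, a regular request only parks its waiter on the region status, whereas a manually triggered StrictWindow request is stashed whole (options, waiter, parallelism) so it can be re-scheduled once the running task finishes. A minimal sketch of that dispatch with simplified stand-in types (not the mito2 API):

// Simplified stand-ins for compact_request::Options, OptionOutputTx and
// PendingCompaction; type and field names are illustrative.
enum Options { Regular, StrictWindow { window_seconds: i64 } }

struct Waiter;

struct Pending { options: Options, waiter: Waiter, max_parallelism: usize }

#[derive(Default)]
struct RegionStatus { waiters: Vec<Waiter>, pending_request: Option<Pending> }

impl RegionStatus {
    // Called only when a compaction task is already running for the region.
    fn on_concurrent_request(&mut self, options: Options, waiter: Waiter, max_parallelism: usize) {
        match options {
            // Regular requests piggyback on the running task.
            Options::Regular => self.waiters.push(waiter),
            // Manual requests are remembered and re-scheduled afterwards.
            opts @ Options::StrictWindow { .. } => {
                self.pending_request = Some(Pending { options: opts, waiter, max_parallelism });
            }
        }
    }
}

Note that the real scheduler also fails any previously stashed manual request when a new one arrives (see the ManualCompactionOverride handling further down).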
@@ -163,6 +176,7 @@ impl CompactionScheduler {
|
||||
manifest_ctx,
|
||||
self.listener.clone(),
|
||||
schema_metadata_manager,
|
||||
max_parallelism,
|
||||
);
|
||||
self.region_status.insert(region_id, status);
|
||||
let result = self
|
||||
@@ -184,6 +198,35 @@ impl CompactionScheduler {
|
||||
return;
|
||||
};
|
||||
|
||||
if let Some(pending_request) = std::mem::take(&mut status.pending_request) {
|
||||
let PendingCompaction {
|
||||
options,
|
||||
waiter,
|
||||
max_parallelism,
|
||||
} = pending_request;
|
||||
|
||||
let request = status.new_compaction_request(
|
||||
self.request_sender.clone(),
|
||||
waiter,
|
||||
self.engine_config.clone(),
|
||||
self.cache_manager.clone(),
|
||||
manifest_ctx,
|
||||
self.listener.clone(),
|
||||
schema_metadata_manager,
|
||||
max_parallelism,
|
||||
);
|
||||
|
||||
if let Err(e) = self.schedule_compaction_request(request, options).await {
|
||||
error!(e; "Failed to continue pending manual compaction for region id: {}", region_id);
|
||||
} else {
|
||||
debug!(
|
||||
"Successfully scheduled manual compaction for region id: {}",
|
||||
region_id
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// We should always try to compact the region until picker returns None.
|
||||
let request = status.new_compaction_request(
|
||||
self.request_sender.clone(),
|
||||
@@ -193,6 +236,7 @@ impl CompactionScheduler {
|
||||
manifest_ctx,
|
||||
self.listener.clone(),
|
||||
schema_metadata_manager,
|
||||
MAX_PARALLEL_COMPACTION,
|
||||
);
|
||||
// Try to schedule next compaction task for this region.
|
||||
if let Err(e) = self
|
||||
@@ -264,6 +308,7 @@ impl CompactionScheduler {
|
||||
manifest_ctx,
|
||||
listener,
|
||||
schema_metadata_manager,
|
||||
max_parallelism,
|
||||
} = request;
|
||||
|
||||
let ttl = find_ttl(
|
||||
@@ -294,6 +339,7 @@ impl CompactionScheduler {
|
||||
manifest_ctx: manifest_ctx.clone(),
|
||||
file_purger: None,
|
||||
ttl: Some(ttl),
|
||||
max_parallelism,
|
||||
};
|
||||
|
||||
let picker_output = {
|
||||
@@ -417,27 +463,6 @@ impl Drop for CompactionScheduler {
|
||||
}
|
||||
}
|
||||
|
||||
/// Pending compaction tasks.
|
||||
struct PendingCompaction {
|
||||
waiters: Vec<OutputTx>,
|
||||
}
|
||||
|
||||
impl PendingCompaction {
|
||||
/// Push waiter to the request.
|
||||
fn push_waiter(&mut self, mut waiter: OptionOutputTx) {
|
||||
if let Some(waiter) = waiter.take_inner() {
|
||||
self.waiters.push(waiter);
|
||||
}
|
||||
}
|
||||
|
||||
/// Send compaction error to waiter.
|
||||
fn on_failure(&mut self, region_id: RegionId, err: Arc<Error>) {
|
||||
for waiter in self.waiters.drain(..) {
|
||||
waiter.send(Err(err.clone()).context(CompactRegionSnafu { region_id }));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Finds TTL of table by first examine table options then database options.
|
||||
async fn find_ttl(
|
||||
table_id: TableId,
|
||||
@@ -471,10 +496,10 @@ struct CompactionStatus {
|
||||
version_control: VersionControlRef,
|
||||
/// Access layer of the region.
|
||||
access_layer: AccessLayerRef,
|
||||
/// Compaction pending to schedule.
|
||||
///
|
||||
/// For simplicity, we merge all pending compaction requests into one.
|
||||
pending_compaction: Option<PendingCompaction>,
|
||||
/// Pending waiters for compaction.
|
||||
waiters: Vec<OutputTx>,
|
||||
/// Pending compactions that are supposed to run as soon as current compaction task finished.
|
||||
pending_request: Option<PendingCompaction>,
|
||||
}
|
||||
|
||||
impl CompactionStatus {
|
||||
@@ -488,23 +513,44 @@ impl CompactionStatus {
|
||||
region_id,
|
||||
version_control,
|
||||
access_layer,
|
||||
pending_compaction: None,
|
||||
waiters: Vec::new(),
|
||||
pending_request: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge the watier to the pending compaction.
|
||||
fn merge_waiter(&mut self, waiter: OptionOutputTx) {
|
||||
let pending = self
|
||||
.pending_compaction
|
||||
.get_or_insert_with(|| PendingCompaction {
|
||||
waiters: Vec::new(),
|
||||
});
|
||||
pending.push_waiter(waiter);
|
||||
/// Merge the waiter to the pending compaction.
|
||||
fn merge_waiter(&mut self, mut waiter: OptionOutputTx) {
|
||||
if let Some(waiter) = waiter.take_inner() {
|
||||
self.waiters.push(waiter);
|
||||
}
|
||||
}
|
||||
|
||||
fn on_failure(self, err: Arc<Error>) {
|
||||
if let Some(mut pending) = self.pending_compaction {
|
||||
pending.on_failure(self.region_id, err.clone());
|
||||
/// Set pending compaction request or replace current value if already exist.
|
||||
fn set_pending_request(&mut self, pending: PendingCompaction) {
|
||||
if let Some(mut prev) = self.pending_request.replace(pending) {
|
||||
debug!(
|
||||
"Replace pending compaction options with new request {:?} for region: {}",
|
||||
prev.options, self.region_id
|
||||
);
|
||||
if let Some(waiter) = prev.waiter.take_inner() {
|
||||
waiter.send(ManualCompactionOverrideSnafu.fail());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn on_failure(mut self, err: Arc<Error>) {
|
||||
for waiter in self.waiters.drain(..) {
|
||||
waiter.send(Err(err.clone()).context(CompactRegionSnafu {
|
||||
region_id: self.region_id,
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(pending_compaction) = self.pending_request {
|
||||
pending_compaction
|
||||
.waiter
|
||||
.send(Err(err.clone()).context(CompactRegionSnafu {
|
||||
region_id: self.region_id,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -515,34 +561,36 @@ impl CompactionStatus {
|
||||
fn new_compaction_request(
|
||||
&mut self,
|
||||
request_sender: Sender<WorkerRequest>,
|
||||
waiter: OptionOutputTx,
|
||||
mut waiter: OptionOutputTx,
|
||||
engine_config: Arc<MitoConfig>,
|
||||
cache_manager: CacheManagerRef,
|
||||
manifest_ctx: &ManifestContextRef,
|
||||
listener: WorkerListener,
|
||||
schema_metadata_manager: SchemaMetadataManagerRef,
|
||||
max_parallelism: usize,
|
||||
) -> CompactionRequest {
|
||||
let current_version = CompactionVersion::from(self.version_control.current().version);
|
||||
let start_time = Instant::now();
|
||||
let mut req = CompactionRequest {
|
||||
let mut waiters = Vec::with_capacity(self.waiters.len() + 1);
|
||||
waiters.extend(std::mem::take(&mut self.waiters));
|
||||
|
||||
if let Some(waiter) = waiter.take_inner() {
|
||||
waiters.push(waiter);
|
||||
}
|
||||
|
||||
CompactionRequest {
|
||||
engine_config,
|
||||
current_version,
|
||||
access_layer: self.access_layer.clone(),
|
||||
request_sender: request_sender.clone(),
|
||||
waiters: Vec::new(),
|
||||
waiters,
|
||||
start_time,
|
||||
cache_manager,
|
||||
manifest_ctx: manifest_ctx.clone(),
|
||||
listener,
|
||||
schema_metadata_manager,
|
||||
};
|
||||
|
||||
if let Some(pending) = self.pending_compaction.take() {
|
||||
req.waiters = pending.waiters;
|
||||
max_parallelism,
|
||||
}
|
||||
req.push_waiter(waiter);
|
||||
|
||||
req
|
||||
}
|
||||
}
|
||||
|
||||
@@ -680,8 +728,20 @@ fn get_expired_ssts(
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Pending compaction request that is supposed to run after current task is finished,
|
||||
/// typically used for manual compactions.
|
||||
struct PendingCompaction {
|
||||
/// Compaction options. Currently, it can only be [StrictWindow].
|
||||
pub(crate) options: compact_request::Options,
|
||||
/// Waiters of pending requests.
|
||||
pub(crate) waiter: OptionOutputTx,
|
||||
/// Max parallelism for pending compaction.
|
||||
pub(crate) max_parallelism: usize,
|
||||
}
|
||||
|
||||
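One detail worth calling out from set_pending_request earlier in this file: a newly stashed manual request replaces any previous one, and the superseded waiter is failed explicitly instead of being silently dropped. A small sketch of that contract (simplified types, not the crate's API):

// A stashed request owns one waiter; replacing it must fail the old caller.
struct PendingRequest {
    waiter: std::sync::mpsc::Sender<Result<(), String>>,
}

fn set_pending(slot: &mut Option<PendingRequest>, next: PendingRequest) {
    if let Some(prev) = slot.replace(next) {
        // Mirrors sending ManualCompactionOverrideSnafu to the old waiter.
        let _ = prev
            .waiter
            .send(Err("manual compaction overridden by a later request".to_string()));
    }
}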
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::region::StrictWindow;
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use super::*;
|
||||
@@ -722,6 +782,7 @@ mod tests {
|
||||
waiter,
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -742,6 +803,7 @@ mod tests {
|
||||
waiter,
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager,
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -752,6 +814,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_schedule_on_finished() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let job_scheduler = Arc::new(VecScheduler::default());
|
||||
let env = SchedulerEnv::new().await.scheduler(job_scheduler.clone());
|
||||
let (tx, _rx) = mpsc::channel(4);
|
||||
@@ -795,6 +858,7 @@ mod tests {
|
||||
OptionOutputTx::none(),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -816,6 +880,119 @@ mod tests {
|
||||
purger.clone(),
|
||||
);
|
||||
// The task is pending.
|
||||
let (tx, _rx) = oneshot::channel();
|
||||
scheduler
|
||||
.schedule_compaction(
|
||||
region_id,
|
||||
compact_request::Options::Regular(Default::default()),
|
||||
&version_control,
|
||||
&env.access_layer,
|
||||
OptionOutputTx::new(Some(OutputTx::new(tx))),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(1, scheduler.region_status.len());
|
||||
assert_eq!(1, job_scheduler.num_jobs());
|
||||
assert!(!scheduler
|
||||
.region_status
|
||||
.get(&builder.region_id())
|
||||
.unwrap()
|
||||
.waiters
|
||||
.is_empty());
|
||||
|
||||
// On compaction finished and schedule next compaction.
|
||||
scheduler
|
||||
.on_compaction_finished(region_id, &manifest_ctx, schema_metadata_manager.clone())
|
||||
.await;
|
||||
assert_eq!(1, scheduler.region_status.len());
|
||||
assert_eq!(2, job_scheduler.num_jobs());
|
||||
|
||||
// 5 files for next compaction.
|
||||
apply_edit(
|
||||
&version_control,
|
||||
&[(0, end), (20, end), (40, end), (60, end), (80, end)],
|
||||
&[],
|
||||
purger.clone(),
|
||||
);
|
||||
let (tx, _rx) = oneshot::channel();
|
||||
// The task is pending.
|
||||
scheduler
|
||||
.schedule_compaction(
|
||||
region_id,
|
||||
compact_request::Options::Regular(Default::default()),
|
||||
&version_control,
|
||||
&env.access_layer,
|
||||
OptionOutputTx::new(Some(OutputTx::new(tx))),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager,
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(2, job_scheduler.num_jobs());
|
||||
assert!(!scheduler
|
||||
.region_status
|
||||
.get(&builder.region_id())
|
||||
.unwrap()
|
||||
.waiters
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_manual_compaction_when_compaction_in_progress() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let job_scheduler = Arc::new(VecScheduler::default());
|
||||
let env = SchedulerEnv::new().await.scheduler(job_scheduler.clone());
|
||||
let (tx, _rx) = mpsc::channel(4);
|
||||
let mut scheduler = env.mock_compaction_scheduler(tx);
|
||||
let mut builder = VersionControlBuilder::new();
|
||||
let purger = builder.file_purger();
|
||||
let region_id = builder.region_id();
|
||||
|
||||
let (schema_metadata_manager, kv_backend) = mock_schema_metadata_manager();
|
||||
schema_metadata_manager
|
||||
.register_region_table_info(
|
||||
builder.region_id().table_id(),
|
||||
"test_table",
|
||||
"test_catalog",
|
||||
"test_schema",
|
||||
None,
|
||||
kv_backend,
|
||||
)
|
||||
.await;
|
||||
|
||||
// 5 files to compact.
|
||||
let end = 1000 * 1000;
|
||||
let version_control = Arc::new(
|
||||
builder
|
||||
.push_l0_file(0, end)
|
||||
.push_l0_file(10, end)
|
||||
.push_l0_file(50, end)
|
||||
.push_l0_file(80, end)
|
||||
.push_l0_file(90, end)
|
||||
.build(),
|
||||
);
|
||||
let manifest_ctx = env
|
||||
.mock_manifest_context(version_control.current().version.metadata.clone())
|
||||
.await;
|
||||
|
||||
let file_metas: Vec<_> = version_control.current().version.ssts.levels()[0]
|
||||
.files
|
||||
.values()
|
||||
.map(|file| file.meta_ref().clone())
|
||||
.collect();
|
||||
|
||||
// 5 files for next compaction and removes old files.
|
||||
apply_edit(
|
||||
&version_control,
|
||||
&[(0, end), (20, end), (40, end), (60, end), (80, end)],
|
||||
&file_metas,
|
||||
purger.clone(),
|
||||
);
|
||||
|
||||
scheduler
|
||||
.schedule_compaction(
|
||||
region_id,
|
||||
@@ -825,17 +1002,40 @@ mod tests {
|
||||
OptionOutputTx::none(),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
// Should schedule 1 compaction.
|
||||
assert_eq!(1, scheduler.region_status.len());
|
||||
assert_eq!(1, job_scheduler.num_jobs());
|
||||
assert!(scheduler
|
||||
.region_status
|
||||
.get(&builder.region_id())
|
||||
.get(®ion_id)
|
||||
.unwrap()
|
||||
.pending_compaction
|
||||
.is_some());
|
||||
.pending_request
|
||||
.is_none());
|
||||
|
||||
// Schedule another manual compaction.
|
||||
let (tx, _rx) = oneshot::channel();
|
||||
scheduler
|
||||
.schedule_compaction(
|
||||
region_id,
|
||||
compact_request::Options::StrictWindow(StrictWindow { window_seconds: 60 }),
|
||||
&version_control,
|
||||
&env.access_layer,
|
||||
OptionOutputTx::new(Some(OutputTx::new(tx))),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(1, scheduler.region_status.len());
|
||||
// Current job num should be 1 since compaction is in progress.
|
||||
assert_eq!(1, job_scheduler.num_jobs());
|
||||
let status = scheduler.region_status.get(&builder.region_id()).unwrap();
|
||||
assert!(status.pending_request.is_some());
|
||||
|
||||
// On compaction finished and schedule next compaction.
|
||||
scheduler
|
||||
@@ -843,32 +1043,8 @@ mod tests {
|
||||
.await;
|
||||
assert_eq!(1, scheduler.region_status.len());
|
||||
assert_eq!(2, job_scheduler.num_jobs());
|
||||
// 5 files for next compaction.
|
||||
apply_edit(
|
||||
&version_control,
|
||||
&[(0, end), (20, end), (40, end), (60, end), (80, end)],
|
||||
&[],
|
||||
purger.clone(),
|
||||
);
|
||||
// The task is pending.
|
||||
scheduler
|
||||
.schedule_compaction(
|
||||
region_id,
|
||||
compact_request::Options::Regular(Default::default()),
|
||||
&version_control,
|
||||
&env.access_layer,
|
||||
OptionOutputTx::none(),
|
||||
&manifest_ctx,
|
||||
schema_metadata_manager,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(2, job_scheduler.num_jobs());
|
||||
assert!(scheduler
|
||||
.region_status
|
||||
.get(&builder.region_id())
|
||||
.unwrap()
|
||||
.pending_compaction
|
||||
.is_some());
|
||||
|
||||
let status = scheduler.region_status.get(&builder.region_id()).unwrap();
|
||||
assert!(status.pending_request.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,6 +91,12 @@ pub struct CompactionRegion {
|
||||
pub(crate) current_version: CompactionVersion,
|
||||
pub(crate) file_purger: Option<Arc<LocalFilePurger>>,
|
||||
pub(crate) ttl: Option<TimeToLive>,
|
||||
|
||||
/// Controls the parallelism of this compaction task. Default is 1.
|
||||
///
|
||||
/// The parallel is inside this compaction task, not across different compaction tasks.
|
||||
/// It can be different windows of the same compaction task or something like this.
|
||||
pub max_parallelism: usize,
|
||||
}
|
||||
|
||||
/// OpenCompactionRegionRequest represents the request to open a compaction region.
|
||||
@@ -99,6 +105,7 @@ pub struct OpenCompactionRegionRequest {
|
||||
pub region_id: RegionId,
|
||||
pub region_dir: String,
|
||||
pub region_options: RegionOptions,
|
||||
pub max_parallelism: usize,
|
||||
}
|
||||
|
||||
/// Open a compaction region from a compaction request.
|
||||
@@ -205,6 +212,7 @@ pub async fn open_compaction_region(
|
||||
current_version,
|
||||
file_purger: Some(file_purger),
|
||||
ttl: Some(ttl),
|
||||
max_parallelism: req.max_parallelism,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -266,6 +274,7 @@ impl Compactor for DefaultCompactor {
|
||||
let mut futs = Vec::with_capacity(picker_output.outputs.len());
|
||||
let mut compacted_inputs =
|
||||
Vec::with_capacity(picker_output.outputs.iter().map(|o| o.inputs.len()).sum());
|
||||
let internal_parallelism = compaction_region.max_parallelism.max(1);
|
||||
|
||||
for output in picker_output.outputs.drain(..) {
|
||||
compacted_inputs.extend(output.inputs.iter().map(|f| f.meta_ref().clone()));
|
||||
@@ -358,9 +367,8 @@ impl Compactor for DefaultCompactor {
|
||||
}
|
||||
let mut output_files = Vec::with_capacity(futs.len());
|
||||
while !futs.is_empty() {
|
||||
let mut task_chunk =
|
||||
Vec::with_capacity(crate::compaction::task::MAX_PARALLEL_COMPACTION);
|
||||
for _ in 0..crate::compaction::task::MAX_PARALLEL_COMPACTION {
|
||||
let mut task_chunk = Vec::with_capacity(internal_parallelism);
|
||||
for _ in 0..internal_parallelism {
|
||||
if let Some(task) = futs.pop() {
|
||||
task_chunk.push(common_runtime::spawn_compact(task));
|
||||
}
|
||||
|
||||
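The loop above caps in-flight merge jobs per compaction at compaction_region.max_parallelism (at least 1) instead of the old global constant. A dependency-light sketch of the same chunking pattern, assuming the futures crate rather than the actual spawn_compact runtime:

use futures::future::join_all; // assumed dependency for this sketch only

// Drain `tasks` in chunks of at most `parallelism`, finishing one chunk
// before starting the next, so a single compaction never exceeds its budget.
async fn run_chunked<F, T>(mut tasks: Vec<F>, parallelism: usize) -> Vec<T>
where
    F: std::future::Future<Output = T>,
{
    let mut outputs = Vec::with_capacity(tasks.len());
    while !tasks.is_empty() {
        let split = tasks.len().saturating_sub(parallelism.max(1));
        let chunk = tasks.split_off(split);
        outputs.extend(join_all(chunk).await);
    }
    outputs
}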
@@ -32,7 +32,7 @@ use crate::request::{
use crate::worker::WorkerListener;

/// Maximum number of compaction tasks in parallel.
pub const MAX_PARALLEL_COMPACTION: usize = 8;
pub const MAX_PARALLEL_COMPACTION: usize = 1;

pub(crate) struct CompactionTaskImpl {
pub compaction_region: CompactionRegion,

@@ -93,15 +93,15 @@ pub struct MitoConfig {
|
||||
pub page_cache_size: ReadableSize,
|
||||
/// Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
pub selector_result_cache_size: ReadableSize,
|
||||
/// Whether to enable the write cache.
|
||||
pub enable_write_cache: bool,
|
||||
/// Whether to enable the experimental write cache.
|
||||
pub enable_experimental_write_cache: bool,
|
||||
/// File system path for write cache dir's root, defaults to `{data_home}`.
|
||||
pub write_cache_path: String,
|
||||
pub experimental_write_cache_path: String,
|
||||
/// Capacity for write cache.
|
||||
pub write_cache_size: ReadableSize,
|
||||
pub experimental_write_cache_size: ReadableSize,
|
||||
/// TTL for write cache.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub write_cache_ttl: Option<Duration>,
|
||||
pub experimental_write_cache_ttl: Option<Duration>,
|
||||
|
||||
// Other configs:
|
||||
/// Buffer size for SST writing.
|
||||
@@ -147,10 +147,10 @@ impl Default for MitoConfig {
|
||||
vector_cache_size: ReadableSize::mb(512),
|
||||
page_cache_size: ReadableSize::mb(512),
|
||||
selector_result_cache_size: ReadableSize::mb(512),
|
||||
enable_write_cache: false,
|
||||
write_cache_path: String::new(),
|
||||
write_cache_size: ReadableSize::gb(5),
|
||||
write_cache_ttl: None,
|
||||
enable_experimental_write_cache: false,
|
||||
experimental_write_cache_path: String::new(),
|
||||
experimental_write_cache_size: ReadableSize::gb(5),
|
||||
experimental_write_cache_ttl: None,
|
||||
sst_write_buffer_size: DEFAULT_WRITE_BUFFER_SIZE,
|
||||
parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE,
|
||||
allow_stale_entries: false,
|
||||
@@ -234,8 +234,8 @@ impl MitoConfig {
|
||||
}
|
||||
|
||||
// Sets write cache path if it is empty.
|
||||
if self.write_cache_path.trim().is_empty() {
|
||||
self.write_cache_path = data_home.to_string();
|
||||
if self.experimental_write_cache_path.trim().is_empty() {
|
||||
self.experimental_write_cache_path = data_home.to_string();
|
||||
}
|
||||
|
||||
self.index.sanitize(data_home, &self.inverted_index)?;
|
||||
@@ -268,7 +268,7 @@ impl MitoConfig {
|
||||
self.selector_result_cache_size = mem_cache_size;
|
||||
}
|
||||
|
||||
/// Enable write cache.
|
||||
/// Enable experimental write cache.
|
||||
#[cfg(test)]
|
||||
pub fn enable_write_cache(
|
||||
mut self,
|
||||
@@ -276,10 +276,10 @@ impl MitoConfig {
|
||||
size: ReadableSize,
|
||||
ttl: Option<Duration>,
|
||||
) -> Self {
|
||||
self.enable_write_cache = true;
|
||||
self.write_cache_path = path;
|
||||
self.write_cache_size = size;
|
||||
self.write_cache_ttl = ttl;
|
||||
self.enable_experimental_write_cache = true;
|
||||
self.experimental_write_cache_path = path;
|
||||
self.experimental_write_cache_size = size;
|
||||
self.experimental_write_cache_ttl = ttl;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
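For tests, the renamed knobs are usually set through the #[cfg(test)] helper shown above rather than field by field. A hypothetical call (path and size are made up; ReadableSize as used elsewhere in this config):

// Hypothetical test setup; assumes `MitoConfig` and `ReadableSize` are in scope as above.
#[cfg(test)]
fn test_config_with_write_cache() -> MitoConfig {
    MitoConfig::default().enable_write_cache(
        "/tmp/greptimedb-test/write_cache".to_string(),
        ReadableSize::mb(512),
        None, // no TTL
    )
}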
@@ -12,16 +12,20 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::{ColumnSchema, Rows};
|
||||
use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use datatypes::prelude::ScalarVector;
|
||||
use datatypes::vectors::TimestampMillisecondVector;
|
||||
use store_api::region_engine::{RegionEngine, RegionRole};
|
||||
use store_api::region_request::AlterKind::SetRegionOptions;
|
||||
use store_api::region_request::{
|
||||
RegionCompactRequest, RegionDeleteRequest, RegionFlushRequest, RegionRequest,
|
||||
RegionAlterRequest, RegionCompactRequest, RegionDeleteRequest, RegionFlushRequest,
|
||||
RegionOpenRequest, RegionRequest, SetRegionOption,
|
||||
};
|
||||
use store_api::storage::{RegionId, ScanRequest};
|
||||
use tokio::sync::Notify;
|
||||
@@ -466,3 +470,219 @@ async fn test_compaction_update_time_window() {
|
||||
let vec = collect_stream_ts(stream).await;
|
||||
assert_eq!((0..4000).map(|v| v * 1000).collect::<Vec<_>>(), vec);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_change_region_compaction_window() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let mut env = TestEnv::new();
|
||||
let engine = env.create_engine(MitoConfig::default()).await;
|
||||
|
||||
let region_id = RegionId::new(1, 1);
|
||||
|
||||
env.get_schema_metadata_manager()
|
||||
.register_region_table_info(
|
||||
region_id.table_id(),
|
||||
"test_table",
|
||||
"test_catalog",
|
||||
"test_schema",
|
||||
None,
|
||||
env.get_kv_backend(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let request = CreateRequestBuilder::new()
|
||||
.insert_option("compaction.type", "twcs")
|
||||
.insert_option("compaction.twcs.max_active_window_runs", "1")
|
||||
.insert_option("compaction.twcs.max_active_window_files", "1")
|
||||
.insert_option("compaction.twcs.max_inactive_window_runs", "1")
|
||||
.insert_option("compaction.twcs.max_inactive_window_files", "1")
|
||||
.build();
|
||||
let region_dir = request.region_dir.clone();
|
||||
let column_schemas = request
|
||||
.column_metadatas
|
||||
.iter()
|
||||
.map(column_metadata_to_column_schema)
|
||||
.collect::<Vec<_>>();
|
||||
engine
|
||||
.handle_request(region_id, RegionRequest::Create(request))
|
||||
.await
|
||||
.unwrap();
|
||||
// Flush 2 SSTs for compaction.
|
||||
put_and_flush(&engine, region_id, &column_schemas, 0..1200).await; // window 3600
|
||||
put_and_flush(&engine, region_id, &column_schemas, 1200..2400).await; // window 3600
|
||||
|
||||
engine
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Compact(RegionCompactRequest::default()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Put window 7200
|
||||
put_and_flush(&engine, region_id, &column_schemas, 4000..5000).await; // window 3600
|
||||
|
||||
// Check compaction window.
|
||||
let region = engine.get_region(region_id).unwrap();
|
||||
{
|
||||
let version = region.version();
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(3600)),
|
||||
version.compaction_time_window,
|
||||
);
|
||||
assert!(version.options.compaction.time_window().is_none());
|
||||
}
|
||||
|
||||
// Change compaction window.
|
||||
let request = RegionRequest::Alter(RegionAlterRequest {
|
||||
schema_version: region.metadata().schema_version,
|
||||
kind: SetRegionOptions {
|
||||
options: vec![SetRegionOption::Twsc(
|
||||
"compaction.twcs.time_window".to_string(),
|
||||
"2h".to_string(),
|
||||
)],
|
||||
},
|
||||
});
|
||||
engine.handle_request(region_id, request).await.unwrap();
|
||||
|
||||
// Compaction again. It should compacts window 3600 and 7200
|
||||
// into 7200.
|
||||
engine
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Compact(RegionCompactRequest::default()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
// Check compaction window.
|
||||
{
|
||||
let region = engine.get_region(region_id).unwrap();
|
||||
let version = region.version();
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(7200)),
|
||||
version.compaction_time_window,
|
||||
);
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(7200)),
|
||||
version.options.compaction.time_window()
|
||||
);
|
||||
}
|
||||
|
||||
// Reopen region.
|
||||
let engine = env.reopen_engine(engine, MitoConfig::default()).await;
|
||||
engine
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Open(RegionOpenRequest {
|
||||
engine: String::new(),
|
||||
region_dir,
|
||||
options: Default::default(),
|
||||
skip_wal_replay: false,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
// Check compaction window.
|
||||
{
|
||||
let region = engine.get_region(region_id).unwrap();
|
||||
let version = region.version();
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(7200)),
|
||||
version.compaction_time_window,
|
||||
);
|
||||
// We open the region without options, so the time window should be None.
|
||||
assert!(version.options.compaction.time_window().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_open_overwrite_compaction_window() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let mut env = TestEnv::new();
|
||||
let engine = env.create_engine(MitoConfig::default()).await;
|
||||
|
||||
let region_id = RegionId::new(1, 1);
|
||||
|
||||
env.get_schema_metadata_manager()
|
||||
.register_region_table_info(
|
||||
region_id.table_id(),
|
||||
"test_table",
|
||||
"test_catalog",
|
||||
"test_schema",
|
||||
None,
|
||||
env.get_kv_backend(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let request = CreateRequestBuilder::new()
|
||||
.insert_option("compaction.type", "twcs")
|
||||
.insert_option("compaction.twcs.max_active_window_runs", "1")
|
||||
.insert_option("compaction.twcs.max_active_window_files", "1")
|
||||
.insert_option("compaction.twcs.max_inactive_window_runs", "1")
|
||||
.insert_option("compaction.twcs.max_inactive_window_files", "1")
|
||||
.build();
|
||||
let region_dir = request.region_dir.clone();
|
||||
let column_schemas = request
|
||||
.column_metadatas
|
||||
.iter()
|
||||
.map(column_metadata_to_column_schema)
|
||||
.collect::<Vec<_>>();
|
||||
engine
|
||||
.handle_request(region_id, RegionRequest::Create(request))
|
||||
.await
|
||||
.unwrap();
|
||||
// Flush 2 SSTs for compaction.
|
||||
put_and_flush(&engine, region_id, &column_schemas, 0..1200).await; // window 3600
|
||||
put_and_flush(&engine, region_id, &column_schemas, 1200..2400).await; // window 3600
|
||||
|
||||
engine
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Compact(RegionCompactRequest::default()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Check compaction window.
|
||||
{
|
||||
let region = engine.get_region(region_id).unwrap();
|
||||
let version = region.version();
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(3600)),
|
||||
version.compaction_time_window,
|
||||
);
|
||||
assert!(version.options.compaction.time_window().is_none());
|
||||
}
|
||||
|
||||
// Reopen region.
|
||||
let options = HashMap::from([
|
||||
("compaction.type".to_string(), "twcs".to_string()),
|
||||
("compaction.twcs.time_window".to_string(), "2h".to_string()),
|
||||
]);
|
||||
let engine = env.reopen_engine(engine, MitoConfig::default()).await;
|
||||
engine
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Open(RegionOpenRequest {
|
||||
engine: String::new(),
|
||||
region_dir,
|
||||
options,
|
||||
skip_wal_replay: false,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
// Check compaction window.
|
||||
{
|
||||
let region = engine.get_region(region_id).unwrap();
|
||||
let version = region.version();
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(7200)),
|
||||
version.compaction_time_window,
|
||||
);
|
||||
assert_eq!(
|
||||
Some(Duration::from_secs(7200)),
|
||||
version.options.compaction.time_window()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -140,7 +140,7 @@ async fn test_edit_region_fill_cache() {
|
||||
.create_engine_with(
|
||||
MitoConfig {
|
||||
// Write cache must be enabled to download the ingested SST file.
|
||||
enable_write_cache: true,
|
||||
enable_experimental_write_cache: true,
|
||||
..Default::default()
|
||||
},
|
||||
None,
|
||||
|
||||
@@ -464,6 +464,7 @@ async fn test_open_compaction_region() {
|
||||
region_id,
|
||||
region_dir: region_dir.clone(),
|
||||
region_options: RegionOptions::default(),
|
||||
max_parallelism: 1,
|
||||
};
|
||||
|
||||
let compaction_region = open_compaction_region(
|
||||
|
||||
@@ -925,6 +925,23 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Unexpected impure default value with region_id: {}, column: {}, default_value: {}",
|
||||
region_id,
|
||||
column,
|
||||
default_value
|
||||
))]
|
||||
UnexpectedImpureDefault {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
region_id: RegionId,
|
||||
column: String,
|
||||
default_value: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Manual compaction is override by following operations."))]
|
||||
ManualCompactionOverride {},
|
||||
}
|
||||
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
@@ -964,7 +981,8 @@ impl ErrorExt for Error {
|
||||
| InvalidParquet { .. }
|
||||
| OperateAbortedIndex { .. }
|
||||
| UnexpectedReplay { .. }
|
||||
| IndexEncodeNull { .. } => StatusCode::Unexpected,
|
||||
| IndexEncodeNull { .. }
|
||||
| UnexpectedImpureDefault { .. } => StatusCode::Unexpected,
|
||||
RegionNotFound { .. } => StatusCode::RegionNotFound,
|
||||
ObjectStoreNotFound { .. }
|
||||
| InvalidScanIndex { .. }
|
||||
@@ -1067,6 +1085,8 @@ impl ErrorExt for Error {
|
||||
PushBloomFilterValue { source, .. } | BloomFilterFinish { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
ManualCompactionOverride {} => StatusCode::Cancelled,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_telemetry::info;
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::storage::SequenceNumber;
|
||||
|
||||
@@ -253,7 +254,10 @@ pub(crate) struct Version {
|
||||
///
|
||||
/// Used to check if it is a flush task during the truncating table.
|
||||
pub(crate) truncated_entry_id: Option<EntryId>,
|
||||
/// Inferred compaction time window.
|
||||
/// Inferred compaction time window from flush.
|
||||
///
|
||||
/// If compaction options contain a time window, it will overwrite this value
|
||||
/// when creating a new version from the [VersionBuilder].
|
||||
pub(crate) compaction_time_window: Option<Duration>,
|
||||
/// Options of the region.
|
||||
pub(crate) options: RegionOptions,
|
||||
@@ -389,7 +393,24 @@ impl VersionBuilder {
|
||||
}
|
||||
|
||||
/// Builds a new [Version] from the builder.
|
||||
/// It overwrites the window size by compaction option.
|
||||
pub(crate) fn build(self) -> Version {
|
||||
let compaction_time_window = self
|
||||
.options
|
||||
.compaction
|
||||
.time_window()
|
||||
.or(self.compaction_time_window);
|
||||
if self.compaction_time_window.is_some()
|
||||
&& compaction_time_window != self.compaction_time_window
|
||||
{
|
||||
info!(
|
||||
"VersionBuilder overwrites region compaction time window from {:?} to {:?}, region: {}",
|
||||
self.compaction_time_window,
|
||||
compaction_time_window,
|
||||
self.metadata.region_id
|
||||
);
|
||||
}
|
||||
|
||||
Version {
|
||||
metadata: self.metadata,
|
||||
memtables: self.memtables,
|
||||
@@ -397,7 +418,7 @@ impl VersionBuilder {
|
||||
flushed_entry_id: self.flushed_entry_id,
|
||||
flushed_sequence: self.flushed_sequence,
|
||||
truncated_entry_id: self.truncated_entry_id,
|
||||
compaction_time_window: self.compaction_time_window,
|
||||
compaction_time_window,
|
||||
options: self.options,
|
||||
}
|
||||
}
|
||||
|
||||
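The precedence implemented in build() above is simply "configured wins, inferred is the fallback", which is what the compaction-window tests earlier rely on: an explicit 2h option overrides the 1h window inferred from flushed data. A one-function sketch of that rule:

use std::time::Duration;

// Configured `compaction.twcs.time_window` takes priority; the window
// inferred from flushes only applies when nothing was configured.
fn effective_window(configured: Option<Duration>, inferred: Option<Duration>) -> Option<Duration> {
    configured.or(inferred)
}

// e.g. effective_window(Some(Duration::from_secs(7200)), Some(Duration::from_secs(3600)))
//      == Some(Duration::from_secs(7200))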
@@ -42,7 +42,7 @@ use tokio::sync::oneshot::{self, Receiver, Sender};
|
||||
|
||||
use crate::error::{
|
||||
CompactRegionSnafu, ConvertColumnDataTypeSnafu, CreateDefaultSnafu, Error, FillDefaultSnafu,
|
||||
FlushRegionSnafu, InvalidRequestSnafu, Result,
|
||||
FlushRegionSnafu, InvalidRequestSnafu, Result, UnexpectedImpureDefaultSnafu,
|
||||
};
|
||||
use crate::manifest::action::RegionEdit;
|
||||
use crate::memtable::MemtableId;
|
||||
@@ -333,6 +333,14 @@ impl WriteRequest {
|
||||
}
|
||||
OpType::Put => {
|
||||
// For put requests, we use the default value from column schema.
|
||||
if column.column_schema.is_default_impure() {
|
||||
UnexpectedImpureDefaultSnafu {
|
||||
region_id: self.region_id,
|
||||
column: &column.column_schema.name,
|
||||
default_value: format!("{:?}", column.column_schema.default_constraint()),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
column
|
||||
.column_schema
|
||||
.create_default()
|
||||
@@ -1039,6 +1047,57 @@ mod tests {
|
||||
check_invalid_request(&err, r#"unknown columns: ["k1"]"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fill_impure_columns_err() {
|
||||
let rows = Rows {
|
||||
schema: vec![new_column_schema(
|
||||
"k0",
|
||||
ColumnDataType::Int64,
|
||||
SemanticType::Tag,
|
||||
)],
|
||||
rows: vec![Row {
|
||||
values: vec![i64_value(1)],
|
||||
}],
|
||||
};
|
||||
let metadata = {
|
||||
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
|
||||
builder
|
||||
.push_column_metadata(ColumnMetadata {
|
||||
column_schema: datatypes::schema::ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
)
|
||||
.with_default_constraint(Some(ColumnDefaultConstraint::Function(
|
||||
"now()".to_string(),
|
||||
)))
|
||||
.unwrap(),
|
||||
semantic_type: SemanticType::Timestamp,
|
||||
column_id: 1,
|
||||
})
|
||||
.push_column_metadata(ColumnMetadata {
|
||||
column_schema: datatypes::schema::ColumnSchema::new(
|
||||
"k0",
|
||||
ConcreteDataType::int64_datatype(),
|
||||
true,
|
||||
),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: 2,
|
||||
})
|
||||
.primary_key(vec![2]);
|
||||
builder.build().unwrap()
|
||||
};
|
||||
|
||||
let mut request = WriteRequest::new(RegionId::new(1, 1), OpType::Put, rows).unwrap();
|
||||
let err = request.check_schema(&metadata).unwrap_err();
|
||||
assert!(err.is_fill_default());
|
||||
assert!(request
|
||||
.fill_missing_columns(&metadata)
|
||||
.unwrap_err()
|
||||
.to_string()
|
||||
.contains("Unexpected impure default value with region_id"));
|
||||
}
|
||||
|
||||
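The test above pins down the datanode-side contract: a Put that omits a column whose default is impure (such as now()) must fail fill_missing_columns rather than evaluate the default per region. Stated as a tiny predicate (simplified; not the mito2 API):

// Returns an error when an omitted column can only be filled by an impure
// default; such values must be computed once at the frontend, not per region.
fn can_fill_on_datanode(is_default_impure: bool) -> Result<(), String> {
    if is_default_impure {
        return Err("unexpected impure default value; fill it at the frontend".to_string());
    }
    Ok(())
}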
#[test]
|
||||
fn test_fill_missing_columns() {
|
||||
let rows = Rows {
|
||||
|
||||
@@ -365,20 +365,23 @@ async fn write_cache_from_config(
|
||||
puffin_manager_factory: PuffinManagerFactory,
|
||||
intermediate_manager: IntermediateManager,
|
||||
) -> Result<Option<WriteCacheRef>> {
|
||||
if !config.enable_write_cache {
|
||||
if !config.enable_experimental_write_cache {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
tokio::fs::create_dir_all(Path::new(&config.write_cache_path))
|
||||
// TODO(yingwen): Remove this and document the config once the write cache is ready.
|
||||
warn!("Write cache is an experimental feature");
|
||||
|
||||
tokio::fs::create_dir_all(Path::new(&config.experimental_write_cache_path))
|
||||
.await
|
||||
.context(CreateDirSnafu {
|
||||
dir: &config.write_cache_path,
|
||||
dir: &config.experimental_write_cache_path,
|
||||
})?;
|
||||
|
||||
let cache = WriteCache::new_fs(
|
||||
&config.write_cache_path,
|
||||
config.write_cache_size,
|
||||
config.write_cache_ttl,
|
||||
&config.experimental_write_cache_path,
|
||||
config.experimental_write_cache_size,
|
||||
config.experimental_write_cache_ttl,
|
||||
puffin_manager_factory,
|
||||
intermediate_manager,
|
||||
)
|
||||
|
||||
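As rewired above, the write cache is only constructed when enable_experimental_write_cache is set, and its directory comes from experimental_write_cache_path (which sanitize() defaults to the data home when left empty). The gating and path resolution boil down to this illustrative helper:

// Sketch of the flag/path resolution; field names follow the config above,
// the helper itself is illustrative.
fn resolve_write_cache_dir(enabled: bool, configured_path: &str, data_home: &str) -> Option<String> {
    if !enabled {
        return None; // cache disabled: write_cache_from_config returns Ok(None)
    }
    let dir = if configured_path.trim().is_empty() { data_home } else { configured_path };
    Some(dir.to_string())
}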
@@ -45,6 +45,8 @@ impl<S> RegionWorkerLoop<S> {
|
||||
sender,
|
||||
®ion.manifest_ctx,
|
||||
self.schema_metadata_manager.clone(),
|
||||
// TODO(yingwen): expose this to frontend
|
||||
1,
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -113,6 +115,7 @@ impl<S> RegionWorkerLoop<S> {
|
||||
OptionOutputTx::none(),
|
||||
®ion.manifest_ctx,
|
||||
self.schema_metadata_manager.clone(),
|
||||
1,
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -47,6 +47,7 @@ use store_api::metric_engine_consts::{
|
||||
};
|
||||
use store_api::mito_engine_options::{APPEND_MODE_KEY, MERGE_MODE_KEY};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use table::metadata::TableInfo;
|
||||
use table::requests::{InsertRequest as TableInsertRequest, AUTO_CREATE_TABLE_KEY, TTL_KEY};
|
||||
use table::table_reference::TableReference;
|
||||
use table::TableRef;
|
||||
@@ -58,7 +59,9 @@ use crate::error::{
|
||||
use crate::expr_factory::CreateExprFactory;
|
||||
use crate::region_req_factory::RegionRequestFactory;
|
||||
use crate::req_convert::common::preprocess_row_insert_requests;
|
||||
use crate::req_convert::insert::{ColumnToRow, RowToRegion, StatementToRegion, TableToRegion};
|
||||
use crate::req_convert::insert::{
|
||||
fill_reqs_with_impure_default, ColumnToRow, RowToRegion, StatementToRegion, TableToRegion,
|
||||
};
|
||||
use crate::statement::StatementExecutor;
|
||||
|
||||
pub struct Inserter {
|
||||
@@ -200,18 +203,26 @@ impl Inserter {
|
||||
});
|
||||
validate_column_count_match(&requests)?;
|
||||
|
||||
let (table_name_to_ids, instant_table_ids) = self
|
||||
let CreateAlterTableResult {
|
||||
instant_table_ids,
|
||||
table_infos,
|
||||
} = self
|
||||
.create_or_alter_tables_on_demand(&requests, &ctx, create_type, statement_executor)
|
||||
.await?;
|
||||
|
||||
let name_to_info = table_infos
|
||||
.values()
|
||||
.map(|info| (info.name.clone(), info.clone()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let inserts = RowToRegion::new(
|
||||
table_name_to_ids,
|
||||
name_to_info,
|
||||
instant_table_ids,
|
||||
self.partition_manager.as_ref(),
|
||||
)
|
||||
.convert(requests)
|
||||
.await?;
|
||||
|
||||
self.do_request(inserts, &ctx).await
|
||||
self.do_request(inserts, &table_infos, &ctx).await
|
||||
}
|
||||
|
||||
/// Handles row inserts request with metric engine.
|
||||
@@ -236,7 +247,10 @@ impl Inserter {
|
||||
.await?;
|
||||
|
||||
// check and create logical tables
|
||||
let (table_name_to_ids, instant_table_ids) = self
|
||||
let CreateAlterTableResult {
|
||||
instant_table_ids,
|
||||
table_infos,
|
||||
} = self
|
||||
.create_or_alter_tables_on_demand(
|
||||
&requests,
|
||||
&ctx,
|
||||
@@ -244,15 +258,15 @@ impl Inserter {
|
||||
statement_executor,
|
||||
)
|
||||
.await?;
|
||||
let inserts = RowToRegion::new(
|
||||
table_name_to_ids,
|
||||
instant_table_ids,
|
||||
&self.partition_manager,
|
||||
)
|
||||
.convert(requests)
|
||||
.await?;
|
||||
let name_to_info = table_infos
|
||||
.values()
|
||||
.map(|info| (info.name.clone(), info.clone()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let inserts = RowToRegion::new(name_to_info, instant_table_ids, &self.partition_manager)
|
||||
.convert(requests)
|
||||
.await?;
|
||||
|
||||
self.do_request(inserts, &ctx).await
|
||||
self.do_request(inserts, &table_infos, &ctx).await
|
||||
}
|
||||
|
||||
pub async fn handle_table_insert(
|
||||
@@ -273,7 +287,10 @@ impl Inserter {
|
||||
.convert(request)
|
||||
.await?;
|
||||
|
||||
self.do_request(inserts, &ctx).await
|
||||
let table_infos =
|
||||
HashMap::from_iter([(table_info.table_id(), table_info.clone())].into_iter());
|
||||
|
||||
self.do_request(inserts, &table_infos, &ctx).await
|
||||
}
|
||||
|
||||
pub async fn handle_statement_insert(
|
||||
@@ -281,12 +298,15 @@ impl Inserter {
|
||||
insert: &Insert,
|
||||
ctx: &QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let inserts =
|
||||
let (inserts, table_info) =
|
||||
StatementToRegion::new(self.catalog_manager.as_ref(), &self.partition_manager, ctx)
|
||||
.convert(insert, ctx)
|
||||
.await?;
|
||||
|
||||
self.do_request(inserts, ctx).await
|
||||
let table_infos =
|
||||
HashMap::from_iter([(table_info.table_id(), table_info.clone())].into_iter());
|
||||
|
||||
self.do_request(inserts, &table_infos, ctx).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,8 +314,12 @@ impl Inserter {
|
||||
async fn do_request(
|
||||
&self,
|
||||
requests: InstantAndNormalInsertRequests,
|
||||
table_infos: &HashMap<TableId, Arc<TableInfo>>,
|
||||
ctx: &QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
// Fill impure default values in the request
|
||||
let requests = fill_reqs_with_impure_default(table_infos, requests)?;
|
||||
|
||||
let write_cost = write_meter!(
|
||||
ctx.current_catalog(),
|
||||
ctx.current_schema(),
|
||||
@@ -499,14 +523,15 @@ impl Inserter {
|
||||
ctx: &QueryContextRef,
|
||||
auto_create_table_type: AutoCreateTableType,
|
||||
statement_executor: &StatementExecutor,
|
||||
) -> Result<(HashMap<String, TableId>, HashSet<TableId>)> {
|
||||
) -> Result<CreateAlterTableResult> {
|
||||
let _timer = crate::metrics::CREATE_ALTER_ON_DEMAND
|
||||
.with_label_values(&[auto_create_table_type.as_str()])
|
||||
.start_timer();
|
||||
|
||||
let catalog = ctx.current_catalog();
|
||||
let schema = ctx.current_schema();
|
||||
let mut table_name_to_ids = HashMap::with_capacity(requests.inserts.len());
|
||||
|
||||
let mut table_infos = HashMap::new();
|
||||
// If `auto_create_table` hint is disabled, skip creating/altering tables.
|
||||
let auto_create_table_hint = ctx
|
||||
.extension(AUTO_CREATE_TABLE_KEY)
|
||||
@@ -535,9 +560,13 @@ impl Inserter {
|
||||
if table_info.is_ttl_instant_table() {
|
||||
instant_table_ids.insert(table_info.table_id());
|
||||
}
|
||||
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
|
||||
table_infos.insert(table_info.table_id(), table.table_info());
|
||||
}
|
||||
return Ok((table_name_to_ids, instant_table_ids));
|
||||
let ret = CreateAlterTableResult {
|
||||
instant_table_ids,
|
||||
table_infos,
|
||||
};
|
||||
return Ok(ret);
|
||||
}
|
||||
|
||||
let mut create_tables = vec![];
|
||||
@@ -551,7 +580,7 @@ impl Inserter {
|
||||
if table_info.is_ttl_instant_table() {
|
||||
instant_table_ids.insert(table_info.table_id());
|
||||
}
|
||||
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
|
||||
table_infos.insert(table_info.table_id(), table.table_info());
|
||||
if let Some(alter_expr) =
|
||||
self.get_alter_table_expr_on_demand(req, &table, ctx)?
|
||||
{
|
||||
@@ -579,7 +608,7 @@ impl Inserter {
|
||||
if table_info.is_ttl_instant_table() {
|
||||
instant_table_ids.insert(table_info.table_id());
|
||||
}
|
||||
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
|
||||
table_infos.insert(table_info.table_id(), table.table_info());
|
||||
}
|
||||
}
|
||||
if !alter_tables.is_empty() {
|
||||
@@ -602,7 +631,7 @@ impl Inserter {
|
||||
if table_info.is_ttl_instant_table() {
|
||||
instant_table_ids.insert(table_info.table_id());
|
||||
}
|
||||
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
|
||||
table_infos.insert(table_info.table_id(), table.table_info());
|
||||
}
|
||||
for alter_expr in alter_tables.into_iter() {
|
||||
statement_executor
|
||||
@@ -612,7 +641,10 @@ impl Inserter {
|
||||
}
|
||||
}
|
||||
|
||||
Ok((table_name_to_ids, instant_table_ids))
|
||||
Ok(CreateAlterTableResult {
|
||||
instant_table_ids,
|
||||
table_infos,
|
||||
})
|
||||
}
|
||||
|
||||
async fn create_physical_table_on_demand(
|
||||
@@ -874,3 +906,11 @@ fn build_create_table_expr(
|
||||
) -> Result<CreateTableExpr> {
|
||||
CreateExprFactory.create_table_expr_by_column_schemas(table, request_schema, engine, None)
|
||||
}
|
||||
|
||||
/// Result of `create_or_alter_tables_on_demand`.
|
||||
struct CreateAlterTableResult {
|
||||
/// table ids of ttl=instant tables.
|
||||
instant_table_ids: HashSet<TableId>,
|
||||
/// Table Info of the created tables.
|
||||
table_infos: HashMap<TableId, Arc<TableInfo>>,
|
||||
}
|
||||
|
||||
@@ -13,12 +13,14 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod column_to_row;
|
||||
mod fill_impure_default;
|
||||
mod row_to_region;
|
||||
mod stmt_to_region;
|
||||
mod table_to_region;
|
||||
|
||||
use api::v1::SemanticType;
|
||||
pub use column_to_row::ColumnToRow;
|
||||
pub use fill_impure_default::fill_reqs_with_impure_default;
|
||||
pub use row_to_region::RowToRegion;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
pub use stmt_to_region::StatementToRegion;
|
||||
|
||||
src/operator/src/req_convert/insert/fill_impure_default.rs (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Util functions to help with fill impure default values columns in request
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use ahash::{HashMap, HashMapExt, HashSet};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use table::metadata::{TableInfo, TableInfoRef};
|
||||
|
||||
use crate::error::{ConvertColumnDefaultConstraintSnafu, Result, UnexpectedSnafu};
|
||||
use crate::expr_factory::column_schemas_to_defs;
|
||||
use crate::insert::InstantAndNormalInsertRequests;
|
||||
|
||||
/// Find all columns that have impure default values
|
||||
pub fn find_all_impure_columns(table_info: &TableInfo) -> Vec<ColumnSchema> {
|
||||
let columns = table_info.meta.schema.column_schemas();
|
||||
columns
|
||||
.iter()
|
||||
.filter(|column| column.is_default_impure())
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Fill impure default values in the request
|
||||
pub struct ImpureDefaultFiller {
|
||||
impure_columns: HashMap<String, (api::v1::ColumnSchema, Option<api::v1::Value>)>,
|
||||
}
|
||||
|
||||
impl ImpureDefaultFiller {
|
||||
pub fn new(table_info: TableInfoRef) -> Result<Self> {
|
||||
let impure_column_list = find_all_impure_columns(&table_info);
|
||||
let pks = &table_info.meta.primary_key_indices;
|
||||
let pk_names = pks
|
||||
.iter()
|
||||
.map(|&i| table_info.meta.schema.column_name_by_index(i).to_string())
|
||||
.collect::<Vec<_>>();
|
||||
let mut impure_columns = HashMap::new();
|
||||
for column in impure_column_list {
|
||||
let default_value = column
|
||||
.create_impure_default()
|
||||
.with_context(|_| ConvertColumnDefaultConstraintSnafu {
|
||||
column_name: column.name.clone(),
|
||||
})?
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
violated: format!(
|
||||
"Expect default value to be impure, found {:?}",
|
||||
column.default_constraint()
|
||||
),
|
||||
})?;
|
||||
let grpc_default_value = api::helper::to_proto_value(default_value);
|
||||
let def = column_schemas_to_defs(vec![column], &pk_names)?.swap_remove(0);
|
||||
let grpc_column_schema = api::v1::ColumnSchema {
|
||||
column_name: def.name,
|
||||
datatype: def.data_type,
|
||||
semantic_type: def.semantic_type,
|
||||
datatype_extension: def.datatype_extension,
|
||||
options: def.options,
|
||||
};
|
||||
impure_columns.insert(
|
||||
grpc_column_schema.column_name.clone(),
|
||||
(grpc_column_schema, grpc_default_value),
|
||||
);
|
||||
}
|
||||
Ok(Self { impure_columns })
|
||||
}
|
||||
|
||||
/// Fill impure default values in the request
|
||||
pub fn fill_rows(&self, rows: &mut api::v1::Rows) {
|
||||
let impure_columns_in_reqs: HashSet<_> = rows
|
||||
.schema
|
||||
.iter()
|
||||
.filter_map(|schema| {
|
||||
if self.impure_columns.contains_key(&schema.column_name) {
|
||||
Some(&schema.column_name)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
if self.impure_columns.len() == impure_columns_in_reqs.len() {
|
||||
return;
|
||||
}
|
||||
|
||||
let (schema_append, row_append): (Vec<_>, Vec<_>) = self
|
||||
.impure_columns
|
||||
.iter()
|
||||
.filter_map(|(name, (schema, val))| {
|
||||
if !impure_columns_in_reqs.contains(name) {
|
||||
Some((schema.clone(), val.clone().unwrap_or_default()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unzip();
|
||||
|
||||
rows.schema.extend(schema_append);
|
||||
for row in rows.rows.iter_mut() {
|
||||
row.values.extend_from_slice(row_append.as_slice());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fill impure default values in the request(only for normal insert requests, since instant insert can be filled in flownode directly as a single source of truth)
|
||||
pub fn fill_reqs_with_impure_default(
|
||||
table_infos: &HashMap<TableId, Arc<TableInfo>>,
|
||||
mut inserts: InstantAndNormalInsertRequests,
|
||||
) -> Result<InstantAndNormalInsertRequests> {
|
||||
let fillers = table_infos
|
||||
.iter()
|
||||
.map(|(table_id, table_info)| {
|
||||
let table_id = *table_id;
|
||||
ImpureDefaultFiller::new(table_info.clone()).map(|filler| (table_id, filler))
|
||||
})
|
||||
.collect::<Result<HashMap<TableId, ImpureDefaultFiller>>>()?;
|
||||
|
||||
let normal_inserts = &mut inserts.normal_requests;
|
||||
for request in normal_inserts.requests.iter_mut() {
|
||||
let region_id = RegionId::from(request.region_id);
|
||||
let table_id = region_id.table_id();
|
||||
let filler = fillers.get(&table_id).with_context(|| UnexpectedSnafu {
|
||||
violated: format!("impure default filler for table_id: {} not found", table_id),
|
||||
})?;
|
||||
|
||||
if let Some(rows) = &mut request.rows {
|
||||
filler.fill_rows(rows);
|
||||
}
|
||||
}
|
||||
Ok(inserts)
|
||||
}
|
||||
|
||||
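End to end, fill_reqs_with_impure_default builds one ImpureDefaultFiller per table and lets it append the missing impure-default columns (a schema entry plus one evaluated value per row) to every normal insert request. The core row surgery is essentially the following, shown with toy types standing in for api::v1::{Rows, Row, Value}:

use std::collections::HashSet;

#[derive(Clone)]
struct Col { name: String }
#[derive(Clone)]
struct Val(i64); // stand-in for an already-evaluated default value

struct Rows { schema: Vec<Col>, rows: Vec<Vec<Val>> }

// Append every impure-default column the request did not carry.
fn fill_rows(rows: &mut Rows, impure_defaults: &[(Col, Val)]) {
    let present: HashSet<String> = rows.schema.iter().map(|c| c.name.clone()).collect();
    for (col, val) in impure_defaults {
        if present.contains(&col.name) {
            continue; // the client already supplied this column
        }
        rows.schema.push(col.clone());
        for row in rows.rows.iter_mut() {
            row.push(val.clone());
        }
    }
}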
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::value::ValueData;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder};
|
||||
use datatypes::value::Value;
|
||||
use table::metadata::{TableInfoBuilder, TableMetaBuilder};
|
||||
|
||||
use super::*;
|
||||
|
||||
/// Create a test schema with 3 columns: `[col1 int32, ts timestampmills DEFAULT now(), col2 int32]`.
|
||||
fn new_test_schema() -> Schema {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
)
|
||||
.with_time_index(true)
|
||||
.with_default_constraint(Some(datatypes::schema::ColumnDefaultConstraint::Function(
|
||||
"now()".to_string(),
|
||||
)))
|
||||
.unwrap(),
|
||||
ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true)
|
||||
.with_default_constraint(Some(datatypes::schema::ColumnDefaultConstraint::Value(
|
||||
Value::from(1i32),
|
||||
)))
|
||||
.unwrap(),
|
||||
];
|
||||
SchemaBuilder::try_from(column_schemas)
|
||||
.unwrap()
|
||||
.version(123)
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn new_table_info() -> TableInfo {
|
||||
let schema = Arc::new(new_test_schema());
|
||||
let meta = TableMetaBuilder::default()
|
||||
.schema(schema)
|
||||
.primary_key_indices(vec![0])
|
||||
.engine("engine")
|
||||
.next_column_id(3)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
TableInfoBuilder::default()
|
||||
.table_id(10)
|
||||
.table_version(5)
|
||||
.name("mytable")
|
||||
.meta(meta)
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn column_schema_to_proto(
|
||||
column_schema: &[ColumnSchema],
|
||||
pk_names: &[String],
|
||||
) -> Vec<api::v1::ColumnSchema> {
|
||||
column_schemas_to_defs(column_schema.to_vec(), pk_names)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|def| api::v1::ColumnSchema {
|
||||
column_name: def.name,
|
||||
datatype: def.data_type,
|
||||
semantic_type: def.semantic_type,
|
||||
datatype_extension: def.datatype_extension,
|
||||
options: def.options,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_impure_append() {
|
||||
let row = api::v1::Row {
|
||||
values: vec![api::v1::Value {
|
||||
value_data: Some(ValueData::I32Value(42)),
|
||||
}],
|
||||
};
|
||||
let schema = new_test_schema().column_schemas()[0].clone();
|
||||
let col_schemas = column_schema_to_proto(&[schema], &["col1".to_string()]);
|
||||
|
||||
let mut rows = api::v1::Rows {
|
||||
schema: col_schemas,
|
||||
rows: vec![row],
|
||||
};
|
||||
|
||||
let info = new_table_info();
|
||||
let filler = ImpureDefaultFiller::new(Arc::new(info)).unwrap();
|
||||
filler.fill_rows(&mut rows);
|
||||
|
||||
assert_eq!(rows.schema[1].column_name, "ts");
|
||||
assert!(rows.schema.len() == 2 && rows.rows[0].values.len() == 2);
|
||||
}
|
||||
}
|
||||
@@ -13,30 +13,31 @@
// limitations under the License.

use ahash::{HashMap, HashSet};
use api::v1::region::InsertRequests as RegionInsertRequests;
use api::v1::region::{InsertRequest, InsertRequests as RegionInsertRequests};
use api::v1::RowInsertRequests;
use partition::manager::PartitionRuleManager;
use snafu::OptionExt;
use table::metadata::TableId;
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::{TableId, TableInfoRef};

use crate::error::{Result, TableNotFoundSnafu};
use crate::insert::InstantAndNormalInsertRequests;
use crate::req_convert::common::partitioner::Partitioner;

pub struct RowToRegion<'a> {
    table_name_to_ids: HashMap<String, TableId>,
    tables_info: HashMap<String, TableInfoRef>,
    instant_table_ids: HashSet<TableId>,
    partition_manager: &'a PartitionRuleManager,
}

impl<'a> RowToRegion<'a> {
    pub fn new(
        table_name_to_ids: HashMap<String, TableId>,
        tables_info: HashMap<String, TableInfoRef>,
        instant_table_ids: HashSet<TableId>,
        partition_manager: &'a PartitionRuleManager,
    ) -> Self {
        Self {
            table_name_to_ids,
            tables_info,
            instant_table_ids,
            partition_manager,
        }
@@ -49,10 +50,24 @@ impl<'a> RowToRegion<'a> {
        let mut region_request = Vec::with_capacity(requests.inserts.len());
        let mut instant_request = Vec::with_capacity(requests.inserts.len());
        for request in requests.inserts {
            let Some(rows) = request.rows else { continue };

            let table_id = self.get_table_id(&request.table_name)?;
            let requests = Partitioner::new(self.partition_manager)
                .partition_insert_requests(table_id, request.rows.unwrap_or_default())
                .await?;
            let region_numbers = self.region_numbers(&request.table_name)?;
            let requests = if let Some(region_id) = match region_numbers[..] {
                [singular] => Some(RegionId::new(table_id, singular)),
                _ => None,
            } {
                vec![InsertRequest {
                    region_id: region_id.as_u64(),
                    rows: Some(rows),
                }]
            } else {
                Partitioner::new(self.partition_manager)
                    .partition_insert_requests(table_id, rows)
                    .await?
            };

            if self.instant_table_ids.contains(&table_id) {
                instant_request.extend(requests);
            } else {
@@ -71,9 +86,16 @@ impl<'a> RowToRegion<'a> {
    }

    fn get_table_id(&self, table_name: &str) -> Result<TableId> {
        self.table_name_to_ids
        self.tables_info
            .get(table_name)
            .cloned()
            .map(|x| x.table_id())
            .context(TableNotFoundSnafu { table_name })
    }

    fn region_numbers(&self, table_name: &str) -> Result<&Vec<RegionNumber>> {
        self.tables_info
            .get(table_name)
            .map(|x| &x.meta.region_numbers)
            .context(TableNotFoundSnafu { table_name })
    }
}
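For readers skimming the hunk above: when a table owns exactly one region, the new code builds the target `RegionId` directly from the table id and that lone region number and skips the partitioner entirely. The sketch below illustrates just that decision; the `RegionId` stand-in and its bit-packing are assumptions for illustration, not the `store_api` type.

```rust
/// Hypothetical stand-in for store_api::storage::RegionId, assuming the usual
/// packing of a table id in the high 32 bits and a region number in the low 32.
#[derive(Debug, PartialEq)]
struct RegionId(u64);

impl RegionId {
    fn new(table_id: u32, region_number: u32) -> Self {
        RegionId(((table_id as u64) << 32) | region_number as u64)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}

/// Mirrors the fast-path check: exactly one region number means the target
/// region is fully determined and no partition rule lookup is needed.
fn single_region_target(table_id: u32, region_numbers: &[u32]) -> Option<RegionId> {
    match region_numbers {
        [only] => Some(RegionId::new(table_id, *only)),
        _ => None, // zero or several regions: fall back to the partitioner
    }
}

fn main() {
    assert_eq!(
        single_region_target(10, &[7]).map(|r| r.as_u64()),
        Some((10u64 << 32) | 7)
    );
    assert!(single_region_target(10, &[0, 1]).is_none());
}
```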
@@ -25,6 +25,7 @@ use snafu::{ensure, OptionExt, ResultExt};
use sql::statements;
use sql::statements::insert::Insert;
use sqlparser::ast::{ObjectName, Value as SqlValue};
use table::metadata::TableInfoRef;
use table::TableRef;

use crate::error::{
@@ -61,7 +62,7 @@ impl<'a> StatementToRegion<'a> {
        &self,
        stmt: &Insert,
        query_ctx: &QueryContextRef,
    ) -> Result<InstantAndNormalInsertRequests> {
    ) -> Result<(InstantAndNormalInsertRequests, TableInfoRef)> {
        let (catalog, schema, table_name) = self.get_full_name(stmt.table_name())?;
        let table = self.get_table(&catalog, &schema, &table_name).await?;
        let table_schema = table.schema();
@@ -137,15 +138,21 @@ impl<'a> StatementToRegion<'a> {
            .await?;
        let requests = RegionInsertRequests { requests };
        if table_info.is_ttl_instant_table() {
            Ok(InstantAndNormalInsertRequests {
                normal_requests: Default::default(),
                instant_requests: requests,
            })
            Ok((
                InstantAndNormalInsertRequests {
                    normal_requests: Default::default(),
                    instant_requests: requests,
                },
                table_info,
            ))
        } else {
            Ok(InstantAndNormalInsertRequests {
                normal_requests: requests,
                instant_requests: Default::default(),
            })
            Ok((
                InstantAndNormalInsertRequests {
                    normal_requests: requests,
                    instant_requests: Default::default(),
                },
                table_info,
            ))
        }
    }
@@ -80,35 +80,20 @@ impl<'a> SplitReadRowHelper<'a> {

    fn split_rows(mut self) -> Result<HashMap<RegionNumber, Rows>> {
        let regions = self.split_to_regions()?;
        let request_splits = if regions.len() == 1 {
            // fast path, zero copy
            regions
                .into_keys()
                .map(|region_number| {
                    let rows = std::mem::take(&mut self.rows);
                    let rows = Rows {
                        schema: self.schema.clone(),
                        rows,
                    };
                    (region_number, rows)
                })
                .collect::<HashMap<_, _>>()
        } else {
            regions
                .into_iter()
                .map(|(region_number, row_indexes)| {
                    let rows = row_indexes
                        .into_iter()
                        .map(|row_idx| std::mem::take(&mut self.rows[row_idx]))
                        .collect();
                    let rows = Rows {
                        schema: self.schema.clone(),
                        rows,
                    };
                    (region_number, rows)
                })
                .collect::<HashMap<_, _>>()
        };
        let request_splits = regions
            .into_iter()
            .map(|(region_number, row_indexes)| {
                let rows = row_indexes
                    .into_iter()
                    .map(|row_idx| std::mem::take(&mut self.rows[row_idx]))
                    .collect();
                let rows = Rows {
                    schema: self.schema.clone(),
                    rows,
                };
                (region_number, rows)
            })
            .collect::<HashMap<_, _>>();

        Ok(request_splits)
    }
@@ -38,7 +38,7 @@ pub async fn logs(
    query_ctx.set_channel(Channel::Http);
    let query_ctx = Arc::new(query_ctx);

    let _timer = crate::metrics::METRIC_HTTP_LOGS_INGESTION_ELAPSED
    let _timer = crate::metrics::METRIC_HTTP_LOGS_ELAPSED
        .with_label_values(&[db.as_str()])
        .start_timer();
@@ -273,8 +273,11 @@ pub(crate) fn check(
) -> Option<Output> {
    // INSERT don't need MySQL federated check. We assume the query doesn't contain
    // federated or driver setup command if it starts with a 'INSERT' statement.
    if query.len() > 6 && query[..6].eq_ignore_ascii_case("INSERT") {
        return None;
    let the_6th_index = query.char_indices().nth(6).map(|(i, _)| i);
    if let Some(index) = the_6th_index {
        if query[..index].eq_ignore_ascii_case("INSERT") {
            return None;
        }
    }

    // First to check the query is like "select @@variables".
@@ -295,6 +298,15 @@ mod test {

    use super::*;

    #[test]
    fn test_check_abnormal() {
        let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
        let query = "🫣一点不正常的东西🫣";
        let output = check(query, QueryContext::arc(), session.clone());

        assert!(output.is_none());
    }

    #[test]
    fn test_check() {
        let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
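As an aside, a minimal standalone sketch of the character-boundary-safe prefix test the hunk above switches to; the helper name `is_insert_prefixed` is mine, not the crate's, and it mirrors the patched logic of locating the byte offset of the seventh character before slicing.

```rust
/// Returns true when `query` starts with the keyword "INSERT" followed by at
/// least one more character, without ever slicing inside a multi-byte UTF-8
/// character (the panic the patch fixes for inputs like the emoji query below).
fn is_insert_prefixed(query: &str) -> bool {
    // `char_indices().nth(6)` yields the byte offset of the 7th character,
    // which is guaranteed to be a valid char boundary for slicing.
    match query.char_indices().nth(6) {
        Some((idx, _)) => query[..idx].eq_ignore_ascii_case("INSERT"),
        None => false, // fewer than 7 characters: no fast path
    }
}

fn main() {
    assert!(is_insert_prefixed("INSERT INTO t VALUES (1)"));
    // A byte slice `&query[..6]` would panic here: byte 6 is not a char boundary.
    assert!(!is_insert_prefixed("🫣一点不正常的东西🫣"));
    assert!(!is_insert_prefixed("SELECT 1"));
}
```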
@@ -45,6 +45,7 @@ flow.workspace = true
frontend = { workspace = true, features = ["testing"] }
futures.workspace = true
futures-util.workspace = true
log-query = { workspace = true }
loki-api = "0.1"
meta-client.workspace = true
meta-srv = { workspace = true, features = ["mock"] }
@@ -47,13 +47,10 @@ To run the integration test, please copy `.env.example` to `.env` in the project
GT_KAFKA_ENDPOINTS = localhost:9092
```

### Setup kafka standalone

```
cd tests-integration/fixtures/kafka
cd tests-integration/fixtures

docker compose -f docker-compose-standalone.yml up
docker compose -f docker-compose-standalone.yml up kafka
```

tests-integration/fixtures/docker-compose.yml (new file, 72 lines)
@@ -0,0 +1,72 @@
services:

  zookeeper:
    image: docker.io/bitnami/zookeeper:3.7
    ports:
      - '2181:2181'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    image: docker.io/bitnami/kafka:3.6.0
    container_name: kafka
    ports:
      - 9092:9092
      - 9093:9093
    environment:
      # KRaft settings
      KAFKA_CFG_NODE_ID: "1"
      KAFKA_CFG_PROCESS_ROLES: broker,controller
      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:2181
      # Listeners
      KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092,SECURE://localhost:9093
      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SECURE:SASL_PLAINTEXT
      KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:2181,SECURE://:9093
      ALLOW_PLAINTEXT_LISTENER: "yes"
      KAFKA_BROKER_ID: "1"
      KAFKA_CLIENT_USERS: "user_kafka"
      KAFKA_CLIENT_PASSWORDS: "secret"
    depends_on:
      zookeeper:
        condition: service_started

  etcd:
    image: docker.io/bitnami/etcd:3.5
    ports:
      - "2379:2379"
      - "2380:2380"
    environment:
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_NAME: etcd
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379
      ETCD_ADVERTISE_CLIENT_URLS: http://etcd:2379
      ETCD_MAX_REQUEST_BYTES: 10485760

  minio:
    image: docker.io/bitnami/minio:2024
    ports:
      - '9000:9000'
      - '9001:9001'
    environment:
      - MINIO_ROOT_USER=superpower_ci_user
      - MINIO_ROOT_PASSWORD=superpower_password
      - MINIO_DEFAULT_BUCKETS=greptime
      - BITNAMI_DEBUG=true
    volumes:
      - 'minio_data:/bitnami/minio/data'

  postgres:
    image: docker.io/postgres:14-alpine
    ports:
      - 5432:5432
    volumes:
      - ~/apps/postgres:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=greptimedb
      - POSTGRES_DB=postgres
      - POSTGRES_PASSWORD=admin

volumes:
  minio_data:
    driver: local
@@ -1,13 +0,0 @@
version: '3.8'
services:
  etcd:
    image: ghcr.io/zcube/bitnami-compat/etcd:3.5
    ports:
      - "2379:2379"
      - "2380:2380"
    environment:
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_NAME: etcd
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379
      ETCD_ADVERTISE_CLIENT_URLS: http://etcd:2379
      ETCD_MAX_REQUEST_BYTES: 10485760
@@ -1,19 +0,0 @@
## Starts a standalone kafka
```bash
docker compose -f docker-compose-standalone.yml up kafka -d
```

## Lists running services
```bash
docker compose -f docker-compose-standalone.yml ps
```

## Stops the standalone kafka
```bash
docker compose -f docker-compose-standalone.yml stop kafka
```

## Stops and removes the standalone kafka
```bash
docker compose -f docker-compose-standalone.yml down kafka
```
@@ -1,28 +0,0 @@
version: '3.8'
services:
  zookeeper:
    image: bitnami/zookeeper:3.7
    ports:
      - '2181:2181'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
  kafka:
    image: bitnami/kafka:3.6.0
    container_name: kafka
    ports:
      - 9092:9092
      - 9093:9093
    environment:
      # KRaft settings
      KAFKA_CFG_NODE_ID: "1"
      KAFKA_CFG_PROCESS_ROLES: broker,controller
      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:2181
      # Listeners
      KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092,SECURE://localhost:9093
      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SECURE:SASL_PLAINTEXT
      KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:2181,SECURE://:9093
      ALLOW_PLAINTEXT_LISTENER: "yes"
      KAFKA_BROKER_ID: "1"
      KAFKA_CLIENT_USERS: "user_kafka"
      KAFKA_CLIENT_PASSWORDS: "secret"
@@ -1,18 +0,0 @@
version: '3.8'
services:
  minio:
    image: bitnami/minio:2024
    ports:
      - '9000:9000'
      - '9001:9001'
    environment:
      - MINIO_ROOT_USER=superpower_ci_user
      - MINIO_ROOT_PASSWORD=superpower_password
      - MINIO_DEFAULT_BUCKETS=greptime
      - BITNAMI_DEBUG=true
    volumes:
      - 'minio_data:/bitnami/minio/data'

volumes:
  minio_data:
    driver: local
@@ -1,12 +0,0 @@
version: '3.9'
services:
  postgres:
    image: postgres:14-alpine
    ports:
      - 5432:5432
    volumes:
      - ~/apps/postgres:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=greptimedb
      - POSTGRES_DB=postgres
      - POSTGRES_PASSWORD=admin
@@ -394,6 +394,7 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router
        ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()),
        None,
    )
    .with_logs_handler(instance.instance.clone())
    .with_metrics_handler(MetricsHandler)
    .with_greptime_config_options(instance.opts.datanode_options().to_toml().unwrap())
    .build();
@@ -429,6 +430,7 @@ pub async fn setup_test_http_app_with_frontend_and_user_provider(
        Some(instance.instance.clone()),
    )
    .with_log_ingest_handler(instance.instance.clone(), None, None)
    .with_logs_handler(instance.instance.clone())
    .with_otlp_handler(instance.instance.clone())
    .with_greptime_config_options(instance.opts.to_toml().unwrap());

@@ -467,6 +469,7 @@ pub async fn setup_test_prom_app_with_frontend(
        ServerSqlQueryHandlerAdapter::arc(frontend_ref.clone()),
        Some(frontend_ref.clone()),
    )
    .with_logs_handler(instance.instance.clone())
    .with_prom_handler(frontend_ref.clone(), true, is_strict_mode)
    .with_prometheus_handler(frontend_ref)
    .with_greptime_config_options(instance.opts.datanode_options().to_toml().unwrap())
@@ -22,6 +22,7 @@ use axum::http::{HeaderName, HeaderValue, StatusCode};
use common_error::status_code::StatusCode as ErrorCode;
use flate2::write::GzEncoder;
use flate2::Compression;
use log_query::{ColumnFilters, Context, Limit, LogQuery, TimeFilter};
use loki_api::logproto::{EntryAdapter, PushRequest, StreamAdapter};
use loki_api::prost_types::Timestamp;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
@@ -40,6 +41,7 @@ use servers::http::result::influxdb_result_v1::{InfluxdbOutput, InfluxdbV1Respon
use servers::http::test_helpers::{TestClient, TestResponse};
use servers::http::GreptimeQueryOutput;
use servers::prom_store;
use table::table_name::TableName;
use tests_integration::test_util::{
    setup_test_http_app, setup_test_http_app_with_frontend,
    setup_test_http_app_with_frontend_and_user_provider, setup_test_prom_app_with_frontend,
@@ -97,6 +99,7 @@ macro_rules! http_tests {
            test_otlp_traces,
            test_otlp_logs,
            test_loki_logs,
            test_log_query,
        );
    )*
};
@@ -928,9 +931,9 @@ worker_request_batch_size = 64
manifest_checkpoint_distance = 10
compress_manifest = false
auto_flush_interval = "30m"
enable_write_cache = false
write_cache_path = ""
write_cache_size = "5GiB"
enable_experimental_write_cache = false
experimental_write_cache_path = ""
experimental_write_cache_size = "5GiB"
sst_write_buffer_size = "8MiB"
parallel_scan_channel_size = 32
allow_stale_entries = false
@@ -1882,6 +1885,68 @@ pub async fn test_loki_logs(store_type: StorageType) {
    guard.remove_all().await;
}

pub async fn test_log_query(store_type: StorageType) {
    common_telemetry::init_default_ut_logging();
    let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "test_log_query").await;

    let client = TestClient::new(app);

    // prepare data with SQL API
    let res = client
        .get("/v1/sql?sql=create table logs (`ts` timestamp time index, message string);")
        .send()
        .await;
    assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
    let res = client
        .post("/v1/sql?sql=insert into logs values ('2024-11-07 10:53:50', 'hello');")
        .header("Content-Type", "application/x-www-form-urlencoded")
        .send()
        .await;
    assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);

    // test log query
    let log_query = LogQuery {
        table: TableName {
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
            table_name: "logs".to_string(),
        },
        time_filter: TimeFilter {
            start: Some("2024-11-07".to_string()),
            end: None,
            span: None,
        },
        limit: Limit {
            skip: None,
            fetch: Some(1),
        },
        columns: vec![
            ColumnFilters {
                column_name: "ts".to_string(),
                filters: vec![],
            },
            ColumnFilters {
                column_name: "message".to_string(),
                filters: vec![],
            },
        ],
        context: Context::None,
    };
    let res = client
        .post("/v1/logs")
        .header("Content-Type", "application/json")
        .body(serde_json::to_string(&log_query).unwrap())
        .send()
        .await;

    assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
    let resp = res.text().await;
    let v = get_rows_from_output(&resp);
    assert_eq!(v, "[[1730976830000,\"hello\"]]");

    guard.remove_all().await;
}

async fn validate_data(test_name: &str, client: &TestClient, sql: &str, expected: &str) {
    let res = client
        .get(format!("/v1/sql?sql={sql}").as_str())
tests/cases/standalone/common/flow/flow_ins_default.result (new file, 70 lines)
@@ -0,0 +1,70 @@
CREATE TABLE bytes_log (
    byte INT,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    -- event time
    TIME INDEX(ts)
);

Affected Rows: 0

CREATE TABLE approx_rate (
    rate DOUBLE,
    time_window TIMESTAMP,
    update_at TIMESTAMP,
    TIME INDEX(time_window)
);

Affected Rows: 0

CREATE FLOW find_approx_rate SINK TO approx_rate AS
SELECT
    (max(byte) - min(byte)) / 30.0 as rate,
    date_bin(INTERVAL '30 second', ts) as time_window
from
    bytes_log
GROUP BY
    time_window;

Affected Rows: 0

INSERT INTO
    bytes_log (byte)
VALUES
    (NULL),
    (300);

Affected Rows: 2

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('find_approx_rate');

+--------------------------------------+
| ADMIN FLUSH_FLOW('find_approx_rate') |
+--------------------------------------+
| FLOW_FLUSHED |
+--------------------------------------+

-- since ts is default to now(), omit it when querying
SELECT
    rate
FROM
    approx_rate;

+------+
| rate |
+------+
| 0.0  |
+------+

DROP FLOW find_approx_rate;

Affected Rows: 0

DROP TABLE bytes_log;

Affected Rows: 0

DROP TABLE approx_rate;

Affected Rows: 0
tests/cases/standalone/common/flow/flow_ins_default.sql (new file, 41 lines)
@@ -0,0 +1,41 @@
CREATE TABLE bytes_log (
    byte INT,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    -- event time
    TIME INDEX(ts)
);

CREATE TABLE approx_rate (
    rate DOUBLE,
    time_window TIMESTAMP,
    update_at TIMESTAMP,
    TIME INDEX(time_window)
);

CREATE FLOW find_approx_rate SINK TO approx_rate AS
SELECT
    (max(byte) - min(byte)) / 30.0 as rate,
    date_bin(INTERVAL '30 second', ts) as time_window
from
    bytes_log
GROUP BY
    time_window;

INSERT INTO
    bytes_log (byte)
VALUES
    (NULL),
    (300);

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('find_approx_rate');

-- since ts is default to now(), omit it when querying
SELECT
    rate
FROM
    approx_rate;

DROP FLOW find_approx_rate;
DROP TABLE bytes_log;
DROP TABLE approx_rate;
@@ -257,3 +257,149 @@ drop table fox;

Affected Rows: 0

create table fox_zh (
    ts timestamp time index,
    fox string,
);

Affected Rows: 0

insert into fox_zh values
    (1, '快速的棕色狐狸跳过了懒狗'),
    (2, '这只狐狸非常聪明,跳过了高高的栅栏'),
    (3, '狐狸和狗是好朋友,它们一起玩耍'),
    (4, '狐狸跳过了一条小溪,狗在后面追赶'),
    (5, '狐狸和狗都喜欢在森林里探险'),
    (6, '狐狸跳过了一个大石头,狗却没有跳过去'),
    (7, '狐狸和狗在阳光下休息,享受着温暖的时光'),
    (8, '狐狸跳过了一个小山坡,狗在后面慢慢地走'),
    (9, '狐狸和狗一起找到了一颗闪闪发光的宝石'),
    (10, '狐狸跳过了一个小水坑,狗在旁边看着');

Affected Rows: 10

select fox from fox_zh where matches(fox, '狐狸 AND 跳过') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 这只狐狸非常聪明,跳过了高高的栅栏 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸跳过了一个大石头,狗却没有跳过去 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 OR 狗') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 这只狐狸非常聪明,跳过了高高的栅栏 |
| 狐狸和狗是好朋友,它们一起玩耍 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸和狗都喜欢在森林里探险 |
| 狐狸跳过了一个大石头,狗却没有跳过去 |
| 狐狸和狗在阳光下休息,享受着温暖的时光 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸和狗一起找到了一颗闪闪发光的宝石 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND 狗') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 狐狸和狗是好朋友,它们一起玩耍 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸和狗都喜欢在森林里探险 |
| 狐狸跳过了一个大石头,狗却没有跳过去 |
| 狐狸和狗在阳光下休息,享受着温暖的时光 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸和狗一起找到了一颗闪闪发光的宝石 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 -跳过') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 狐狸和狗是好朋友,它们一起玩耍 |
| 狐狸和狗都喜欢在森林里探险 |
| 狐狸和狗在阳光下休息,享受着温暖的时光 |
| 狐狸和狗一起找到了一颗闪闪发光的宝石 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 -石头') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 这只狐狸非常聪明,跳过了高高的栅栏 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '(狐狸 OR 狗) AND 森林') order by ts;

+----------------------------+
| fox |
+----------------------------+
| 狐狸和狗都喜欢在森林里探险 |
+----------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND (跳过 OR 追赶)') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 这只狐狸非常聪明,跳过了高高的栅栏 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸跳过了一个大石头,狗却没有跳过去 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND -(跳过 OR 追赶)') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 狐狸和狗是好朋友,它们一起玩耍 |
| 狐狸和狗都喜欢在森林里探险 |
| 狐狸和狗在阳光下休息,享受着温暖的时光 |
| 狐狸和狗一起找到了一颗闪闪发光的宝石 |
+----------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 AND (小溪 OR 石头)') order by ts;

+--------------------------------------+
| fox |
+--------------------------------------+
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸跳过了一个大石头,狗却没有跳过去 |
+--------------------------------------+

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 AND -(石头 OR 栅栏)') order by ts;

+----------------------------------------+
| fox |
+----------------------------------------+
| 快速的棕色狐狸跳过了懒狗 |
| 狐狸跳过了一条小溪,狗在后面追赶 |
| 狐狸跳过了一个小山坡,狗在后面慢慢地走 |
| 狐狸跳过了一个小水坑,狗在旁边看着 |
+----------------------------------------+

drop table fox_zh;

Affected Rows: 0
@@ -55,3 +55,42 @@ select fox from fox where matches(fox, 'over -(fox AND jumps)') order by ts;
select fox from fox where matches(fox, 'over AND -(-(fox OR jumps))') order by ts;

drop table fox;

create table fox_zh (
    ts timestamp time index,
    fox string,
);

insert into fox_zh values
    (1, '快速的棕色狐狸跳过了懒狗'),
    (2, '这只狐狸非常聪明,跳过了高高的栅栏'),
    (3, '狐狸和狗是好朋友,它们一起玩耍'),
    (4, '狐狸跳过了一条小溪,狗在后面追赶'),
    (5, '狐狸和狗都喜欢在森林里探险'),
    (6, '狐狸跳过了一个大石头,狗却没有跳过去'),
    (7, '狐狸和狗在阳光下休息,享受着温暖的时光'),
    (8, '狐狸跳过了一个小山坡,狗在后面慢慢地走'),
    (9, '狐狸和狗一起找到了一颗闪闪发光的宝石'),
    (10, '狐狸跳过了一个小水坑,狗在旁边看着');

select fox from fox_zh where matches(fox, '狐狸 AND 跳过') order by ts;

select fox from fox_zh where matches(fox, '狐狸 OR 狗') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND 狗') order by ts;

select fox from fox_zh where matches(fox, '狐狸 -跳过') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 -石头') order by ts;

select fox from fox_zh where matches(fox, '(狐狸 OR 狗) AND 森林') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND (跳过 OR 追赶)') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND -(跳过 OR 追赶)') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 AND (小溪 OR 石头)') order by ts;

select fox from fox_zh where matches(fox, '狐狸 AND 跳过 AND -(石头 OR 栅栏)') order by ts;

drop table fox_zh;