Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-05 12:52:57 +00:00.

Compare commits: chore/debu ... v0.12.0-ni (95 commits)
Commit SHA1s:

87c21e2baa d072801ad6 0607b38a20 e0384a7d46 d73815ba84
c78a492863 859717c309 52697a9e66 f8d26b433e 1acfb6ed1c
7eaabb3ca2 3a55f5d17c 8d5d4000e6 a598008ec3 86bd54194a
ccd2b06b7a 0db10a33d0 317fe9eaa5 a4761d6245 758aef39d8
4e3dd04f42 18b77408ae 725d5a9e68 4f29e50ef3 121ec7936f
0185a65905 f0d30a0f26 7e61d1ae27 e56dd20426 b64c075cdb
57f8afcb70 bd37e086c2 66f63ae981 95b20592ac 1855dccdf1
5efcb41310 f5829364a2 87bd12d6df c370b4b40d 3f01f67f94
6eb746d994 03a144fa56 f069ea082f 9ae48010f0 3a996c2f00
45d4065fd6 9e09be7ba6 50583815de 24ea9cf215 78d0fa75c9
0685ba265c be22da775a d33309be2b fdbfebf4be 812a775b3d
751fa4ede9 03a2e6d0c1 815ce59a3a c19a56c79f 7f307a4cac
52eebfce77 e18416a726 d1f8ea7880 2cd1b08ff7 0ee41339aa
369b59c84a c305b2b406 c89ef85902 3d9df822ad bc2f05d949
05f115e047 5cf9d7b6ca a1cd194d0c a56c430db0 6a1ec8db25
04708f10aa ddf36c8324 96b2a5fb28 bbbba29afc b229c94fba
2ad50332cb 513569ed5d 69d9a2845f 1067357b72 2caf003db0
9bf9aa1082 353c8230db 577d81f14c 856bba5d95 89399131dd
d20b592fe8 bcb0f14227 3b27adb3fe 4d6fe31fff 1b0b9add90
@@ -48,12 +48,11 @@ runs:
path: /tmp/greptime-*.log
retention-days: 3

- name: Build greptime
if: ${{ inputs.dev-mode == 'false' }}
- name: Build greptime # Builds standard greptime binary
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
features: servers/dashboard,pg_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -71,7 +70,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
features: servers/dashboard,pg_kvbackend
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -9,8 +9,8 @@ runs:
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
6 .github/actions/upload-artifacts/action.yml vendored
@@ -30,9 +30,9 @@ runs:
done

# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# greptime-linux-amd64-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}
8 .github/scripts/upload-artifacts-to-s3.sh vendored
@@ -27,11 +27,11 @@ function upload_artifacts() {
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
# ├── greptime-darwin-amd64-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
3 .github/workflows/dependency-check.yml vendored
@@ -1,9 +1,6 @@
name: Check Dependencies

on:
push:
branches:
- main
pull_request:
branches:
- main
141 .github/workflows/develop.yml vendored
@@ -1,4 +1,6 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 15 * * 1-5"
|
||||
merge_group:
|
||||
pull_request:
|
||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
||||
@@ -43,7 +45,7 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-2022, ubuntu-20.04 ]
|
||||
os: [ ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -57,6 +59,8 @@ jobs:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Clippy` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo check
|
||||
run: cargo check --locked --workspace --all-targets
|
||||
|
||||
@@ -67,11 +71,6 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-toml"
|
||||
- name: Install taplo
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
||||
- name: Run taplo
|
||||
@@ -94,13 +93,15 @@ jobs:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-binaries"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
|
||||
- name: Pack greptime binaries
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -142,11 +143,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -200,11 +196,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -255,13 +246,15 @@ jobs:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-greptime-ci"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binary
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -317,11 +310,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -466,11 +454,6 @@ jobs:
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -573,13 +556,16 @@ jobs:
|
||||
- name: "Remote WAL"
|
||||
opts: "-w kafka -k 127.0.0.1:9092"
|
||||
kafka: true
|
||||
- name: "Pg Kvbackend"
|
||||
opts: "--setup-pg"
|
||||
kafka: false
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait kafka
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -609,11 +595,6 @@ jobs:
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-rust-fmt"
|
||||
- name: Check format
|
||||
run: make fmt-check
|
||||
|
||||
@@ -635,55 +616,99 @@ jobs:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Check` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo clippy
|
||||
run: make clippy
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
conflict-check:
|
||||
name: Check for conflict
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Merge Conflict Finder
|
||||
uses: olivernybroe/action-conflict-finder@v4.0
|
||||
|
||||
test:
|
||||
if: github.event_name != 'merge_group'
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
needs: [clippy, fmt]
|
||||
needs: [conflict-check, clippy, fmt]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
version: "14.0"
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: llvm-tools-preview
|
||||
cache: false
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
- name: Docker Cache
|
||||
uses: ScribeMD/docker-cache@0.3.7
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
coverage:
|
||||
if: github.event_name == 'merge_group'
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
key: docker-${{ runner.os }}-coverage
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: llvm-tools
|
||||
cache: false
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Setup etcd server
|
||||
working-directory: tests-integration/fixtures/etcd
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup minio
|
||||
working-directory: tests-integration/fixtures/minio
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup postgres server
|
||||
working-directory: tests-integration/fixtures/postgres
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: docker compose up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
|
||||
1 .github/workflows/nightly-ci.yml vendored
@@ -109,6 +109,7 @@ jobs:
UNITTEST_LOG_DIR: "__unittest_logs"

cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
steps:
16 .github/workflows/release.yml vendored
@@ -436,6 +436,22 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}

notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
2252 Cargo.lock generated
File diff suppressed because it is too large
22 Cargo.toml
@@ -55,7 +55,6 @@ members = [
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
"src/query",
|
||||
"src/script",
|
||||
"src/servers",
|
||||
"src/session",
|
||||
"src/sql",
|
||||
@@ -79,8 +78,6 @@ clippy.dbg_macro = "warn"
|
||||
clippy.implicit_clone = "warn"
|
||||
clippy.readonly_write_lock = "allow"
|
||||
rust.unknown_lints = "deny"
|
||||
# Remove this after https://github.com/PyO3/pyo3/issues/4094
|
||||
rust.non_local_definitions = "allow"
|
||||
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
||||
|
||||
[workspace.dependencies]
|
||||
@@ -99,6 +96,7 @@ arrow-schema = { version = "51.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
axum = { version = "0.6", features = ["headers"] }
|
||||
backon = "1"
|
||||
base64 = "0.21"
|
||||
bigdecimal = "0.4.2"
|
||||
bitflags = "2.4.1"
|
||||
@@ -118,13 +116,15 @@ datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion
|
||||
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
|
||||
deadpool = "0.10"
|
||||
deadpool-postgres = "0.12"
|
||||
derive_builder = "0.12"
|
||||
dotenv = "0.15"
|
||||
etcd-client = "0.13"
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ec801a91aa22f9666063d02805f1f60f7c93458a" }
|
||||
hex = "0.4"
|
||||
http = "0.2"
|
||||
humantime = "2.1"
|
||||
@@ -132,6 +132,7 @@ humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
|
||||
lazy_static = "1.4"
|
||||
local-ip-address = "0.6"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
@@ -179,15 +180,17 @@ similar-asserts = "1.6.0"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.8"
|
||||
sysinfo = "0.30"
|
||||
# on branch v0.44.x
|
||||
|
||||
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
|
||||
"visitor",
|
||||
"serde",
|
||||
] }
|
||||
] } # on branch v0.44.x
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.40", features = ["full"] }
|
||||
tokio-postgres = "0.7"
|
||||
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
|
||||
tokio-stream = "0.1"
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.8.8"
|
||||
@@ -254,7 +257,6 @@ plugins = { path = "src/plugins" }
|
||||
promql = { path = "src/promql" }
|
||||
puffin = { path = "src/puffin" }
|
||||
query = { path = "src/query" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
@@ -264,9 +266,9 @@ table = { path = "src/table" }
|
||||
|
||||
[patch.crates-io]
|
||||
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
|
||||
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
|
||||
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
|
||||
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
|
||||
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
|
||||
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
|
||||
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
|
||||
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
|
||||
# see https://github.com/aws/aws-lc-rs/pull/526
|
||||
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
|
||||
|
||||
5 Makefile
@@ -165,15 +165,14 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS}

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
fuzz: ## Run fuzz test ${FUZZ_TARGET}.
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
fuzz-ls:
fuzz-ls: ## List all fuzz targets.
cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
@@ -138,7 +138,8 @@ Check the prerequisite:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
* Python toolchain (optional): Required only if using some test scripts.

Build GreptimeDB binary:

@@ -228,4 +229,3 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
@@ -91,10 +91,12 @@
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
@@ -132,10 +134,10 @@
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
@@ -143,15 +145,15 @@
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -214,7 +216,7 @@
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
@@ -293,9 +295,11 @@
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `EtcdStore` | The datastore for meta server. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
|
||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
||||
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
@@ -378,7 +382,7 @@
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
@@ -466,10 +470,10 @@
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
@@ -477,15 +481,15 @@
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -534,12 +538,18 @@
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
|
||||
@@ -59,7 +59,7 @@ body_limit = "64MB"
|
||||
addr = "127.0.0.1:3001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
hostname = "127.0.0.1:3001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_experimental_write_cache = false
|
||||
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
experimental_write_cache_path = ""
|
||||
write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
experimental_write_cache_size = "5GiB"
|
||||
write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
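To make the rename easier to read than the interleaved old/new lines above, here is a minimal sketch of the renamed write-cache options, assuming they sit under a `[region_engine.mito]` table as the dotted option names in the configuration reference suggest; the key names and default values are the ones shown in this diff:

```toml
# Hypothetical placement; only the key names and defaults come from the diff above.
[region_engine.mito]
# Whether to enable the write cache (enabled by default when using object storage).
enable_write_cache = false
# File system path for the write cache, defaults to `{data_home}`.
write_cache_path = ""
# Capacity of the write cache.
write_cache_size = "5GiB"
# TTL for the write cache.
write_cache_ttl = "8h"
```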
@@ -516,6 +516,15 @@ aux_path = ""
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
@@ -543,15 +552,6 @@ mem_threshold_on_create = "auto"
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "8MiB"
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
|
||||
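Reading the two hunks above together, the inverted-index cache settings move from `[region_engine.mito.inverted_index]` into `[region_engine.mito.index]`, and the content-cache page size default changes from `8MiB` to `64KiB`. A hedged sketch of the resulting block, using only keys and defaults shown here:

```toml
# Sketch of the consolidated index options after this change; the duplicate
# cache keys previously under [region_engine.mito.inverted_index] are removed.
[region_engine.mito.index]
aux_path = ""
staging_size = "2GB"
metadata_cache_size = "64MiB"
content_cache_size = "128MiB"
content_cache_page_size = "64KiB"
```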
@@ -5,6 +5,12 @@ mode = "distributed"
|
||||
## @toml2docs:none-default
|
||||
node_id = 14
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
## The number of flow worker in flownode.
|
||||
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
||||
#+num_workers=0
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -19,6 +25,16 @@ max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
|
||||
@@ -38,7 +38,7 @@ body_limit = "64MB"
|
||||
addr = "127.0.0.1:4001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
hostname = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
|
||||
@@ -8,13 +8,29 @@ bind_addr = "127.0.0.1:3002"
|
||||
server_addr = "127.0.0.1:3002"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
## For postgres store, the format is:
|
||||
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||
## For etcd store, the format is:
|
||||
## "127.0.0.1:2379"
|
||||
store_addrs = ["127.0.0.1:2379"]
|
||||
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
## The datastore for meta server.
|
||||
backend = "EtcdStore"
|
||||
## Available values:
|
||||
## - `etcd_store` (default value)
|
||||
## - `memory_store`
|
||||
## - `postgres_store`
|
||||
backend = "etcd_store"
|
||||
|
||||
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
|
||||
## **Only used when backend is `postgres_store`.**
|
||||
meta_table_name = "greptime_metakv"
|
||||
|
||||
## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
|
||||
## Only used when backend is `postgres_store`.
|
||||
meta_election_lock_id = 1
|
||||
|
||||
## Datanode selector type.
|
||||
## - `round_robin` (default value)
|
||||
|
||||
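Pulling the new metasrv options together, a minimal, untested sketch of a metasrv configured with the PostgreSQL kvbackend; every key and value format comes from this diff, and the connection parameters are the placeholder values used above:

```toml
# Sketch only: metasrv backed by PostgreSQL instead of etcd.
backend = "postgres_store"
# For the postgres store, each address is a connection string in this format.
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
# Table name in RDS to store metadata (only used with postgres_store).
meta_table_name = "greptime_metakv"
# Advisory lock id in PostgreSQL for election (only used with postgres_store).
meta_election_lock_id = 1
```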
@@ -284,6 +284,12 @@ max_retry_times = 3
|
||||
## Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
## The number of flow worker in flownode.
|
||||
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
||||
#+num_workers=0
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
@@ -337,7 +343,7 @@ data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
@@ -518,18 +524,18 @@ auto_flush_interval = "1h"
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_experimental_write_cache = false
|
||||
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
|
||||
experimental_write_cache_path = ""
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
experimental_write_cache_size = "5GiB"
|
||||
write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
@@ -559,6 +565,15 @@ aux_path = ""
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
@@ -586,15 +601,6 @@ mem_threshold_on_create = "auto"
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "8MiB"
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
|
||||
75 cyborg/bin/bump-doc-version.ts Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {obtainClient} from "@/common";
|
||||
|
||||
async function triggerWorkflow(workflowId: string, version: string) {
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
try {
|
||||
await docsClient.rest.actions.createWorkflowDispatch({
|
||||
owner: "GreptimeTeam",
|
||||
repo: "docs",
|
||||
workflow_id: workflowId,
|
||||
ref: "main",
|
||||
inputs: {
|
||||
version,
|
||||
},
|
||||
});
|
||||
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||
} catch (error) {
|
||||
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function determineWorkflow(version: string): [string, string] {
|
||||
// Check if it's a nightly version
|
||||
if (version.includes('nightly')) {
|
||||
return ['bump-nightly-version.yml', version];
|
||||
}
|
||||
|
||||
const parts = version.split('.');
|
||||
|
||||
if (parts.length !== 3) {
|
||||
throw new Error('Invalid version format');
|
||||
}
|
||||
|
||||
// If patch version (last number) is 0, it's a major version
|
||||
// Return only major.minor version
|
||||
if (parts[2] === '0') {
|
||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||
}
|
||||
|
||||
// Otherwise it's a patch version, use full version
|
||||
return ['bump-patch-version.yml', version];
|
||||
}
|
||||
|
||||
const version = process.env.VERSION;
|
||||
if (!version) {
|
||||
core.setFailed("VERSION environment variable is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove 'v' prefix if exists
|
||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||
|
||||
try {
|
||||
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
|
||||
triggerWorkflow(workflowId, apiVersion);
|
||||
} catch (error) {
|
||||
core.setFailed(`Error processing version: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=.,rw \
|
||||
|
||||
@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
|
||||
@@ -13,12 +13,7 @@ RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install pyarrow
|
||||
pkg-config
|
||||
|
||||
# Trust workdir
|
||||
RUN git config --global --add safe.directory /greptimedb
|
||||
|
||||
@@ -12,8 +12,6 @@ RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
# Install protoc
|
||||
@@ -23,7 +21,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
|
||||
@@ -6,11 +6,8 @@ ARG DOCKER_BUILD_ROOT=.
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
@@ -20,9 +17,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3.10 \
|
||||
python3.10-dev
|
||||
pkg-config
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
RUN echo "target platform: $TARGETPLATFORM"
|
||||
@@ -38,21 +33,6 @@ fi
|
||||
RUN mv protoc3/bin/* /usr/local/bin/
|
||||
RUN mv protoc3/include/* /usr/local/include/
|
||||
|
||||
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
|
||||
# `aws-lc-sys` requires gcc >= 10.3.0 to work, hence alias to use gcc-10
|
||||
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
|
||||
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
|
||||
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
|
||||
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
|
||||
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
|
||||
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
|
||||
|
||||
# Remove Python 3.8 and install pip.
|
||||
RUN apt-get -y purge python3.8 && \
|
||||
apt-get -y autoremove && \
|
||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||
|
||||
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||
@@ -65,10 +45,6 @@ RUN apt-get -y purge python3.8 && \
|
||||
# it can be a different user that have prepared the submodules.
|
||||
RUN git config --global --add safe.directory '*'
|
||||
|
||||
# Install Python dependencies.
|
||||
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
|
||||
@@ -1,5 +0,0 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
@@ -5296,7 +5296,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_total{pod=~\"$datanode\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
|
||||
"range": true,
|
||||
@@ -5392,7 +5392,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
|
||||
"range": true,
|
||||
@@ -5488,7 +5488,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-{{scheme}}-p99",
|
||||
"range": true,
|
||||
@@ -5584,7 +5584,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
|
||||
"range": true,
|
||||
@@ -5680,7 +5680,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",
|
||||
"range": true,
|
||||
@@ -5776,7 +5776,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"interval": "",
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
|
||||
@@ -5873,7 +5873,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"interval": "",
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",
|
||||
@@ -5970,7 +5970,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
|
||||
"range": true,
|
||||
@@ -6066,7 +6066,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-p99",
|
||||
"range": true,
|
||||
@@ -6298,6 +6298,6 @@
|
||||
"timezone": "",
|
||||
"title": "GreptimeDB Cluster Metrics",
|
||||
"uid": "ce3q6xwn3xa0wa",
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"weekStart": ""
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-10-19"
components = ["rust-analyzer"]
components = ["rust-analyzer", "llvm-tools"]

@@ -14,6 +14,7 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
from multiprocessing import Pool
|
||||
|
||||
|
||||
def find_rust_files(directory):
|
||||
@@ -33,13 +34,11 @@ def extract_branch_names(file_content):
|
||||
return pattern.findall(file_content)
|
||||
|
||||
|
||||
def check_snafu_in_files(branch_name, rust_files):
|
||||
def check_snafu_in_files(branch_name, rust_files_content):
|
||||
branch_name_snafu = f"{branch_name}Snafu"
|
||||
for rust_file in rust_files:
|
||||
with open(rust_file, "r") as file:
|
||||
content = file.read()
|
||||
if branch_name_snafu in content:
|
||||
return True
|
||||
for content in rust_files_content.values():
|
||||
if branch_name_snafu in content:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
@@ -49,21 +48,24 @@ def main():
|
||||
|
||||
for error_file in error_files:
|
||||
with open(error_file, "r") as file:
|
||||
content = file.read()
|
||||
branch_names.extend(extract_branch_names(content))
|
||||
branch_names.extend(extract_branch_names(file.read()))
|
||||
|
||||
unused_snafu = [
|
||||
branch_name
|
||||
for branch_name in branch_names
|
||||
if not check_snafu_in_files(branch_name, other_rust_files)
|
||||
]
|
||||
# Read all rust files into memory once
|
||||
rust_files_content = {}
|
||||
for rust_file in other_rust_files:
|
||||
with open(rust_file, "r") as file:
|
||||
rust_files_content[rust_file] = file.read()
|
||||
|
||||
with Pool() as pool:
|
||||
results = pool.starmap(
|
||||
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
|
||||
)
|
||||
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
|
||||
|
||||
if unused_snafu:
|
||||
print("Unused error variants:")
|
||||
for name in unused_snafu:
|
||||
print(name)
|
||||
|
||||
if unused_snafu:
|
||||
raise SystemExit(1)
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
let
|
||||
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
|
||||
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11";
|
||||
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
|
||||
pkgs = import nixpkgs { config = {}; overlays = []; };
|
||||
in
|
||||
@@ -11,16 +11,20 @@ pkgs.mkShell rec {
|
||||
clang
|
||||
gcc
|
||||
protobuf
|
||||
gnumake
|
||||
mold
|
||||
(fenix.fromToolchainFile {
|
||||
dir = ./.;
|
||||
})
|
||||
cargo-nextest
|
||||
cargo-llvm-cov
|
||||
taplo
|
||||
curl
|
||||
];
|
||||
|
||||
buildInputs = with pkgs; [
|
||||
libgit2
|
||||
libz
|
||||
];
|
||||
|
||||
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
|
||||
|
||||
@@ -57,13 +57,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
}
|
||||
if let Some(options) = column_def.options.as_ref() {
|
||||
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
|
||||
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
|
||||
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
|
||||
}
|
||||
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
|
||||
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
|
||||
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
|
||||
}
|
||||
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
||||
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
|
||||
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
|
||||
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
|
||||
options
|
||||
.options
|
||||
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
|
||||
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
|
||||
}
|
||||
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
|
||||
options
|
||||
@@ -181,14 +181,14 @@ mod tests {
|
||||
let options = options_from_column_schema(&schema);
|
||||
assert!(options.is_none());
|
||||
|
||||
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||
.with_fulltext_options(FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
})
|
||||
.unwrap()
|
||||
.set_inverted_index(true);
|
||||
.unwrap();
|
||||
schema.with_inverted_index(true);
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
|
||||
@@ -122,13 +122,6 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
@@ -343,9 +336,7 @@ impl ErrorExt for Error {
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::Internal { source, .. } => source.status_code(),
|
||||
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
|
||||
|
||||
@@ -58,6 +58,8 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
|
||||
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
|
||||
/// Fulltext index constraint name
|
||||
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
|
||||
/// Skipping index constraint name
|
||||
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
|
||||
|
||||
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
|
||||
pub(super) struct InformationSchemaKeyColumnUsage {
|
||||
@@ -225,6 +227,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
let keys = &table_info.meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
|
||||
// For compatibility, use primary key columns as inverted index columns.
|
||||
let pk_as_inverted_index = !schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.any(|c| c.has_inverted_index_key());
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let mut constraints = vec![];
|
||||
if column.is_time_index() {
|
||||
@@ -242,14 +250,20 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
// TODO(dimbtp): foreign key constraint not supported yet
|
||||
if keys.contains(&idx) {
|
||||
constraints.push(PRI_CONSTRAINT_NAME);
|
||||
|
||||
if pk_as_inverted_index {
|
||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||
}
|
||||
}
|
||||
if column.is_inverted_indexed() {
|
||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||
}
|
||||
|
||||
if column.has_fulltext_index_key() {
|
||||
if column.is_fulltext_indexed() {
|
||||
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
|
||||
}
|
||||
if column.is_skipping_indexed() {
|
||||
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
|
||||
}
|
||||
|
||||
if !constraints.is_empty() {
|
||||
let aggregated_constraints = constraints.join(", ");
|
||||
|
||||
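As a worked example of the compatibility rule above (hypothetical table, not taken from this diff): when no column carries an explicit inverted index key, every primary key column is reported in `information_schema.key_column_usage` with both the primary key constraint and `INVERTED INDEX`; columns with fulltext or skipping index options additionally get `FULLTEXT INDEX` or `SKIPPING INDEX`, and the names are joined with ", " into a single constraints string.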
@@ -14,6 +14,7 @@
|
||||
|
||||
mod pg_catalog_memory_table;
|
||||
mod pg_class;
|
||||
mod pg_database;
|
||||
mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
@@ -26,6 +27,7 @@ use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_database::PGDatabase;
|
||||
use pg_namespace::PGNamespace;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use table::TableRef;
|
||||
@@ -113,6 +115,10 @@ impl PGCatalogProvider {
|
||||
PG_CLASS.to_string(),
|
||||
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_DATABASE.to_string(),
|
||||
self.build_table(PG_DATABASE).expect(PG_DATABASE),
|
||||
);
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
@@ -135,6 +141,11 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
src/catalog/src/system_schema/pg_catalog/pg_database.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const DATNAME: &str = "datname";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `pg_catalog.database` table implementation.
|
||||
pub(super) struct PGDatabase {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGDatabase {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(DATNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGCDatabaseBuilder {
|
||||
PGCDatabaseBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGDatabase {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGDatabase {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_DATABASE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_DATABASE
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_database` table row by row.
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
/// `datname` is the schema name.
|
||||
struct PGCDatabaseBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
datname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGCDatabaseBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
self.add_database(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row: [(&str, &Value); 2] = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(DATNAME, &Value::from(schema_name)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.datname.push(Some(schema_name));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
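For illustration (assuming a schema named `public` exists): each schema visible to the catalog manager becomes one row of the table added above, with `datname` set to the schema name and `oid` taken from the namespace oid map, so a PostgreSQL-protocol client issuing `SELECT oid, datname FROM pg_database;` should see one row per schema.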
@@ -12,7 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub const PG_DATABASE: &str = "pg_databases";
// https://www.postgresql.org/docs/current/catalog-pg-database.html
pub const PG_DATABASE: &str = "pg_database";
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
pub const PG_NAMESPACE: &str = "pg_namespace";
// https://www.postgresql.org/docs/current/catalog-pg-class.html
pub const PG_CLASS: &str = "pg_class";
// https://www.postgresql.org/docs/current/catalog-pg-type.html
pub const PG_TYPE: &str = "pg_type";

@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]

[lints]
workspace = true

@@ -56,7 +59,6 @@ tokio.workspace = true
tracing-appender.workspace = true

[dev-dependencies]
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true

@@ -22,6 +22,9 @@ use clap::Parser;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::postgres::PgStore;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{Region, RegionRoute};
|
||||
use common_telemetry::info;
|
||||
@@ -55,18 +58,34 @@ where
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct BenchTableMetadataCommand {
|
||||
#[clap(long)]
|
||||
etcd_addr: String,
|
||||
etcd_addr: Option<String>,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
count: u32,
|
||||
}
|
||||
|
||||
impl BenchTableMetadataCommand {
|
||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
|
||||
.await
|
||||
.unwrap();
|
||||
let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
|
||||
info!("Using etcd as kv backend");
|
||||
EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
|
||||
} else {
|
||||
Arc::new(MemoryKvBackend::new())
|
||||
};
|
||||
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
|
||||
info!("Using postgres as kv backend");
|
||||
PgStore::with_url(postgres_addr, "greptime_metakv", 128)
|
||||
.await
|
||||
.unwrap()
|
||||
} else {
|
||||
kv_backend
|
||||
};
|
||||
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
|
||||
|
||||
let tool = BenchTableMetadata {
|
||||
table_metadata_manager,
|
||||
|
||||
@@ -10,9 +10,8 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["python", "servers/pprof", "servers/mem-prof"]
default = ["servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]

[lints]
workspace = true

@@ -62,6 +62,11 @@ impl Instance {
|
||||
pub fn datanode(&self) -> &Datanode {
|
||||
&self.datanode
|
||||
}
|
||||
|
||||
/// allow customizing datanode for downstream projects
|
||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||
&mut self.datanode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -271,7 +276,8 @@ impl StartCommand {
|
||||
info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let opts = opts.component;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_hostname();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
@@ -66,6 +67,11 @@ impl Instance {
|
||||
pub fn flownode(&self) -> &FlownodeInstance {
|
||||
&self.flownode
|
||||
}
|
||||
|
||||
/// allow customizing flownode for downstream projects
|
||||
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
||||
&mut self.flownode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -137,6 +143,11 @@ struct StartCommand {
|
||||
/// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`;
|
||||
#[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
|
||||
env_prefix: String,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
/// HTTP request timeout in seconds.
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -193,6 +204,14 @@ impl StartCommand {
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http.addr.clone_from(http_addr);
|
||||
}
|
||||
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
opts.http.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
|
||||
return MissingConfigSnafu {
|
||||
msg: "Missing node id option",
|
||||
@@ -217,7 +236,8 @@ impl StartCommand {
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
info!("Flownode options: {:#?}", opts);
|
||||
|
||||
let opts = opts.component;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_hostname();
|
||||
|
||||
// TODO(discord9): make it not optional after cluster id is required
|
||||
let cluster_id = opts.cluster_id.unwrap_or(0);
|
||||
|
||||
@@ -268,7 +268,8 @@ impl StartCommand {
|
||||
info!("Frontend options: {:#?}", opts);
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let opts = opts.component;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_hostname();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
|
||||
@@ -249,8 +249,6 @@ impl StartCommand {
|
||||
|
||||
if let Some(backend) = &self.backend {
|
||||
opts.backend.clone_from(backend);
|
||||
} else {
|
||||
opts.backend = BackendImpl::default()
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
@@ -274,7 +272,8 @@ impl StartCommand {
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let opts = opts.component;
|
||||
let mut opts = opts.component;
|
||||
opts.detect_server_addr();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
|
||||
@@ -54,7 +54,7 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::region_server::RegionServer;
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
|
||||
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
|
||||
@@ -145,6 +145,7 @@ pub struct StandaloneOptions {
|
||||
pub storage: StorageConfig,
|
||||
pub metadata_store: KvBackendConfig,
|
||||
pub procedure: ProcedureConfig,
|
||||
pub flow: FlowConfig,
|
||||
pub logging: LoggingOptions,
|
||||
pub user_provider: Option<String>,
|
||||
/// Options for different store engines.
|
||||
@@ -173,6 +174,7 @@ impl Default for StandaloneOptions {
|
||||
storage: StorageConfig::default(),
|
||||
metadata_store: KvBackendConfig::default(),
|
||||
procedure: ProcedureConfig::default(),
|
||||
flow: FlowConfig::default(),
|
||||
logging: LoggingOptions::default(),
|
||||
export_metrics: ExportMetricsOption::default(),
|
||||
user_provider: None,
|
||||
@@ -461,7 +463,8 @@ impl StartCommand {
|
||||
|
||||
let mut plugins = Plugins::new();
|
||||
let plugin_opts = opts.plugins;
|
||||
let opts = opts.component;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_hostname();
|
||||
let fe_opts = opts.frontend_options();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
@@ -522,8 +525,12 @@ impl StartCommand {
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
|
||||
let flownode_options = FlownodeOptions {
|
||||
flow: opts.flow.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
Default::default(),
|
||||
flownode_options,
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
|
||||
@@ -69,7 +69,7 @@ fn test_load_datanode_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
@@ -85,7 +85,9 @@ fn test_load_datanode_example_config() {
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
|
||||
grpc: GrpcOptions::default()
|
||||
.with_addr("127.0.0.1:3001")
|
||||
.with_hostname("127.0.0.1:3001"),
|
||||
rpc_addr: Some("127.0.0.1:3001".to_string()),
|
||||
rpc_hostname: Some("127.0.0.1".to_string()),
|
||||
rpc_runtime_size: Some(8),
|
||||
@@ -137,6 +139,7 @@ fn test_load_frontend_example_config() {
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
@@ -154,6 +157,7 @@ fn test_load_metasrv_example_config() {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
@@ -203,7 +207,7 @@ fn test_load_standalone_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
|
||||
@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
testing = []

[lints]
workspace = true

@@ -17,6 +17,7 @@ use std::io;
|
||||
use std::ops::Range;
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
@@ -33,19 +34,22 @@ pub struct Metadata {
|
||||
pub content_length: u64,
|
||||
}
|
||||
|
||||
/// `RangeReader` reads a range of bytes from a source.
|
||||
#[async_trait]
|
||||
pub trait RangeReader: Send + Unpin {
|
||||
/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
|
||||
pub trait SizeAwareRangeReader: RangeReader {
|
||||
/// Sets the file size hint for the reader.
|
||||
///
|
||||
/// It's used to optimize the reading process by reducing the number of remote requests.
|
||||
fn with_file_size_hint(&mut self, file_size_hint: u64);
|
||||
}
|
||||
|
||||
/// `RangeReader` reads a range of bytes from a source.
|
||||
#[async_trait]
|
||||
pub trait RangeReader: Sync + Send + Unpin {
|
||||
/// Returns the metadata of the source.
|
||||
async fn metadata(&mut self) -> io::Result<Metadata>;
|
||||
async fn metadata(&self) -> io::Result<Metadata>;
|
||||
|
||||
/// Reads the bytes in the given range.
|
||||
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;
|
||||
async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;
|
||||
|
||||
/// Reads the bytes in the given range into the buffer.
|
||||
///
|
||||
@@ -53,18 +57,14 @@ pub trait RangeReader: Send + Unpin {
|
||||
/// - If the buffer is insufficient to hold the bytes, it will either:
|
||||
/// - Allocate additional space (e.g., for `Vec<u8>`)
|
||||
/// - Panic (e.g., for `&mut [u8]`)
|
||||
async fn read_into(
|
||||
&mut self,
|
||||
range: Range<u64>,
|
||||
buf: &mut (impl BufMut + Send),
|
||||
) -> io::Result<()> {
|
||||
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
|
||||
let bytes = self.read(range).await?;
|
||||
buf.put_slice(&bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reads the bytes in the given ranges.
|
||||
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
|
||||
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
|
||||
let mut result = Vec::with_capacity(ranges.len());
|
||||
for range in ranges {
|
||||
result.push(self.read(range.clone()).await?);
|
||||
@@ -74,25 +74,20 @@ pub trait RangeReader: Send + Unpin {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
|
||||
fn with_file_size_hint(&mut self, file_size_hint: u64) {
|
||||
(*self).with_file_size_hint(file_size_hint)
|
||||
}
|
||||
|
||||
async fn metadata(&mut self) -> io::Result<Metadata> {
|
||||
impl<R: ?Sized + RangeReader> RangeReader for &R {
|
||||
async fn metadata(&self) -> io::Result<Metadata> {
|
||||
(*self).metadata().await
|
||||
}
|
||||
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
|
||||
|
||||
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
|
||||
(*self).read(range).await
|
||||
}
|
||||
async fn read_into(
|
||||
&mut self,
|
||||
range: Range<u64>,
|
||||
buf: &mut (impl BufMut + Send),
|
||||
) -> io::Result<()> {
|
||||
|
||||
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
|
||||
(*self).read_into(range, buf).await
|
||||
}
|
||||
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
|
||||
|
||||
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
|
||||
(*self).read_vec(ranges).await
|
||||
}
|
||||
}
|
||||
@@ -120,7 +115,7 @@ pub struct AsyncReadAdapter<R> {
|
||||
|
||||
impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
|
||||
pub async fn new(inner: R) -> io::Result<Self> {
|
||||
let mut inner = inner;
|
||||
let inner = inner;
|
||||
let metadata = inner.metadata().await?;
|
||||
Ok(AsyncReadAdapter {
|
||||
inner: Arc::new(Mutex::new(inner)),
|
||||
@@ -160,7 +155,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
|
||||
let range = *this.position..(*this.position + size);
|
||||
let inner = this.inner.clone();
|
||||
let fut = async move {
|
||||
let mut inner = inner.lock().await;
|
||||
let inner = inner.lock().await;
|
||||
inner.read(range).await
|
||||
};
|
||||
|
||||
@@ -195,27 +190,24 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
|
||||
|
||||
#[async_trait]
|
||||
impl RangeReader for Vec<u8> {
|
||||
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
async fn metadata(&mut self) -> io::Result<Metadata> {
|
||||
async fn metadata(&self) -> io::Result<Metadata> {
|
||||
Ok(Metadata {
|
||||
content_length: self.len() as u64,
|
||||
})
|
||||
}
|
||||
|
||||
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
|
||||
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
|
||||
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
|
||||
Ok(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(weny): consider replacing `tokio::fs::File` with an opendal reader.
|
||||
/// `FileReader` is a `RangeReader` for reading a file.
|
||||
pub struct FileReader {
|
||||
content_length: u64,
|
||||
position: u64,
|
||||
file: tokio::fs::File,
|
||||
position: AtomicU64,
|
||||
file: Mutex<tokio::fs::File>,
|
||||
}
|
||||
|
||||
impl FileReader {
|
||||
@@ -225,32 +217,37 @@ impl FileReader {
|
||||
let metadata = file.metadata().await?;
|
||||
Ok(FileReader {
|
||||
content_length: metadata.len(),
|
||||
position: 0,
|
||||
file,
|
||||
position: AtomicU64::new(0),
|
||||
file: Mutex::new(file),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
impl SizeAwareRangeReader for FileReader {
|
||||
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
|
||||
// do nothing
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RangeReader for FileReader {
|
||||
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
async fn metadata(&mut self) -> io::Result<Metadata> {
|
||||
async fn metadata(&self) -> io::Result<Metadata> {
|
||||
Ok(Metadata {
|
||||
content_length: self.content_length,
|
||||
})
|
||||
}
|
||||
|
||||
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
|
||||
if range.start != self.position {
|
||||
self.file.seek(io::SeekFrom::Start(range.start)).await?;
|
||||
self.position = range.start;
|
||||
async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
|
||||
let mut file = self.file.lock().await;
|
||||
|
||||
if range.start != self.position.load(Ordering::Relaxed) {
|
||||
file.seek(io::SeekFrom::Start(range.start)).await?;
|
||||
self.position.store(range.start, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
range.end = range.end.min(self.content_length);
|
||||
if range.end <= self.position {
|
||||
if range.end <= self.position.load(Ordering::Relaxed) {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"Start of range is out of bounds",
|
||||
@@ -259,8 +256,8 @@ impl RangeReader for FileReader {
|
||||
|
||||
let mut buf = vec![0; (range.end - range.start) as usize];
|
||||
|
||||
self.file.read_exact(&mut buf).await?;
|
||||
self.position = range.end;
|
||||
file.read_exact(&mut buf).await?;
|
||||
self.position.store(range.end, Ordering::Relaxed);
|
||||
|
||||
Ok(Bytes::from(buf))
|
||||
}
|
||||
@@ -301,7 +298,7 @@ mod tests {
|
||||
let data = b"hello world";
|
||||
tokio::fs::write(path, data).await.unwrap();
|
||||
|
||||
let mut reader = FileReader::new(path).await.unwrap();
|
||||
let reader = FileReader::new(path).await.unwrap();
|
||||
let metadata = reader.metadata().await.unwrap();
|
||||
assert_eq!(metadata.content_length, data.len() as u64);
|
||||
|
||||
|
||||
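A minimal usage sketch for the refactor above (hypothetical caller code, assuming the `bytes` crate and these range-reader types are in scope): because `metadata` and `read` now take `&self`, a `FileReader` can be driven through a shared reference, with the interior `Mutex<tokio::fs::File>` serializing actual file access.

    // Sketch only: read the first and last 16 bytes of a file through a shared reference.
    async fn peek_ends(reader: &FileReader) -> std::io::Result<(bytes::Bytes, bytes::Bytes)> {
        let meta = reader.metadata().await?; // &self, no mutable borrow needed
        let head = reader.read(0..16.min(meta.content_length)).await?;
        let tail_start = meta.content_length.saturating_sub(16);
        let tail = reader.read(tail_start..meta.content_length).await?;
        Ok((head, tail))
    }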
@@ -109,6 +109,7 @@ pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;
pub const PG_CATALOG_PG_DATABASE_TABLE_ID: u32 = 259;

// ----- End of pg_catalog tables -----

@@ -73,14 +73,21 @@ pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
|
||||
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
|
||||
}
|
||||
|
||||
let opts = layered_config
|
||||
let mut opts: Self = layered_config
|
||||
.build()
|
||||
.and_then(|x| x.try_deserialize())
|
||||
.context(LoadLayeredConfigSnafu)?;
|
||||
|
||||
opts.validate_sanitize()?;
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
|
||||
/// Validate(and possibly sanitize) the configuration.
|
||||
fn validate_sanitize(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// List of toml keys that should be parsed as a list.
|
||||
fn env_list_keys() -> Option<&'static [&'static str]> {
|
||||
None
|
||||
|
||||
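A rough sketch of how the new hook above can be used (hypothetical options type, not taken from this diff; assumes the trait's `Result` type and serde derives are in scope). The layered-config loader shown above calls `validate_sanitize` right after deserialization:

    #[derive(Debug, Default, serde::Serialize, serde::Deserialize)]
    struct MyOptions {
        http_addr: String,
    }

    impl Configurable for MyOptions {
        fn validate_sanitize(&mut self) -> Result<()> {
            // Sanitize: drop a trailing slash an operator might have typed.
            while self.http_addr.ends_with('/') {
                self.http_addr.pop();
            }
            Ok(())
        }
    }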
@@ -180,7 +180,7 @@ pub enum Error {

#[snafu(display("Failed to parse format {} with value: {}", key, value))]
ParseFormat {
key: &'static str,
key: String,
value: String,
#[snafu(implicit)]
location: Location,

@@ -35,10 +35,23 @@ data = {
|
||||
"bigint_other": [5, -5, 1, 5, 5],
|
||||
"utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
|
||||
"utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
|
||||
"timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
|
||||
"date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
|
||||
"timestamp_simple": [
|
||||
datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
|
||||
datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
|
||||
datetime.datetime(2023, 1, 1),
|
||||
datetime.datetime(2023, 2, 1),
|
||||
datetime.datetime(2023, 3, 1),
|
||||
],
|
||||
"date_simple": [
|
||||
datetime.date(2023, 4, 1),
|
||||
datetime.date(2023, 3, 1),
|
||||
datetime.date(2023, 1, 1),
|
||||
datetime.date(2023, 2, 1),
|
||||
datetime.date(2023, 3, 1),
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def infer_schema(data):
|
||||
schema = "struct<"
|
||||
for key, value in data.items():
|
||||
@@ -56,7 +69,7 @@ def infer_schema(data):
|
||||
elif key.startswith("date"):
|
||||
dt = "date"
|
||||
else:
|
||||
print(key,value,dt)
|
||||
print(key, value, dt)
|
||||
raise NotImplementedError
|
||||
if key.startswith("double"):
|
||||
dt = "double"
|
||||
@@ -68,7 +81,6 @@ def infer_schema(data):
|
||||
return schema
|
||||
|
||||
|
||||
|
||||
def _write(
|
||||
schema: str,
|
||||
data,
|
||||
|
||||
@@ -32,6 +32,7 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;

use crate::function_registry::FunctionRegistry;
use crate::scalars::vector::product::VectorProductCreator;
use crate::scalars::vector::sum::VectorSumCreator;

/// A function creates `AggregateFunctionCreator`.
@@ -93,6 +94,7 @@ impl AggregateFunctions {
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
register_aggr_func!("vec_sum", 1, VectorSumCreator);
register_aggr_func!("vec_product", 1, VectorProductCreator);

#[cfg(feature = "geo")]
register_aggr_func!(

@@ -14,14 +14,17 @@
|
||||
|
||||
mod convert;
|
||||
mod distance;
|
||||
mod elem_product;
|
||||
mod elem_sum;
|
||||
pub mod impl_conv;
|
||||
pub(crate) mod product;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
mod sub;
|
||||
pub(crate) mod sum;
|
||||
mod vector_div;
|
||||
mod vector_mul;
|
||||
mod vector_norm;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -46,8 +49,10 @@ impl VectorFunction {
|
||||
|
||||
// vector calculation
|
||||
registry.register(Arc::new(vector_mul::VectorMulFunction));
|
||||
registry.register(Arc::new(vector_norm::VectorNormFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(sub::SubFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
registry.register(Arc::new(elem_product::ElemProductFunction));
|
||||
}
|
||||
}
|
||||
|
||||
src/common/function/src/scalars/vector/elem_product.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_elem_product";
|
||||
|
||||
/// Multiplies all elements of the vector, returns a scalar.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_elem_product(parse_vec('[1.0, 2.0, 3.0, 4.0]'));
|
||||
///
|
||||
// +-----------------------------------------------------------+
|
||||
// | vec_elem_product(parse_vec(Utf8("[1.0, 2.0, 3.0, 4.0]"))) |
|
||||
// +-----------------------------------------------------------+
|
||||
// | 24.0 |
|
||||
// +-----------------------------------------------------------+
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ElemProductFunction;
|
||||
|
||||
impl Function for ElemProductFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).product()));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ElemProductFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
|
||||
#[test]
|
||||
fn test_elem_product() {
|
||||
let func = ElemProductFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(120.0));
|
||||
assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/product.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
/// Aggregates by multiplying elements across the same dimension, returns a vector.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VectorProduct {
|
||||
product: Option<OVector<f32, Dyn>>,
|
||||
has_null: bool,
|
||||
}
|
||||
|
||||
#[as_aggr_func_creator]
|
||||
#[derive(Debug, Default, AggrFuncTypeStore)]
|
||||
pub struct VectorProductCreator {}
|
||||
|
||||
impl AggregateFunctionCreator for VectorProductCreator {
|
||||
fn creator(&self) -> AccumulatorCreatorFunction {
|
||||
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
|
||||
ensure!(
|
||||
types.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let input_type = &types[0];
|
||||
match input_type {
|
||||
ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
|
||||
Ok(Box::new(VectorProduct::default()))
|
||||
}
|
||||
_ => {
|
||||
let err_msg = format!(
|
||||
"\"VEC_PRODUCT\" aggregate function not support data type {:?}",
|
||||
input_type.logical_type_id(),
|
||||
);
|
||||
CreateAccumulatorSnafu { err_msg }.fail()?
|
||||
}
|
||||
}
|
||||
});
|
||||
creator
|
||||
}
|
||||
|
||||
fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
|
||||
Ok(vec![self.output_type()?])
|
||||
}
|
||||
}
|
||||
|
||||
impl VectorProduct {
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.product.get_or_insert_with(|| {
|
||||
OVector::from_iterator_generic(Dyn(len), Const::<1>, (0..len).map(|_| 1.0))
|
||||
})
|
||||
}
|
||||
|
||||
fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
|
||||
if values.is_empty() || self.has_null {
|
||||
return Ok(());
|
||||
};
|
||||
let column = &values[0];
|
||||
let len = column.len();
|
||||
|
||||
match as_veclit_if_const(column)? {
|
||||
Some(column) => {
|
||||
let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
|
||||
*self.inner(vec_column.len()) =
|
||||
(*self.inner(vec_column.len())).component_mul(&vec_column);
|
||||
}
|
||||
None => {
|
||||
for i in 0..len {
|
||||
let Some(arg0) = as_veclit(column.get_ref(i))? else {
|
||||
if is_update {
|
||||
self.has_null = true;
|
||||
self.product = None;
|
||||
}
|
||||
return Ok(());
|
||||
};
|
||||
let vec_column = DVectorView::from_slice(&arg0, arg0.len());
|
||||
*self.inner(vec_column.len()) =
|
||||
(*self.inner(vec_column.len())).component_mul(&vec_column);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for VectorProduct {
|
||||
fn state(&self) -> common_query::error::Result<Vec<Value>> {
|
||||
self.evaluate().map(|v| vec![v])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(values, true)
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(states, false)
|
||||
}
|
||||
|
||||
fn evaluate(&self) -> common_query::error::Result<Value> {
|
||||
match &self.product {
|
||||
None => Ok(Value::Null),
|
||||
Some(vector) => {
|
||||
let v = vector.as_slice();
|
||||
Ok(Value::from(veclit_to_binlit(v)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{ConstantVector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_update_batch() {
|
||||
// test update empty batch, expect not updating anything
|
||||
let mut vec_product = VectorProduct::default();
|
||||
vec_product.update_batch(&[]).unwrap();
|
||||
assert!(vec_product.product.is_none());
|
||||
assert!(!vec_product.has_null);
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update one not-null value
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
|
||||
"[1.0,2.0,3.0]".to_string(),
|
||||
)]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update one null value
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update no null-value batch
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[28.0, 80.0, 162.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update null-value batch
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
None,
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
|
||||
|
||||
// test update with constant vector
|
||||
let mut vec_product = VectorProduct::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
|
||||
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
|
||||
4,
|
||||
))];
|
||||
|
||||
vec_product.update_batch(&v).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
|
||||
vec_product.evaluate().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
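For reference, here is a minimal standalone sketch (not part of the diff) of the per-dimension product this accumulator computes, checked against the batch values used in the test above; it assumes only the nalgebra crate the file itself depends on.

use nalgebra::DVector;

fn main() {
    let rows = [vec![1.0_f32, 2.0, 3.0], vec![4.0, 5.0, 6.0], vec![7.0, 8.0, 9.0]];
    // Start from an all-ones accumulator, like `VectorProduct::inner` does.
    let mut acc = DVector::from_element(3, 1.0_f32);
    for row in &rows {
        acc = acc.component_mul(&DVector::from_vec(row.clone()));
    }
    // [1*4*7, 2*5*8, 3*6*9]
    assert_eq!(acc.as_slice(), &[28.0, 80.0, 162.0]);
}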
src/common/function/src/scalars/vector/vector_norm.rs (new file, 168 lines)
@@ -0,0 +1,168 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};

const NAME: &str = "vec_norm";

/// Normalizes the vector to length 1, returns a vector.
/// This's equivalent to `VECTOR_SCALAR_MUL(1/SQRT(VECTOR_ELEM_SUM(VECTOR_MUL(v, v))), v)`.
///
/// # Example
///
/// ```sql
/// SELECT vec_to_string(vec_norm('[7.0, 8.0, 9.0]'));
///
/// +--------------------------------------------------+
/// | vec_to_string(vec_norm(Utf8("[7.0, 8.0, 9.0]"))) |
/// +--------------------------------------------------+
/// | [0.013888889,0.015873017,0.017857144]            |
/// +--------------------------------------------------+
///
/// ```
#[derive(Debug, Clone, Default)]
pub struct VectorNormFunction;

impl Function for VectorNormFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::binary_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::one_of(
            vec![
                TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
                TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(
        &self,
        _func_ctx: FunctionContext,
        columns: &[VectorRef],
    ) -> common_query::error::Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                )
            }
        );
        let arg0 = &columns[0];

        let len = arg0.len();
        let mut result = BinaryVectorBuilder::with_capacity(len);
        if len == 0 {
            return Ok(result.to_vector());
        }

        let arg0_const = as_veclit_if_const(arg0)?;

        for i in 0..len {
            let arg0 = match arg0_const.as_ref() {
                Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
                None => as_veclit(arg0.get_ref(i))?,
            };
            let Some(arg0) = arg0 else {
                result.push_null();
                continue;
            };

            let vec0 = DVectorView::from_slice(&arg0, arg0.len());
            let vec1 = DVectorView::from_slice(&arg0, arg0.len());
            let vec2scalar = vec1.component_mul(&vec0);
            let scalar_var = vec2scalar.sum().sqrt();

            let vec = DVectorView::from_slice(&arg0, arg0.len());
            // Use unscale to avoid division by zero and keep more precision as possible
            let vec_res = vec.unscale(scalar_var);

            let veclit = vec_res.as_slice();
            let binlit = veclit_to_binlit(veclit);
            result.push(Some(&binlit));
        }

        Ok(result.to_vector())
    }
}

impl Display for VectorNormFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::StringVector;

    use super::*;

    #[test]
    fn test_vec_norm() {
        let func = VectorNormFunction;

        let input0 = Arc::new(StringVector::from(vec![
            Some("[0.0,2.0,3.0]".to_string()),
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
            Some("[7.0,-8.0,9.0]".to_string()),
            None,
        ]));

        let result = func.eval(FunctionContext::default(), &[input0]).unwrap();

        let result = result.as_ref();
        assert_eq!(result.len(), 5);
        assert_eq!(
            result.get_ref(0).as_binary().unwrap(),
            Some(veclit_to_binlit(&[0.0, 0.5547002, 0.8320503]).as_slice())
        );
        assert_eq!(
            result.get_ref(1).as_binary().unwrap(),
            Some(veclit_to_binlit(&[0.26726124, 0.5345225, 0.8017837]).as_slice())
        );
        assert_eq!(
            result.get_ref(2).as_binary().unwrap(),
            Some(veclit_to_binlit(&[0.5025707, 0.5743665, 0.64616233]).as_slice())
        );
        assert_eq!(
            result.get_ref(3).as_binary().unwrap(),
            Some(veclit_to_binlit(&[0.5025707, -0.5743665, 0.64616233]).as_slice())
        );
        assert!(result.get_ref(4).is_null());
    }
}
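As a quick sanity check (not part of the diff), the same L2 normalization that VectorNormFunction::eval performs per row can be reproduced directly with nalgebra, and it matches the values the test above expects for [7.0, 8.0, 9.0]:

use nalgebra::DVector;

fn main() {
    let v = DVector::from_vec(vec![7.0_f32, 8.0, 9.0]);
    let norm = v.component_mul(&v).sum().sqrt(); // sqrt(49 + 64 + 81) ≈ 13.9284
    let normalized = v.unscale(norm); // divide every element by the norm
    assert!((normalized[0] - 0.5025707).abs() < 1e-5);
    assert!((normalized[1] - 0.5743665).abs() < 1e-5);
    assert!((normalized[2] - 0.64616233).abs() < 1e-5);
}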
@@ -22,7 +22,7 @@ mod version;
use std::sync::Arc;

use build::BuildFunction;
use database::{CurrentSchemaFunction, DatabaseFunction};
use database::{CurrentSchemaFunction, DatabaseFunction, SessionUserFunction};
use pg_catalog::PGCatalogFunction;
use procedure_state::ProcedureStateFunction;
use timezone::TimezoneFunction;
@@ -36,8 +36,9 @@ impl SystemFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(BuildFunction));
        registry.register(Arc::new(VersionFunction));
        registry.register(Arc::new(DatabaseFunction));
        registry.register(Arc::new(CurrentSchemaFunction));
        registry.register(Arc::new(DatabaseFunction));
        registry.register(Arc::new(SessionUserFunction));
        registry.register(Arc::new(TimezoneFunction));
        registry.register_async(Arc::new(ProcedureStateFunction));
        PGCatalogFunction::register(registry);

@@ -28,9 +28,11 @@ pub struct DatabaseFunction;

#[derive(Clone, Debug, Default)]
pub struct CurrentSchemaFunction;
pub struct SessionUserFunction;

const DATABASE_FUNCTION_NAME: &str = "database";
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";

impl Function for DatabaseFunction {
    fn name(&self) -> &str {
@@ -72,6 +74,26 @@ impl Function for CurrentSchemaFunction {
    }
}

impl Function for SessionUserFunction {
    fn name(&self) -> &str {
        SESSION_USER_FUNCTION_NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::uniform(0, vec![], Volatility::Immutable)
    }

    fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
        let user = func_ctx.query_ctx.current_user();

        Ok(Arc::new(StringVector::from_slice(&[user.username()])) as _)
    }
}

impl fmt::Display for DatabaseFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "DATABASE")
@@ -84,6 +106,12 @@ impl fmt::Display for CurrentSchemaFunction {
    }
}

impl fmt::Display for SessionUserFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "SESSION_USER")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

@@ -25,12 +25,15 @@ use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest};
|
||||
use table::requests::{
|
||||
AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetIndexOptions,
|
||||
UnsetIndexOptions,
|
||||
};
|
||||
|
||||
use crate::error::{
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
|
||||
UnknownLocationTypeSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
|
||||
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
|
||||
};
|
||||
|
||||
const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
|
||||
@@ -60,6 +63,7 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
column_schema: schema,
|
||||
is_key: column_def.semantic_type == SemanticType::Tag as i32,
|
||||
location: parse_location(ac.location)?,
|
||||
add_if_not_exists: ac.add_if_not_exists,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
@@ -113,18 +117,43 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
.context(InvalidUnsetTableOptionRequestSnafu)?,
|
||||
}
|
||||
}
|
||||
Kind::SetColumnFulltext(c) => AlterKind::SetColumnFulltext {
|
||||
column_name: c.column_name,
|
||||
options: FulltextOptions {
|
||||
enable: c.enable,
|
||||
analyzer: as_fulltext_option(
|
||||
Analyzer::try_from(c.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
case_sensitive: c.case_sensitive,
|
||||
Kind::SetIndex(o) => match o.options {
|
||||
Some(opt) => match opt {
|
||||
api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Fulltext {
|
||||
column_name: f.column_name.clone(),
|
||||
options: FulltextOptions {
|
||||
enable: f.enable,
|
||||
analyzer: as_fulltext_option(
|
||||
Analyzer::try_from(f.analyzer)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
case_sensitive: f.case_sensitive,
|
||||
},
|
||||
},
|
||||
},
|
||||
api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Inverted {
|
||||
column_name: i.column_name,
|
||||
},
|
||||
},
|
||||
},
|
||||
None => return MissingAlterIndexOptionSnafu.fail(),
|
||||
},
|
||||
Kind::UnsetColumnFulltext(c) => AlterKind::UnsetColumnFulltext {
|
||||
column_name: c.column_name,
|
||||
Kind::UnsetIndex(o) => match o.options {
|
||||
Some(opt) => match opt {
|
||||
api::v1::unset_index::Options::Fulltext(f) => AlterKind::UnsetIndex {
|
||||
options: UnsetIndexOptions::Fulltext {
|
||||
column_name: f.column_name,
|
||||
},
|
||||
},
|
||||
api::v1::unset_index::Options::Inverted(i) => AlterKind::UnsetIndex {
|
||||
options: UnsetIndexOptions::Inverted {
|
||||
column_name: i.column_name,
|
||||
},
|
||||
},
|
||||
},
|
||||
None => return MissingAlterIndexOptionSnafu.fail(),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -220,6 +249,7 @@ mod tests {
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
add_if_not_exists: true,
|
||||
}],
|
||||
})),
|
||||
};
|
||||
@@ -240,6 +270,7 @@ mod tests {
|
||||
add_column.column_schema.data_type
|
||||
);
|
||||
assert_eq!(None, add_column.location);
|
||||
assert!(add_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -265,6 +296,7 @@ mod tests {
|
||||
location_type: LocationType::First.into(),
|
||||
after_column_name: String::default(),
|
||||
}),
|
||||
add_if_not_exists: false,
|
||||
},
|
||||
AddColumn {
|
||||
column_def: Some(ColumnDef {
|
||||
@@ -280,6 +312,7 @@ mod tests {
|
||||
location_type: LocationType::After.into(),
|
||||
after_column_name: "ts".to_string(),
|
||||
}),
|
||||
add_if_not_exists: true,
|
||||
},
|
||||
],
|
||||
})),
|
||||
@@ -308,6 +341,7 @@ mod tests {
|
||||
}),
|
||||
add_column.location
|
||||
);
|
||||
assert!(add_column.add_if_not_exists);
|
||||
|
||||
let add_column = add_columns.pop().unwrap();
|
||||
assert!(!add_column.is_key);
|
||||
@@ -317,6 +351,7 @@ mod tests {
|
||||
add_column.column_schema.data_type
|
||||
);
|
||||
assert_eq!(Some(AddColumnLocation::First), add_column.location);
|
||||
assert!(!add_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -139,6 +139,12 @@ pub enum Error {
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing alter index options"))]
|
||||
MissingAlterIndexOption {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -164,7 +170,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
Error::InvalidSetTableOptionRequest { .. }
|
||||
| Error::InvalidUnsetTableOptionRequest { .. }
|
||||
| Error::InvalidSetFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
|
||||
| Error::InvalidSetFulltextOptionRequest { .. }
|
||||
| Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -299,6 +299,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let memory_column = &add_columns.add_columns[1];
|
||||
assert_eq!(
|
||||
@@ -311,6 +312,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let time_column = &add_columns.add_columns[2];
|
||||
assert_eq!(
|
||||
@@ -323,6 +325,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let interval_column = &add_columns.add_columns[3];
|
||||
assert_eq!(
|
||||
@@ -335,6 +338,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let decimal_column = &add_columns.add_columns[4];
|
||||
assert_eq!(
|
||||
@@ -352,6 +356,7 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -119,29 +119,30 @@ pub fn build_create_table_expr(
|
||||
}
|
||||
|
||||
let mut column_defs = Vec::with_capacity(column_exprs.len());
|
||||
let mut primary_keys = Vec::default();
|
||||
let mut primary_keys = Vec::with_capacity(column_exprs.len());
|
||||
let mut time_index = None;
|
||||
|
||||
for ColumnExpr {
|
||||
column_name,
|
||||
datatype,
|
||||
semantic_type,
|
||||
datatype_extension,
|
||||
options,
|
||||
} in column_exprs
|
||||
{
|
||||
for expr in column_exprs {
|
||||
let ColumnExpr {
|
||||
column_name,
|
||||
datatype,
|
||||
semantic_type,
|
||||
datatype_extension,
|
||||
options,
|
||||
} = expr;
|
||||
|
||||
let mut is_nullable = true;
|
||||
match semantic_type {
|
||||
v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.to_string()),
|
||||
v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.to_owned()),
|
||||
v if v == SemanticType::Timestamp as i32 => {
|
||||
ensure!(
|
||||
time_index.is_none(),
|
||||
DuplicatedTimestampColumnSnafu {
|
||||
exists: time_index.unwrap(),
|
||||
exists: time_index.as_ref().unwrap(),
|
||||
duplicated: column_name,
|
||||
}
|
||||
);
|
||||
time_index = Some(column_name.to_string());
|
||||
time_index = Some(column_name.to_owned());
|
||||
// Timestamp column must not be null.
|
||||
is_nullable = false;
|
||||
}
|
||||
@@ -158,8 +159,8 @@ pub fn build_create_table_expr(
|
||||
}
|
||||
);
|
||||
|
||||
let column_def = ColumnDef {
|
||||
name: column_name.to_string(),
|
||||
column_defs.push(ColumnDef {
|
||||
name: column_name.to_owned(),
|
||||
data_type: datatype,
|
||||
is_nullable,
|
||||
default_constraint: vec![],
|
||||
@@ -167,15 +168,14 @@ pub fn build_create_table_expr(
|
||||
comment: String::new(),
|
||||
datatype_extension: datatype_extension.clone(),
|
||||
options: options.clone(),
|
||||
};
|
||||
column_defs.push(column_def);
|
||||
});
|
||||
}
|
||||
|
||||
let time_index = time_index.context(MissingTimestampColumnSnafu {
|
||||
msg: format!("table is {}", table_name.table),
|
||||
})?;
|
||||
|
||||
let expr = CreateTableExpr {
|
||||
Ok(CreateTableExpr {
|
||||
catalog_name: table_name.catalog.to_string(),
|
||||
schema_name: table_name.schema.to_string(),
|
||||
table_name: table_name.table.to_string(),
|
||||
@@ -187,11 +187,12 @@ pub fn build_create_table_expr(
|
||||
table_options: Default::default(),
|
||||
table_id: table_id.map(|id| api::v1::TableId { id }),
|
||||
engine: engine.to_string(),
|
||||
};
|
||||
|
||||
Ok(expr)
|
||||
})
|
||||
}
|
||||
|
||||
/// Find columns that are not present in the schema and return them as `AddColumns`
|
||||
/// for adding columns automatically.
|
||||
/// It always sets `add_if_not_exists` to `true` for now.
|
||||
pub fn extract_new_columns(
|
||||
schema: &Schema,
|
||||
column_exprs: Vec<ColumnExpr>,
|
||||
@@ -213,6 +214,7 @@ pub fn extract_new_columns(
|
||||
AddColumn {
|
||||
column_def,
|
||||
location: None,
|
||||
add_if_not_exists: true,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@@ -6,7 +6,7 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
pg_kvbackend = ["dep:tokio-postgres"]
|
||||
pg_kvbackend = ["dep:tokio-postgres", "dep:backon"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -17,6 +17,7 @@ api.workspace = true
|
||||
async-recursion = "1.0"
|
||||
async-stream = "0.3"
|
||||
async-trait.workspace = true
|
||||
backon = { workspace = true, optional = true }
|
||||
base64.workspace = true
|
||||
bytes.workspace = true
|
||||
chrono.workspace = true
|
||||
@@ -35,6 +36,8 @@ common-wal.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datatypes.workspace = true
|
||||
deadpool.workspace = true
|
||||
deadpool-postgres.workspace = true
|
||||
derive_builder.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
|
||||
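The dependency changes above (the pg_kvbackend feature pulling in backon, plus the deadpool and deadpool-postgres entries) are easier to read with a small, hedged sketch of what a deadpool-backed Postgres connection looks like. This is not GreptimeDB code: the connection parameters are made up, and the comments point at the error variants introduced later in this change.

use deadpool_postgres::{Config, Runtime};
use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut cfg = Config::new();
    cfg.host = Some("127.0.0.1".to_string()); // made-up connection details
    cfg.dbname = Some("greptime_metadata".to_string());
    cfg.user = Some("postgres".to_string());
    // Failure here is what the new `CreatePostgresPool` error variant wraps.
    let pool = cfg.create_pool(Some(Runtime::Tokio1), NoTls)?;
    // Failure here is what `GetPostgresConnection` reports.
    let client = pool.get().await?;
    let row = client.query_one("SELECT 1", &[]).await?;
    assert_eq!(row.get::<_, i32>(0), 1);
    Ok(())
}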
@@ -105,7 +105,7 @@ impl AlterLogicalTablesProcedure {
|
||||
.context(ConvertAlterTableRequestSnafu)?;
|
||||
let new_meta = table_info
|
||||
.meta
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind, true)
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
|
||||
.context(error::TableSnafu)?
|
||||
.build()
|
||||
.with_context(|_| error::BuildTableMetaSnafu {
|
||||
|
||||
@@ -28,13 +28,13 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status, StringKey,
|
||||
};
|
||||
use common_telemetry::{debug, info};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use futures::future;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::metadata::{RawTableInfo, TableId, TableInfo};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
@@ -51,10 +51,14 @@ use crate::{metrics, ClusterId};
|
||||
|
||||
/// The alter table procedure
|
||||
pub struct AlterTableProcedure {
|
||||
// The runtime context.
|
||||
/// The runtime context.
|
||||
context: DdlContext,
|
||||
// The serialized data.
|
||||
/// The serialized data.
|
||||
data: AlterTableData,
|
||||
/// Cached new table metadata in the prepare step.
|
||||
/// If we recover the procedure from json, then the table info value is not cached.
|
||||
/// But we already validated it in the prepare step.
|
||||
new_table_info: Option<TableInfo>,
|
||||
}
|
||||
|
||||
impl AlterTableProcedure {
|
||||
@@ -70,18 +74,31 @@ impl AlterTableProcedure {
|
||||
Ok(Self {
|
||||
context,
|
||||
data: AlterTableData::new(task, table_id, cluster_id),
|
||||
new_table_info: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data: AlterTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
Ok(AlterTableProcedure { context, data })
|
||||
Ok(AlterTableProcedure {
|
||||
context,
|
||||
data,
|
||||
new_table_info: None,
|
||||
})
|
||||
}
|
||||
|
||||
// Checks whether the table exists.
|
||||
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
|
||||
self.check_alter().await?;
|
||||
self.fill_table_info().await?;
|
||||
|
||||
// Validates the request and builds the new table info.
|
||||
// We need to build the new table info here because we should ensure the alteration
|
||||
// is valid in `UpdateMeta` state as we already altered the region.
|
||||
// Safety: `fill_table_info()` already set it.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
self.new_table_info = Some(self.build_new_table_info(&table_info_value.table_info)?);
|
||||
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
if matches!(alter_kind, Kind::RenameTable { .. }) {
|
||||
@@ -106,6 +123,14 @@ impl AlterTableProcedure {
|
||||
|
||||
let leaders = find_leaders(&physical_table_route.region_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
let alter_kind = self.make_region_alter_kind()?;
|
||||
|
||||
info!(
|
||||
"Submitting alter region requests for table {}, table_id: {}, alter_kind: {:?}",
|
||||
self.data.table_ref(),
|
||||
table_id,
|
||||
alter_kind,
|
||||
);
|
||||
|
||||
for datanode in leaders {
|
||||
let requester = self.context.node_manager.datanode(&datanode).await;
|
||||
@@ -113,7 +138,7 @@ impl AlterTableProcedure {
|
||||
|
||||
for region in regions {
|
||||
let region_id = RegionId::new(table_id, region);
|
||||
let request = self.make_alter_region_request(region_id)?;
|
||||
let request = self.make_alter_region_request(region_id, alter_kind.clone())?;
|
||||
debug!("Submitting {request:?} to {datanode}");
|
||||
|
||||
let datanode = datanode.clone();
|
||||
@@ -150,7 +175,15 @@ impl AlterTableProcedure {
|
||||
let table_ref = self.data.table_ref();
|
||||
// Safety: checked before.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
let new_info = self.build_new_table_info(&table_info_value.table_info)?;
|
||||
// Gets the table info from the cache or builds it.
|
||||
let new_info = match &self.new_table_info {
|
||||
Some(cached) => cached.clone(),
|
||||
None => self.build_new_table_info(&table_info_value.table_info)
|
||||
.inspect_err(|e| {
|
||||
// We already check the table info in the prepare step so this should not happen.
|
||||
error!(e; "Unable to build info for table {} in update metadata step, table_id: {}", table_ref, table_id);
|
||||
})?,
|
||||
};
|
||||
|
||||
debug!(
|
||||
"Starting update table: {} metadata, new table info {:?}",
|
||||
@@ -174,7 +207,7 @@ impl AlterTableProcedure {
|
||||
.await?;
|
||||
}
|
||||
|
||||
info!("Updated table metadata for table {table_ref}, table_id: {table_id}");
|
||||
info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}");
|
||||
self.data.state = AlterTableState::InvalidateTableCache;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::{
|
||||
@@ -27,13 +29,15 @@ use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::error::{InvalidProtoMsgSnafu, Result};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Makes alter region request.
|
||||
pub(crate) fn make_alter_region_request(&self, region_id: RegionId) -> Result<RegionRequest> {
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
/// Makes an alter region request from an existing alter kind.
|
||||
/// Region alter request always add columns if not exist.
|
||||
pub(crate) fn make_alter_region_request(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
kind: Option<alter_request::Kind>,
|
||||
) -> Result<RegionRequest> {
|
||||
// Safety: checked
|
||||
let table_info = self.data.table_info().unwrap();
|
||||
let kind = create_proto_alter_kind(table_info, alter_kind)?;
|
||||
|
||||
Ok(RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
@@ -47,45 +51,66 @@ impl AlterTableProcedure {
|
||||
})),
|
||||
})
|
||||
}
|
||||
|
||||
/// Makes alter kind proto that all regions can reuse.
|
||||
/// Region alter request always add columns if not exist.
|
||||
pub(crate) fn make_region_alter_kind(&self) -> Result<Option<alter_request::Kind>> {
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
// Safety: checked
|
||||
let table_info = self.data.table_info().unwrap();
|
||||
let kind = create_proto_alter_kind(table_info, alter_kind)?;
|
||||
|
||||
Ok(kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates region proto alter kind from `table_info` and `alter_kind`.
|
||||
///
|
||||
/// Returns the kind and next column id if it adds new columns.
|
||||
/// It always adds column if not exists and drops column if exists.
|
||||
/// It skips the column if it already exists in the table.
|
||||
fn create_proto_alter_kind(
|
||||
table_info: &RawTableInfo,
|
||||
alter_kind: &Kind,
|
||||
) -> Result<Option<alter_request::Kind>> {
|
||||
match alter_kind {
|
||||
Kind::AddColumns(x) => {
|
||||
// Construct a set of existing columns in the table.
|
||||
let existing_columns: HashSet<_> = table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.map(|col| &col.name)
|
||||
.collect();
|
||||
let mut next_column_id = table_info.meta.next_column_id;
|
||||
|
||||
let add_columns = x
|
||||
.add_columns
|
||||
.iter()
|
||||
.map(|add_column| {
|
||||
let column_def =
|
||||
add_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.context(InvalidProtoMsgSnafu {
|
||||
err_msg: "'column_def' is absent",
|
||||
})?;
|
||||
let mut add_columns = Vec::with_capacity(x.add_columns.len());
|
||||
for add_column in &x.add_columns {
|
||||
let column_def = add_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.context(InvalidProtoMsgSnafu {
|
||||
err_msg: "'column_def' is absent",
|
||||
})?;
|
||||
|
||||
let column_id = next_column_id;
|
||||
next_column_id += 1;
|
||||
// Skips existing columns.
|
||||
if existing_columns.contains(&column_def.name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let column_def = RegionColumnDef {
|
||||
column_def: Some(column_def.clone()),
|
||||
column_id,
|
||||
};
|
||||
let column_id = next_column_id;
|
||||
next_column_id += 1;
|
||||
let column_def = RegionColumnDef {
|
||||
column_def: Some(column_def.clone()),
|
||||
column_id,
|
||||
};
|
||||
|
||||
Ok(AddColumn {
|
||||
column_def: Some(column_def),
|
||||
location: add_column.location.clone(),
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
add_columns.push(AddColumn {
|
||||
column_def: Some(column_def),
|
||||
location: add_column.location.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Some(alter_request::Kind::AddColumns(AddColumns {
|
||||
add_columns,
|
||||
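A compact illustration (not from the repository) of the skip-and-assign behaviour described in the comments above: only columns the table does not already have receive a region column id. Note that the metadata side (`build_new_table_info`, later in this diff) still bumps `next_column_id` by the full request count, which is why unused ids ("holes") can appear.

use std::collections::HashSet;

fn assign_ids(existing: &[&str], requested: &[&str], mut next_id: u32) -> Vec<(String, u32)> {
    let existing: HashSet<&str> = existing.iter().copied().collect();
    let mut out = Vec::new();
    for name in requested {
        // Skip columns the table already has; they need no region-level change.
        if existing.contains(name) {
            continue;
        }
        out.push((name.to_string(), next_id));
        next_id += 1;
    }
    out
}

fn main() {
    let existing = ["ts", "host", "cpu"];
    let requested = ["host", "my_tag3"];
    // Only `my_tag3` is new, so it takes the next free column id.
    assert_eq!(assign_ids(&existing, &requested, 3), vec![("my_tag3".to_string(), 3)]);
}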
@@ -108,10 +133,8 @@ fn create_proto_alter_kind(
|
||||
Kind::RenameTable(_) => Ok(None),
|
||||
Kind::SetTableOptions(v) => Ok(Some(alter_request::Kind::SetTableOptions(v.clone()))),
|
||||
Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
|
||||
Kind::SetColumnFulltext(v) => Ok(Some(alter_request::Kind::SetColumnFulltext(v.clone()))),
|
||||
Kind::UnsetColumnFulltext(v) => {
|
||||
Ok(Some(alter_request::Kind::UnsetColumnFulltext(v.clone())))
|
||||
}
|
||||
Kind::SetIndex(v) => Ok(Some(alter_request::Kind::SetIndex(v.clone()))),
|
||||
Kind::UnsetIndex(v) => Ok(Some(alter_request::Kind::UnsetIndex(v.clone()))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,6 +166,7 @@ mod tests {
|
||||
use crate::rpc::router::{Region, RegionRoute};
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
|
||||
/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
|
||||
async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
|
||||
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(datanode_manager);
|
||||
@@ -171,6 +195,7 @@ mod tests {
|
||||
.name("cpu")
|
||||
.data_type(ColumnDataType::Float64)
|
||||
.semantic_type(SemanticType::Field)
|
||||
.is_nullable(true)
|
||||
.build()
|
||||
.unwrap()
|
||||
.into(),
|
||||
@@ -225,15 +250,16 @@ mod tests {
|
||||
name: "my_tag3".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: b"hello".to_vec(),
|
||||
default_constraint: Vec::new(),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(AddColumnLocation {
|
||||
location_type: LocationType::After as i32,
|
||||
after_column_name: "my_tag2".to_string(),
|
||||
after_column_name: "host".to_string(),
|
||||
}),
|
||||
add_if_not_exists: false,
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -242,8 +268,11 @@ mod tests {
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
procedure.make_alter_region_request(region_id).unwrap().body
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
@@ -259,7 +288,7 @@ mod tests {
|
||||
name: "my_tag3".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: b"hello".to_vec(),
|
||||
default_constraint: Vec::new(),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
@@ -268,7 +297,7 @@ mod tests {
|
||||
}),
|
||||
location: Some(AddColumnLocation {
|
||||
location_type: LocationType::After as i32,
|
||||
after_column_name: "my_tag2".to_string(),
|
||||
after_column_name: "host".to_string(),
|
||||
}),
|
||||
}]
|
||||
}
|
||||
@@ -299,8 +328,11 @@ mod tests {
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
procedure.make_alter_region_request(region_id).unwrap().body
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
@@ -23,7 +23,9 @@ use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Builds new_meta
|
||||
/// Builds new table info after alteration.
|
||||
/// It bumps the column id of the table by the number of the add column requests.
|
||||
/// So there may be holes in the column id sequence.
|
||||
pub(crate) fn build_new_table_info(&self, table_info: &RawTableInfo) -> Result<TableInfo> {
|
||||
let table_info =
|
||||
TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
|
||||
@@ -34,7 +36,7 @@ impl AlterTableProcedure {
|
||||
|
||||
let new_meta = table_info
|
||||
.meta
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind, false)
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
|
||||
.context(error::TableSnafu)?
|
||||
.build()
|
||||
.with_context(|_| error::BuildTableMetaSnafu {
|
||||
@@ -46,6 +48,9 @@ impl AlterTableProcedure {
|
||||
new_info.ident.version = table_info.ident.version + 1;
|
||||
match request.alter_kind {
|
||||
AlterKind::AddColumns { columns } => {
|
||||
// Bumps the column id for the new columns.
|
||||
// It may bump more than the actual number of columns added if there are
|
||||
// existing columns, but it's fine.
|
||||
new_info.meta.next_column_id += columns.len() as u32;
|
||||
}
|
||||
AlterKind::RenameTable { new_table_name } => {
|
||||
@@ -55,8 +60,8 @@ impl AlterTableProcedure {
|
||||
| AlterKind::ModifyColumnTypes { .. }
|
||||
| AlterKind::SetTableOptions { .. }
|
||||
| AlterKind::UnsetTableOptions { .. }
|
||||
| AlterKind::SetColumnFulltext { .. }
|
||||
| AlterKind::UnsetColumnFulltext { .. } => {}
|
||||
| AlterKind::SetIndex { .. }
|
||||
| AlterKind::UnsetIndex { .. } => {}
|
||||
}
|
||||
|
||||
Ok(new_info)
|
||||
|
||||
@@ -21,7 +21,7 @@ use api::v1::CreateTableExpr;
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
|
||||
use common_telemetry::warn;
|
||||
use common_telemetry::{debug, warn};
|
||||
use futures_util::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -143,7 +143,12 @@ impl CreateLogicalTablesProcedure {
|
||||
|
||||
for peer in leaders {
|
||||
let requester = self.context.node_manager.datanode(&peer).await;
|
||||
let request = self.make_request(&peer, region_routes)?;
|
||||
let Some(request) = self.make_request(&peer, region_routes)? else {
|
||||
debug!("no region request to send to datanode {}", peer);
|
||||
// We can skip the rest of the datanodes,
|
||||
// the rest of the datanodes should have the same result.
|
||||
break;
|
||||
};
|
||||
|
||||
create_region_tasks.push(async move {
|
||||
requester
|
||||
|
||||
@@ -25,7 +25,7 @@ impl CreateLogicalTablesProcedure {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn check_tables_already_exist(&mut self) -> Result<()> {
|
||||
pub async fn check_tables_already_exist(&mut self) -> Result<()> {
|
||||
let table_name_keys = self
|
||||
.data
|
||||
.all_create_table_exprs()
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
|
||||
use common_telemetry::debug;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
@@ -31,11 +32,15 @@ impl CreateLogicalTablesProcedure {
|
||||
&self,
|
||||
peer: &Peer,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<RegionRequest> {
|
||||
) -> Result<Option<RegionRequest>> {
|
||||
let tasks = &self.data.tasks;
|
||||
let table_ids_already_exists = &self.data.table_ids_already_exists;
|
||||
let regions_on_this_peer = find_leader_regions(region_routes, peer);
|
||||
let mut requests = Vec::with_capacity(tasks.len() * regions_on_this_peer.len());
|
||||
for task in tasks {
|
||||
for (task, table_id_already_exists) in tasks.iter().zip(table_ids_already_exists) {
|
||||
if table_id_already_exists.is_some() {
|
||||
continue;
|
||||
}
|
||||
let create_table_expr = &task.create_table;
|
||||
let catalog = &create_table_expr.catalog_name;
|
||||
let schema = &create_table_expr.schema_name;
|
||||
@@ -51,13 +56,18 @@ impl CreateLogicalTablesProcedure {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(RegionRequest {
|
||||
if requests.is_empty() {
|
||||
debug!("no region request to send to datanodes");
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(Some(RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Creates(CreateRequests { requests })),
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
fn create_region_request_builder(
|
||||
|
||||
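The Option-returning `make_request` above drops tasks whose tables already exist before building any region request. Below is a minimal, standalone sketch of that zip-and-filter step; the names are hypothetical and only the standard library is used.

fn pending_tasks<'a>(tasks: &'a [&'a str], already_exists: &'a [Option<u32>]) -> Vec<&'a str> {
    tasks
        .iter()
        .zip(already_exists)
        // Keep only the tasks whose table id has not been created yet.
        .filter(|(_, exists)| exists.is_none())
        .map(|(task, _)| *task)
        .collect()
}

fn main() {
    let tasks = ["metric_a", "metric_b", "metric_c"];
    let already_exists = [None, Some(42), None]; // metric_b was created before
    assert_eq!(pending_tasks(&tasks, &already_exists), vec!["metric_a", "metric_c"]);
}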
@@ -30,6 +30,8 @@ pub struct TestAlterTableExpr {
|
||||
add_columns: Vec<ColumnDef>,
|
||||
#[builder(setter(into, strip_option))]
|
||||
new_table_name: Option<String>,
|
||||
#[builder(setter)]
|
||||
add_if_not_exists: bool,
|
||||
}
|
||||
|
||||
impl From<TestAlterTableExpr> for AlterTableExpr {
|
||||
@@ -53,6 +55,7 @@ impl From<TestAlterTableExpr> for AlterTableExpr {
|
||||
.map(|col| AddColumn {
|
||||
column_def: Some(col),
|
||||
location: None,
|
||||
add_if_not_exists: value.add_if_not_exists,
|
||||
})
|
||||
.collect(),
|
||||
})),
|
||||
|
||||
@@ -56,6 +56,7 @@ fn make_alter_logical_table_add_column_task(
|
||||
let alter_table = alter_table
|
||||
.table_name(table.to_string())
|
||||
.add_columns(add_columns)
|
||||
.add_if_not_exists(true)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -139,7 +139,7 @@ async fn test_on_submit_alter_request() {
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: "my_field_column".to_string(),
|
||||
name: "cpu".to_string(),
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -225,7 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: "my_field_column".to_string(),
|
||||
name: "cpu".to_string(),
|
||||
}],
|
||||
})),
|
||||
},
|
||||
@@ -330,6 +330,7 @@ async fn test_on_update_metadata_add_columns() {
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
add_if_not_exists: false,
|
||||
}],
|
||||
})),
|
||||
},
|
||||
|
||||
@@ -639,15 +639,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse {} from str to utf8", name))]
|
||||
StrFromUtf8 {
|
||||
name: String,
|
||||
#[snafu(source)]
|
||||
error: std::str::Utf8Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Value not exists"))]
|
||||
ValueNotExist {
|
||||
#[snafu(implicit)]
|
||||
@@ -658,8 +649,9 @@ pub enum Error {
|
||||
GetCache { source: Arc<Error> },
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to execute via Postgres"))]
|
||||
#[snafu(display("Failed to execute via Postgres, sql: {}", sql))]
|
||||
PostgresExecution {
|
||||
sql: String,
|
||||
#[snafu(source)]
|
||||
error: tokio_postgres::Error,
|
||||
#[snafu(implicit)]
|
||||
@@ -667,12 +659,37 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to connect to Postgres"))]
|
||||
ConnectPostgres {
|
||||
#[snafu(display("Failed to create connection pool for Postgres"))]
|
||||
CreatePostgresPool {
|
||||
#[snafu(source)]
|
||||
error: deadpool_postgres::CreatePoolError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to get Postgres connection from pool: {}", reason))]
|
||||
GetPostgresConnection {
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Failed to {} Postgres transaction", operation))]
|
||||
PostgresTransaction {
|
||||
#[snafu(source)]
|
||||
error: tokio_postgres::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
operation: String,
|
||||
},
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[snafu(display("Postgres transaction retry failed"))]
|
||||
PostgresTransactionRetryFailed {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
@@ -738,8 +755,7 @@ impl ErrorExt for Error {
|
||||
| UnexpectedLogicalRouteTable { .. }
|
||||
| ProcedureOutput { .. }
|
||||
| FromUtf8 { .. }
|
||||
| MetadataCorruption { .. }
|
||||
| StrFromUtf8 { .. } => StatusCode::Unexpected,
|
||||
| MetadataCorruption { .. } => StatusCode::Unexpected,
|
||||
|
||||
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
|
||||
|
||||
@@ -786,9 +802,11 @@ impl ErrorExt for Error {
|
||||
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
PostgresExecution { .. } => StatusCode::Internal,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
ConnectPostgres { .. } => StatusCode::Internal,
|
||||
PostgresExecution { .. }
|
||||
| CreatePostgresPool { .. }
|
||||
| GetPostgresConnection { .. }
|
||||
| PostgresTransaction { .. }
|
||||
| PostgresTransactionRetryFailed { .. } => StatusCode::Internal,
|
||||
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
@@ -799,6 +817,20 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
impl Error {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
/// Check if the error is a serialization error.
|
||||
pub fn is_serialization_error(&self) -> bool {
|
||||
match self {
|
||||
Error::PostgresTransaction { error, .. } => {
|
||||
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
|
||||
}
|
||||
Error::PostgresExecution { error, .. } => {
|
||||
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [Error::RetryLater] error from source `err`.
|
||||
pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
|
||||
Error::RetryLater {
|
||||
|
||||
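The `is_serialization_error` helper above is what a retry loop would key on, using the backon crate this change enables for pg_kvbackend. The following is a minimal sketch rather than the backend's actual retry code: `run_txn` and `TxnError` are stand-ins, and `retry` takes the builder by value in backon 1.x (older 0.4.x releases took a reference).

use backon::{ExponentialBuilder, Retryable};

#[derive(Debug)]
struct TxnError {
    serialization_failure: bool,
}

impl TxnError {
    // Stand-in for `Error::is_serialization_error` above.
    fn is_serialization_error(&self) -> bool {
        self.serialization_failure
    }
}

// Stand-in for one attempt of a Postgres transaction.
async fn run_txn() -> Result<(), TxnError> {
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), TxnError> {
    // Retry only serialization failures; any other error surfaces immediately.
    run_txn
        .retry(ExponentialBuilder::default())
        .when(|e: &TxnError| e.is_serialization_error())
        .await
}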
@@ -190,6 +190,13 @@ impl TableInfoManager {
|
||||
))
|
||||
}
|
||||
|
||||
/// Checks if the table exists.
|
||||
pub async fn exists(&self, table_id: TableId) -> Result<bool> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.to_bytes();
|
||||
self.kv_backend.exists(&raw_key).await
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
|
||||
@@ -542,6 +542,8 @@ mod tests {
|
||||
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
|
||||
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
|
||||
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
|
||||
test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
|
||||
test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
|
||||
unprepare_kv,
|
||||
};
|
||||
|
||||
@@ -589,7 +591,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_range_2() {
|
||||
if let Some(kv_backend) = build_kv_backend().await {
|
||||
test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
|
||||
test_kv_range_2_with_prefix(&kv_backend, b"range2/".to_vec()).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -616,7 +618,8 @@ mod tests {
|
||||
if let Some(kv_backend) = build_kv_backend().await {
|
||||
let prefix = b"deleteRange/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
|
||||
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -625,7 +628,20 @@ mod tests {
|
||||
if let Some(kv_backend) = build_kv_backend().await {
|
||||
let prefix = b"batchDelete/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_etcd_txn() {
|
||||
if let Some(kv_backend) = build_kv_backend().await {
|
||||
test_txn_one_compare_op(&kv_backend).await;
|
||||
text_txn_multi_compare_op(&kv_backend).await;
|
||||
test_txn_compare_equal(&kv_backend).await;
|
||||
test_txn_compare_greater(&kv_backend).await;
|
||||
test_txn_compare_less(&kv_backend).await;
|
||||
test_txn_compare_not_equal(&kv_backend).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -325,7 +325,9 @@ mod tests {
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::test::{
|
||||
prepare_kv, test_kv_batch_delete, test_kv_batch_get, test_kv_compare_and_put,
|
||||
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2,
|
||||
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2, test_txn_compare_equal,
|
||||
test_txn_compare_greater, test_txn_compare_less, test_txn_compare_not_equal,
|
||||
test_txn_one_compare_op, text_txn_multi_compare_op,
|
||||
};
|
||||
|
||||
async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
|
||||
@@ -353,7 +355,7 @@ mod tests {
|
||||
async fn test_range_2() {
|
||||
let kv = MemoryKvBackend::<Error>::new();
|
||||
|
||||
test_kv_range_2(kv).await;
|
||||
test_kv_range_2(&kv).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -374,13 +376,24 @@ mod tests {
|
||||
async fn test_delete_range() {
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_delete_range(kv_backend).await;
|
||||
test_kv_delete_range(&kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_delete() {
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
test_kv_batch_delete(kv_backend).await;
|
||||
test_kv_batch_delete(&kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_txn() {
|
||||
let kv_backend = MemoryKvBackend::<Error>::new();
|
||||
test_txn_one_compare_op(&kv_backend).await;
|
||||
text_txn_multi_compare_op(&kv_backend).await;
|
||||
test_txn_compare_equal(&kv_backend).await;
|
||||
test_txn_compare_greater(&kv_backend).await;
|
||||
test_txn_compare_less(&kv_backend).await;
|
||||
test_txn_compare_not_equal(&kv_backend).await;
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -15,6 +15,8 @@
|
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use txn::{Compare, CompareOp, TxnOp};
|
||||
|
||||
use super::{KvBackend, *};
|
||||
use crate::error::Error;
|
||||
use crate::rpc::store::{BatchGetRequest, PutRequest};
|
||||
@@ -59,14 +61,18 @@ pub async fn prepare_kv_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>
|
||||
|
||||
pub async fn unprepare_kv(kv_backend: &impl KvBackend, prefix: &[u8]) {
|
||||
let range_end = util::get_prefix_end_key(prefix);
|
||||
assert!(kv_backend
|
||||
.delete_range(DeleteRangeRequest {
|
||||
key: prefix.to_vec(),
|
||||
range_end,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
assert!(
|
||||
kv_backend
|
||||
.delete_range(DeleteRangeRequest {
|
||||
key: prefix.to_vec(),
|
||||
range_end,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok(),
|
||||
"prefix: {:?}",
|
||||
std::str::from_utf8(prefix).unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn test_kv_put(kv_backend: &impl KvBackend) {
|
||||
@@ -168,11 +174,11 @@ pub async fn test_kv_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
|
||||
pub async fn test_kv_range_2(kv_backend: &impl KvBackend) {
|
||||
test_kv_range_2_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_range_2_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
let atest = [prefix.clone(), b"atest".to_vec()].concat();
|
||||
let test = [prefix.clone(), b"test".to_vec()].concat();
|
||||
|
||||
@@ -346,11 +352,11 @@ pub async fn test_kv_compare_and_put_with_prefix(
|
||||
assert!(resp.is_none());
|
||||
}
|
||||
|
||||
pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
|
||||
pub async fn test_kv_delete_range(kv_backend: &impl KvBackend) {
|
||||
test_kv_delete_range_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
let key3 = [prefix.clone(), b"key3".to_vec()].concat();
|
||||
let req = DeleteRangeRequest {
|
||||
key: key3.clone(),
|
||||
@@ -401,11 +407,11 @@ pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix
|
||||
assert!(resp.kvs.is_empty());
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
|
||||
pub async fn test_kv_batch_delete(kv_backend: &impl KvBackend) {
|
||||
test_kv_batch_delete_with_prefix(kv_backend, vec![]).await;
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
|
||||
pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
|
||||
let key1 = [prefix.clone(), b"key1".to_vec()].concat();
|
||||
let key100 = [prefix.clone(), b"key100".to_vec()].concat();
|
||||
assert!(kv_backend.get(&key1).await.unwrap().is_some());
|
||||
@@ -444,3 +450,207 @@ pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix
|
||||
assert!(kv_backend.get(&key3).await.unwrap().is_none());
|
||||
assert!(kv_backend.get(&key11).await.unwrap().is_none());
|
||||
}
|
||||
|
||||
pub async fn test_txn_one_compare_op(kv_backend: &impl KvBackend) {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![11],
|
||||
value: vec![3],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
vec![11],
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
|
||||
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 1);
|
||||
}
|
||||
|
||||
pub async fn text_txn_multi_compare_op(kv_backend: &impl KvBackend) {
|
||||
for i in 1..3 {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![i],
|
||||
value: vec![i],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let when: Vec<_> = (1..3u8)
|
||||
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
|
||||
.collect();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(when)
|
||||
.and_then(vec![
|
||||
TxnOp::Put(vec![1], vec![10]),
|
||||
TxnOp::Put(vec![2], vec![20]),
|
||||
])
|
||||
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 2);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_equal(kv_backend: &impl KvBackend) {
|
||||
let key = vec![101u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Put(key, vec![4])]);
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_greater(kv_backend: &impl KvBackend) {
|
||||
let key = vec![102u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_less(kv_backend: &impl KvBackend) {
|
||||
let key = vec![103u8];
|
||||
kv_backend.delete(&[3], false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![2]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn test_txn_compare_not_equal(kv_backend: &impl KvBackend) {
|
||||
let key = vec![104u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::NotEqual,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
@@ -131,9 +131,9 @@ pub struct TxnResponse {
|
||||
pub struct Txn {
|
||||
// HACK - chroot would modify this field
|
||||
pub(super) req: TxnRequest,
|
||||
c_when: bool,
|
||||
c_then: bool,
|
||||
c_else: bool,
|
||||
pub(super) c_when: bool,
|
||||
pub(super) c_then: bool,
|
||||
pub(super) c_else: bool,
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
@@ -241,14 +241,7 @@ impl From<Txn> for TxnRequest {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
#[test]
|
||||
fn test_compare() {
|
||||
@@ -310,232 +303,4 @@ mod tests {
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_one_compare_op() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![11],
|
||||
value: vec![3],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
vec![11],
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
|
||||
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_multi_compare_op() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
for i in 1..3 {
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![i],
|
||||
value: vec![i],
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let when: Vec<_> = (1..3u8)
|
||||
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
|
||||
.collect();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(when)
|
||||
.and_then(vec![
|
||||
TxnOp::Put(vec![1], vec![10]),
|
||||
TxnOp::Put(vec![2], vec![20]),
|
||||
])
|
||||
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_equal() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![101u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Put(key, vec![4])]);
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_greater() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![102u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Greater,
|
||||
vec![1],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_less() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![103u8];
|
||||
kv_backend.delete(&[3], false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Less,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![2]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_not_equal() {
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![104u8];
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value_not_exists(
|
||||
key.clone(),
|
||||
CompareOp::NotEqual,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
vec![2],
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
res,
|
||||
TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: vec![KeyValue {
|
||||
key,
|
||||
value: vec![1]
|
||||
}],
|
||||
more: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
async fn create_kv_backend() -> KvBackendRef {
|
||||
Arc::new(MemoryKvBackend::<Error>::new())
|
||||
// TODO(jiachun): Add a feature to test against etcd in github CI
|
||||
//
|
||||
// The same test can be run against etcd by uncommenting the following line
|
||||
// crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
|
||||
// .await
|
||||
// .unwrap()
|
||||
}
|
||||
}
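// A hedged sketch of the etcd-backed variant referenced by the TODO above; the constructor
// call is taken from the commented-out lines in `create_kv_backend` and assumes a locally
// running etcd, so it is illustrative rather than CI-ready.
//
//   async fn create_etcd_kv_backend() -> KvBackendRef {
//       crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
//           .await
//           .unwrap()
//   }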
|
||||
|
||||
@@ -266,7 +266,7 @@ impl PutRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Clone, PartialEq, Default)]
|
||||
pub struct PutResponse {
|
||||
pub prev_kv: Option<KeyValue>,
|
||||
}
|
||||
@@ -425,7 +425,7 @@ impl BatchPutRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct BatchPutResponse {
|
||||
pub prev_kvs: Vec<KeyValue>,
|
||||
}
|
||||
@@ -509,7 +509,7 @@ impl BatchDeleteRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct BatchDeleteResponse {
|
||||
pub prev_kvs: Vec<KeyValue>,
|
||||
}
|
||||
@@ -754,6 +754,19 @@ impl TryFrom<PbDeleteRangeResponse> for DeleteRangeResponse {
|
||||
}
|
||||
|
||||
impl DeleteRangeResponse {
|
||||
/// Creates a new [`DeleteRangeResponse`] with the given deleted count.
|
||||
pub fn new(deleted: i64) -> Self {
|
||||
Self {
|
||||
deleted,
|
||||
prev_kvs: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the previous key-value pairs on this [`DeleteRangeResponse`].
|
||||
pub fn with_prev_kvs(&mut self, prev_kvs: Vec<KeyValue>) {
|
||||
self.prev_kvs = prev_kvs;
|
||||
}
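    // Usage sketch (illustrative): build a response for two deleted keys and attach the
    // previous key-value pairs before converting it to the protobuf form below.
    //
    //   let mut resp = DeleteRangeResponse::new(2);
    //   resp.with_prev_kvs(prev_kvs);
    //   let pb_resp = resp.to_proto_resp(header);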
|
||||
|
||||
pub fn to_proto_resp(self, header: PbResponseHeader) -> PbDeleteRangeResponse {
|
||||
PbDeleteRangeResponse {
|
||||
header: Some(header),
|
||||
|
||||
@@ -12,7 +12,7 @@ snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[target.'cfg(unix)'.dependencies]
|
||||
pprof = { version = "0.13", features = [
|
||||
pprof = { version = "0.14", features = [
|
||||
"flamegraph",
|
||||
"prost-codec",
|
||||
"protobuf",
|
||||
|
||||
@@ -13,7 +13,7 @@ workspace = true
|
||||
[dependencies]
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
backon = "1"
|
||||
backon.workspace = true
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -189,7 +189,7 @@ impl StateStore for ObjectStateStore {
|
||||
|
||||
async fn batch_delete(&self, keys: &[String]) -> Result<()> {
|
||||
self.store
|
||||
.remove(keys.to_vec())
|
||||
.delete_iter(keys.iter().map(String::as_str))
|
||||
.await
|
||||
.with_context(|_| DeleteStateSnafu {
|
||||
key: format!("{:?}", keys),
|
||||
|
||||
@@ -18,7 +18,6 @@ use arrow::error::ArrowError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_recordbatch::error::Error as RecordbatchError;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::arrow;
|
||||
use datatypes::arrow::datatypes::DataType as ArrowDatatype;
|
||||
@@ -31,21 +30,6 @@ use statrs::StatsError;
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to execute Python UDF: {}", msg))]
|
||||
PyUdf {
|
||||
// TODO(discord9): find a way that prevent circle depend(query<-script<-query) and can use script's error type
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create temporary recordbatch when eval Python UDF"))]
|
||||
UdfTempRecordBatch {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: RecordbatchError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute function"))]
|
||||
ExecuteFunction {
|
||||
#[snafu(source)]
|
||||
@@ -260,9 +244,7 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UdfTempRecordBatch { .. }
|
||||
| Error::PyUdf { .. }
|
||||
| Error::CreateAccumulator { .. }
|
||||
Error::CreateAccumulator { .. }
|
||||
| Error::DowncastVector { .. }
|
||||
| Error::InvalidInputState { .. }
|
||||
| Error::InvalidInputCol { .. }
|
||||
|
||||
@@ -28,14 +28,13 @@ pub fn build_same_type_ts_filter(
|
||||
ts_schema: &ColumnSchema,
|
||||
time_range: Option<TimestampRange>,
|
||||
) -> Option<Expr> {
|
||||
let ts_type = ts_schema.data_type.clone();
|
||||
let time_range = time_range?;
|
||||
let start = time_range
|
||||
.start()
|
||||
.and_then(|start| ts_type.try_cast(Value::Timestamp(start)));
|
||||
.and_then(|start| ts_schema.data_type.try_cast(Value::Timestamp(start)));
|
||||
let end = time_range
|
||||
.end()
|
||||
.and_then(|end| ts_type.try_cast(Value::Timestamp(end)));
|
||||
.and_then(|end| ts_schema.data_type.try_cast(Value::Timestamp(end)));
|
||||
|
||||
let time_range = match (start, end) {
|
||||
(Some(Value::Timestamp(start)), Some(Value::Timestamp(end))) => {
|
||||
|
||||
@@ -35,7 +35,7 @@ use crate::DfRecordBatch;
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct RecordBatch {
|
||||
pub schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
pub columns: Vec<VectorRef>,
|
||||
df_record_batch: DfRecordBatch,
|
||||
}
|
||||
|
||||
|
||||
@@ -108,11 +108,6 @@ impl Time {
|
||||
self.as_formatted_string("%H:%M:%S%.f%z", None)
|
||||
}
|
||||
|
||||
/// Format Time for the system timezone.
|
||||
pub fn to_system_tz_string(&self) -> String {
|
||||
self.as_formatted_string("%H:%M:%S%.f", None)
|
||||
}
|
||||
|
||||
/// Format Time for given timezone.
|
||||
/// When timezone is None, using system timezone by default.
|
||||
pub fn to_timezone_aware_string(&self, tz: Option<&Timezone>) -> String {
|
||||
|
||||
@@ -19,7 +19,7 @@ futures-util.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
rskafka.workspace = true
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] }
|
||||
rustls = { workspace = true, default-features = false, features = ["ring", "logging", "std", "tls12"] }
|
||||
rustls-native-certs = "0.7"
|
||||
rustls-pemfile = "2.1"
|
||||
serde.workspace = true
|
||||
|
||||
@@ -433,8 +433,8 @@ impl DatanodeBuilder {
|
||||
) -> Result<MitoEngine> {
|
||||
if opts.storage.is_object_storage() {
|
||||
// Enable the write cache when setting object storage
|
||||
config.enable_experimental_write_cache = true;
|
||||
info!("Configured 'enable_experimental_write_cache=true' for mito engine.");
|
||||
config.enable_write_cache = true;
|
||||
info!("Configured 'enable_write_cache=true' for mito engine.");
|
||||
}
|
||||
|
||||
let mito_engine = match &opts.wal {
|
||||
|
||||
@@ -123,6 +123,14 @@ impl ColumnSchema {
|
||||
self.default_constraint.as_ref()
|
||||
}
|
||||
|
||||
/// Check if the default constraint is an impure function.
|
||||
pub fn is_default_impure(&self) -> bool {
|
||||
self.default_constraint
|
||||
.as_ref()
|
||||
.map(|c| c.is_function())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn metadata(&self) -> &Metadata {
|
||||
&self.metadata
|
||||
@@ -150,11 +158,22 @@ impl ColumnSchema {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_inverted_index(mut self, value: bool) -> Self {
|
||||
let _ = self
|
||||
.metadata
|
||||
.insert(INVERTED_INDEX_KEY.to_string(), value.to_string());
|
||||
self
|
||||
pub fn with_inverted_index(&mut self, value: bool) {
|
||||
match value {
|
||||
true => {
|
||||
self.metadata
|
||||
.insert(INVERTED_INDEX_KEY.to_string(), value.to_string());
|
||||
}
|
||||
false => {
|
||||
self.metadata.remove(INVERTED_INDEX_KEY);
|
||||
}
|
||||
}
|
||||
}
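    // Usage sketch (illustrative, assuming the usual `ColumnSchema::new(name, data_type, nullable)`
    // constructor): toggling the inverted index marker via the metadata key.
    //
    //   let mut column = ColumnSchema::new("tag", ConcreteDataType::string_datatype(), true);
    //   column.with_inverted_index(true);
    //   assert!(column.is_inverted_indexed());
    //   column.with_inverted_index(false);
    //   assert!(!column.is_inverted_indexed());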
|
||||
|
||||
// Put a placeholder to invalidate schemas.all(!has_inverted_index_key).
|
||||
pub fn insert_inverted_index_placeholder(&mut self) {
|
||||
self.metadata
|
||||
.insert(INVERTED_INDEX_KEY.to_string(), "".to_string());
|
||||
}
|
||||
|
||||
pub fn is_inverted_indexed(&self) -> bool {
|
||||
@@ -164,8 +183,15 @@ impl ColumnSchema {
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
pub fn has_fulltext_index_key(&self) -> bool {
|
||||
self.metadata.contains_key(FULLTEXT_KEY)
|
||||
pub fn is_fulltext_indexed(&self) -> bool {
|
||||
self.fulltext_options()
|
||||
.unwrap_or_default()
|
||||
.map(|option| option.enable)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn is_skipping_indexed(&self) -> bool {
|
||||
self.skipping_index_options().unwrap_or_default().is_some()
|
||||
}
|
||||
|
||||
pub fn has_inverted_index_key(&self) -> bool {
|
||||
@@ -283,6 +309,15 @@ impl ColumnSchema {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an impure default value for this column, but only if it has an impure default constraint.
|
||||
/// Otherwise, returns `Ok(None)`.
|
||||
pub fn create_impure_default(&self) -> Result<Option<Value>> {
|
||||
match &self.default_constraint {
|
||||
Some(c) => c.create_impure_default(&self.data_type),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
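    // Sketch (illustrative): a column whose default is the impure `now()` function reports
    // `is_default_impure() == true` and yields a value here, while a constant default (or no
    // default) returns `Ok(None)`. The builder call assumes the usual `with_default_constraint`
    // API rather than anything introduced by this diff.
    //
    //   let ts_col = ColumnSchema::new("ts", ConcreteDataType::timestamp_millisecond_datatype(), false)
    //       .with_default_constraint(Some(ColumnDefaultConstraint::Function("now()".to_string())))?;
    //   assert!(ts_col.is_default_impure());
    //   assert!(ts_col.create_impure_default()?.is_some());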
|
||||
|
||||
/// Retrieves the fulltext options for the column.
|
||||
pub fn fulltext_options(&self) -> Result<Option<FulltextOptions>> {
|
||||
match self.metadata.get(FULLTEXT_KEY) {
|
||||
|
||||
@@ -178,12 +178,63 @@ impl ColumnDefaultConstraint {
|
||||
}
|
||||
}
|
||||
|
||||
/// Only creates a default vector if the constraint is impure, i.e., it is a function.
|
||||
///
|
||||
/// This helps delay the creation of constant default values to the mito engine, while keeping impure default values consistent.
|
||||
pub fn create_impure_default_vector(
|
||||
&self,
|
||||
data_type: &ConcreteDataType,
|
||||
num_rows: usize,
|
||||
) -> Result<Option<VectorRef>> {
|
||||
assert!(num_rows > 0);
|
||||
|
||||
match self {
|
||||
ColumnDefaultConstraint::Function(expr) => {
|
||||
// Functions should also ensure its return value is not null when
|
||||
// is_nullable is true.
|
||||
match &expr[..] {
|
||||
// TODO(dennis): we only supports current_timestamp right now,
|
||||
// it's better to use a expression framework in future.
|
||||
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
|
||||
create_current_timestamp_vector(data_type, num_rows).map(Some)
|
||||
}
|
||||
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
|
||||
}
|
||||
}
|
||||
ColumnDefaultConstraint::Value(_) => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Only creates a default value if the constraint is impure, i.e., it is a function.
|
||||
///
|
||||
/// This helps delay the creation of constant default values to the mito engine, while keeping impure default values consistent.
|
||||
pub fn create_impure_default(&self, data_type: &ConcreteDataType) -> Result<Option<Value>> {
|
||||
match self {
|
||||
ColumnDefaultConstraint::Function(expr) => {
|
||||
// Functions should also ensure its return value is not null when
|
||||
// is_nullable is true.
|
||||
match &expr[..] {
|
||||
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
|
||||
create_current_timestamp(data_type).map(Some)
|
||||
}
|
||||
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
|
||||
}
|
||||
}
|
||||
ColumnDefaultConstraint::Value(_) => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this constraint might create NULL.
|
||||
fn maybe_null(&self) -> bool {
|
||||
// Once we support more functions, we may return true if given function
|
||||
// could return null.
|
||||
matches!(self, ColumnDefaultConstraint::Value(Value::Null))
|
||||
}
|
||||
|
||||
/// Returns true if this constraint is a function.
|
||||
pub fn is_function(&self) -> bool {
|
||||
matches!(self, ColumnDefaultConstraint::Function(_))
|
||||
}
|
||||
}
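A short sketch of how the two impure-default helpers behave (illustrative; the `"now()"` literal is assumed to match the `NOW_FN` constant handled in the match arms above):

fn impure_default_sketch() -> Result<()> {
    let impure = ColumnDefaultConstraint::Function("now()".to_string());
    // A function default is impure: both helpers produce a value filled with the current timestamp.
    assert!(impure
        .create_impure_default_vector(&ConcreteDataType::timestamp_millisecond_datatype(), 3)?
        .is_some());
    assert!(impure
        .create_impure_default(&ConcreteDataType::timestamp_millisecond_datatype())?
        .is_some());

    // A constant default is pure, so both helpers return Ok(None) and leave it to the engine.
    let constant = ColumnDefaultConstraint::Value(Value::Null);
    assert!(constant
        .create_impure_default_vector(&ConcreteDataType::timestamp_millisecond_datatype(), 3)?
        .is_none());
    Ok(())
}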
|
||||
|
||||
fn create_current_timestamp(data_type: &ConcreteDataType) -> Result<Value> {
|
||||
|
||||
@@ -32,6 +32,7 @@ common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
config.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
@@ -40,7 +41,6 @@ datatypes.workspace = true
|
||||
enum-as-inner = "0.6.0"
|
||||
enum_dispatch = "0.3"
|
||||
futures = "0.3"
|
||||
get-size-derive2 = "0.1.2"
|
||||
get-size2 = "0.1.2"
|
||||
greptime-proto.workspace = true
|
||||
# This fork of hydroflow is simply for keeping our dependency in our org, and pin the version
|
||||
|
||||
@@ -36,6 +36,7 @@ use query::QueryEngine;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
@@ -45,23 +46,20 @@ use tokio::sync::broadcast::error::TryRecvError;
|
||||
use tokio::sync::{broadcast, watch, Mutex, RwLock};
|
||||
|
||||
pub(crate) use crate::adapter::node_context::FlownodeContext;
|
||||
use crate::adapter::table_source::TableSource;
|
||||
use crate::adapter::util::{
|
||||
relation_desc_to_column_schemas_with_fallback, table_info_value_to_relation_desc,
|
||||
};
|
||||
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
|
||||
use crate::adapter::refill::RefillTask;
|
||||
use crate::adapter::table_source::ManagedTableSource;
|
||||
use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
|
||||
pub(crate) use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
|
||||
use crate::compute::ErrCollector;
|
||||
use crate::df_optimizer::sql_to_flow_plan;
|
||||
use crate::error::{
|
||||
EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, InvalidQuerySnafu,
|
||||
UnexpectedSnafu,
|
||||
};
|
||||
use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
|
||||
use crate::expr::Batch;
|
||||
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
|
||||
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
|
||||
|
||||
mod flownode_impl;
|
||||
mod parse_expr;
|
||||
pub(crate) mod refill;
|
||||
mod stat;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
@@ -69,7 +67,7 @@ mod util;
|
||||
mod worker;
|
||||
|
||||
pub(crate) mod node_context;
|
||||
mod table_source;
|
||||
pub(crate) mod table_source;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::utils::StateReportHandler;
|
||||
@@ -85,6 +83,21 @@ pub const UPDATE_AT_TS_COL: &str = "update_at";
|
||||
pub type FlowId = u64;
|
||||
pub type TableName = [String; 3];
|
||||
|
||||
/// Flow config that exists in both standalone and distributed modes.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct FlowConfig {
|
||||
pub num_workers: usize,
|
||||
}
|
||||
|
||||
impl Default for FlowConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
num_workers: (common_config::utils::get_cpus() / 2).max(1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for flow node
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
@@ -92,7 +105,9 @@ pub struct FlownodeOptions {
|
||||
pub mode: Mode,
|
||||
pub cluster_id: Option<u64>,
|
||||
pub node_id: Option<u64>,
|
||||
pub flow: FlowConfig,
|
||||
pub grpc: GrpcOptions,
|
||||
pub http: HttpOptions,
|
||||
pub meta_client: Option<MetaClientOptions>,
|
||||
pub logging: LoggingOptions,
|
||||
pub tracing: TracingOptions,
|
||||
@@ -105,7 +120,9 @@ impl Default for FlownodeOptions {
|
||||
mode: servers::Mode::Standalone,
|
||||
cluster_id: None,
|
||||
node_id: None,
|
||||
flow: FlowConfig::default(),
|
||||
grpc: GrpcOptions::default().with_addr("127.0.0.1:3004"),
|
||||
http: HttpOptions::default(),
|
||||
meta_client: None,
|
||||
logging: LoggingOptions::default(),
|
||||
tracing: TracingOptions::default(),
|
||||
@@ -114,7 +131,14 @@ impl Default for FlownodeOptions {
|
||||
}
|
||||
}
|
||||
|
||||
impl Configurable for FlownodeOptions {}
|
||||
impl Configurable for FlownodeOptions {
|
||||
fn validate_sanitize(&mut self) -> common_config::error::Result<()> {
|
||||
if self.flow.num_workers == 0 {
|
||||
self.flow.num_workers = (common_config::utils::get_cpus() / 2).max(1);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
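A small sketch of the sanitize behaviour (illustrative; it only relies on `FlownodeOptions::default()` and the `validate_sanitize` shown above, and assumes the `Configurable` trait is in scope):

fn sanitize_sketch() {
    let mut opts = FlownodeOptions::default();
    opts.flow.num_workers = 0;
    // 0 workers is not usable, so validate_sanitize falls back to half the CPUs (at least 1),
    // matching FlowConfig::default().
    opts.validate_sanitize().unwrap();
    assert!(opts.flow.num_workers >= 1);
}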
|
||||
|
||||
/// Arc-ed FlowNodeManager, cheaper to clone
|
||||
pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
|
||||
@@ -125,14 +149,18 @@ pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
|
||||
pub struct FlowWorkerManager {
|
||||
/// The handler to the worker that will run the dataflow
|
||||
/// which is `!Send` so a handle is used
|
||||
pub worker_handles: Vec<Mutex<WorkerHandle>>,
|
||||
pub worker_handles: Vec<WorkerHandle>,
|
||||
/// The selector to select a worker to run the dataflow
|
||||
worker_selector: Mutex<usize>,
|
||||
/// The query engine that will be used to parse the query and convert it to a dataflow plan
|
||||
pub query_engine: Arc<dyn QueryEngine>,
|
||||
/// Getting table name and table schema from table info manager
|
||||
table_info_source: TableSource,
|
||||
table_info_source: ManagedTableSource,
|
||||
frontend_invoker: RwLock<Option<FrontendInvoker>>,
|
||||
/// contains mapping from table name to global id, and table schema
|
||||
node_context: RwLock<FlownodeContext>,
|
||||
/// Contains all refill tasks
|
||||
refill_tasks: RwLock<BTreeMap<FlowId, RefillTask>>,
|
||||
flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
|
||||
src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
|
||||
tick_manager: FlowTickManager,
|
||||
@@ -158,19 +186,21 @@ impl FlowWorkerManager {
|
||||
query_engine: Arc<dyn QueryEngine>,
|
||||
table_meta: TableMetadataManagerRef,
|
||||
) -> Self {
|
||||
let srv_map = TableSource::new(
|
||||
let srv_map = ManagedTableSource::new(
|
||||
table_meta.table_info_manager().clone(),
|
||||
table_meta.table_name_manager().clone(),
|
||||
);
|
||||
let node_context = FlownodeContext::default();
|
||||
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
|
||||
let tick_manager = FlowTickManager::new();
|
||||
let worker_handles = Vec::new();
|
||||
FlowWorkerManager {
|
||||
worker_handles,
|
||||
worker_selector: Mutex::new(0),
|
||||
query_engine,
|
||||
table_info_source: srv_map,
|
||||
frontend_invoker: RwLock::new(None),
|
||||
node_context: RwLock::new(node_context),
|
||||
refill_tasks: Default::default(),
|
||||
flow_err_collectors: Default::default(),
|
||||
src_send_buf_lens: Default::default(),
|
||||
tick_manager,
|
||||
@@ -186,20 +216,27 @@ impl FlowWorkerManager {
|
||||
}
|
||||
|
||||
/// Create a flownode manager with one worker
|
||||
pub fn new_with_worker<'s>(
|
||||
pub fn new_with_workers<'s>(
|
||||
node_id: Option<u32>,
|
||||
query_engine: Arc<dyn QueryEngine>,
|
||||
table_meta: TableMetadataManagerRef,
|
||||
) -> (Self, Worker<'s>) {
|
||||
num_workers: usize,
|
||||
) -> (Self, Vec<Worker<'s>>) {
|
||||
let mut zelf = Self::new(node_id, query_engine, table_meta);
|
||||
let (handle, worker) = create_worker();
|
||||
zelf.add_worker_handle(handle);
|
||||
(zelf, worker)
|
||||
|
||||
let workers: Vec<_> = (0..num_workers)
|
||||
.map(|_| {
|
||||
let (handle, worker) = create_worker();
|
||||
zelf.add_worker_handle(handle);
|
||||
worker
|
||||
})
|
||||
.collect();
|
||||
(zelf, workers)
|
||||
}
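    // Sketch (illustrative): create a manager together with 4 workers. Because `Worker` is
    // `!Send`, each worker must be driven on the thread that ends up owning it, while the
    // manager only keeps the corresponding handles.
    //
    //   let (manager, workers) = FlowWorkerManager::new_with_workers(node_id, query_engine, table_meta, 4);
    //   assert_eq!(manager.worker_handles.len(), 4);
    //   assert_eq!(workers.len(), 4);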
|
||||
|
||||
/// Add a worker handle to the manager, meaning the corresponding worker is under its management.
|
||||
pub fn add_worker_handle(&mut self, handle: WorkerHandle) {
|
||||
self.worker_handles.push(Mutex::new(handle));
|
||||
self.worker_handles.push(handle);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -247,12 +284,29 @@ impl FlowWorkerManager {
|
||||
let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
|
||||
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
|
||||
|
||||
let (is_ts_placeholder, proto_schema) = self
|
||||
let (is_ts_placeholder, proto_schema) = match self
|
||||
.try_fetch_existing_table(&table_name)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Table not found: {}", table_name.join(".")),
|
||||
})?;
|
||||
}) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
if self
|
||||
.table_info_source
|
||||
.get_opt_table_id_from_name(&table_name)
|
||||
.await?
|
||||
.is_none()
|
||||
{
|
||||
// Handle the case where both the flow and its sink table no longer exist,
// but some output is still in the output buffer.
|
||||
common_telemetry::warn!(e; "Table `{}` no longer exists, skip writeback", table_name.join("."));
|
||||
continue;
|
||||
} else {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
};
|
||||
let schema_len = proto_schema.len();
|
||||
|
||||
let total_rows = reqs.iter().map(|r| r.len()).sum::<usize>();
|
||||
@@ -409,7 +463,7 @@ impl FlowWorkerManager {
|
||||
) -> Result<Option<(Vec<String>, Option<usize>, Vec<ColumnSchema>)>, Error> {
|
||||
if let Some(table_id) = self
|
||||
.table_info_source
|
||||
.get_table_id_from_name(table_name)
|
||||
.get_opt_table_id_from_name(table_name)
|
||||
.await?
|
||||
{
|
||||
let table_info = self
|
||||
@@ -540,13 +594,16 @@ impl FlowWorkerManager {
|
||||
pub async fn run(&self, mut shutdown: Option<broadcast::Receiver<()>>) {
|
||||
debug!("Starting to run");
|
||||
let default_interval = Duration::from_secs(1);
|
||||
let mut tick_interval = tokio::time::interval(default_interval);
|
||||
// burst mode, so that if we miss a tick, we will run immediately to fully utilize the cpu
|
||||
tick_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Burst);
|
||||
let mut avg_spd = 0; // rows/sec
|
||||
let mut since_last_run = tokio::time::Instant::now();
|
||||
let run_per_trace = 10;
|
||||
let mut run_cnt = 0;
|
||||
loop {
|
||||
// TODO(discord9): only run when new inputs arrive or scheduled to
|
||||
let row_cnt = self.run_available(true).await.unwrap_or_else(|err| {
|
||||
let row_cnt = self.run_available(false).await.unwrap_or_else(|err| {
|
||||
common_telemetry::error!(err;"Run available errors");
|
||||
0
|
||||
});
|
||||
@@ -576,9 +633,9 @@ impl FlowWorkerManager {
|
||||
|
||||
// For now we want to batch rows until there are around `BATCH_SIZE` rows in the send buf
// before triggering a run of the flow's worker.
|
||||
// (plus one for prevent div by zero)
|
||||
let wait_for = since_last_run.elapsed();
|
||||
|
||||
// last runs insert speed
|
||||
let cur_spd = row_cnt * 1000 / wait_for.as_millis().max(1) as usize;
|
||||
// rapid increase, slow decay
|
||||
avg_spd = if cur_spd > avg_spd {
|
||||
@@ -601,7 +658,10 @@ impl FlowWorkerManager {
|
||||
|
||||
METRIC_FLOW_RUN_INTERVAL_MS.set(new_wait.as_millis() as i64);
|
||||
since_last_run = tokio::time::Instant::now();
|
||||
tokio::time::sleep(new_wait).await;
|
||||
tokio::select! {
|
||||
_ = tick_interval.tick() => (),
|
||||
_ = tokio::time::sleep(new_wait) => ()
|
||||
}
|
||||
}
|
||||
// The flow is now shut down; drop frontend_invoker early so a ref cycle (in standalone mode) can be prevented:
|
||||
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
|
||||
@@ -612,9 +672,9 @@ impl FlowWorkerManager {
|
||||
/// Run all available subgraph in the flow node
|
||||
/// This will try to run all dataflow in this node
|
||||
///
|
||||
/// set `blocking` to true to wait until lock is acquired
|
||||
/// and false to return immediately if lock is not acquired
|
||||
/// return numbers of rows send to worker
|
||||
/// Set `blocking` to true to wait until the workers finish running,
/// or false to just trigger a run and return immediately.
/// Returns the number of rows sent to the workers (inaccurate).
|
||||
/// TODO(discord9): add flag for subgraph that have input since last run
|
||||
pub async fn run_available(&self, blocking: bool) -> Result<usize, Error> {
|
||||
let mut row_cnt = 0;
|
||||
@@ -622,13 +682,7 @@ impl FlowWorkerManager {
|
||||
let now = self.tick_manager.tick();
|
||||
for worker in self.worker_handles.iter() {
|
||||
// TODO(discord9): consider how to handle error in individual worker
|
||||
if blocking {
|
||||
worker.lock().await.run_available(now, blocking).await?;
|
||||
} else if let Ok(worker) = worker.try_lock() {
|
||||
worker.run_available(now, blocking).await?;
|
||||
} else {
|
||||
return Ok(row_cnt);
|
||||
}
|
||||
worker.run_available(now, blocking).await?;
|
||||
}
|
||||
// check row send and rows remain in send buf
|
||||
let flush_res = if blocking {
|
||||
@@ -699,7 +753,6 @@ impl FlowWorkerManager {
|
||||
/// remove a flow by it's id
|
||||
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
|
||||
for handle in self.worker_handles.iter() {
|
||||
let handle = handle.lock().await;
|
||||
if handle.contains_flow(flow_id).await? {
|
||||
handle.remove_flow(flow_id).await?;
|
||||
break;
|
||||
@@ -729,43 +782,6 @@ impl FlowWorkerManager {
|
||||
query_ctx,
|
||||
} = args;
|
||||
|
||||
let already_exist = {
|
||||
let mut flag = false;
|
||||
|
||||
// check if the task already exists
|
||||
for handle in self.worker_handles.iter() {
|
||||
if handle.lock().await.contains_flow(flow_id).await? {
|
||||
flag = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
flag
|
||||
};
|
||||
match (create_if_not_exists, or_replace, already_exist) {
|
||||
// do replace
|
||||
(_, true, true) => {
|
||||
info!("Replacing flow with id={}", flow_id);
|
||||
self.remove_flow(flow_id).await?;
|
||||
}
|
||||
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
||||
// do nothing if exists
|
||||
(true, false, true) => {
|
||||
info!("Flow with id={} already exists, do nothing", flow_id);
|
||||
return Ok(None);
|
||||
}
|
||||
// create if not exists
|
||||
(_, _, false) => (),
|
||||
}
|
||||
|
||||
if create_if_not_exists {
|
||||
// check if the task already exists
|
||||
for handle in self.worker_handles.iter() {
|
||||
if handle.lock().await.contains_flow(flow_id).await? {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut node_ctx = self.node_context.write().await;
|
||||
// assign global id to source and sink table
|
||||
for source in &source_table_ids {
|
||||
@@ -828,27 +844,9 @@ impl FlowWorkerManager {
|
||||
.fail()?,
|
||||
}
|
||||
}
|
||||
|
||||
let table_id = self
|
||||
.table_info_source
|
||||
.get_table_id_from_name(&sink_table_name)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Can't get table id for table name {:?}", sink_table_name),
|
||||
})?;
|
||||
let table_info_value = self
|
||||
.table_info_source
|
||||
.get_table_info_value(&table_id)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Can't get table info value for table id {:?}", table_id),
|
||||
})?;
|
||||
let real_schema = table_info_value_to_relation_desc(table_info_value)?;
|
||||
node_ctx.assign_table_schema(&sink_table_name, real_schema.clone())?;
|
||||
} else {
|
||||
// assign inferred schema to sink table
|
||||
// create sink table
|
||||
node_ctx.assign_table_schema(&sink_table_name, flow_plan.schema.clone())?;
|
||||
let did_create = self
|
||||
.create_table_from_relation(
|
||||
&format!("flow-id={flow_id}"),
|
||||
@@ -864,6 +862,8 @@ impl FlowWorkerManager {
|
||||
}
|
||||
}
|
||||
|
||||
node_ctx.add_flow_plan(flow_id, flow_plan.clone());
|
||||
|
||||
let _ = comment;
|
||||
let _ = flow_options;
|
||||
|
||||
@@ -888,7 +888,8 @@ impl FlowWorkerManager {
|
||||
.write()
|
||||
.await
|
||||
.insert(flow_id, err_collector.clone());
|
||||
let handle = &self.worker_handles[0].lock().await;
|
||||
// TODO(discord9): load balance?
|
||||
let handle = self.get_worker_handle_for_create_flow().await;
|
||||
let create_request = worker::Request::Create {
|
||||
flow_id,
|
||||
plan: flow_plan,
|
||||
@@ -897,9 +898,11 @@ impl FlowWorkerManager {
|
||||
source_ids,
|
||||
src_recvs: source_receivers,
|
||||
expire_after,
|
||||
or_replace,
|
||||
create_if_not_exists,
|
||||
err_collector,
|
||||
};
|
||||
|
||||
handle.create_flow(create_request).await?;
|
||||
info!("Successfully create flow with id={}", flow_id);
|
||||
Ok(Some(flow_id))
|
||||
|
||||
@@ -24,21 +24,26 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
|
||||
use common_meta::node_manager::Flownode;
|
||||
use common_telemetry::{debug, trace};
|
||||
use datatypes::value::Value;
|
||||
use itertools::Itertools;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{IntoError, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use super::util::from_proto_to_data_type;
|
||||
use crate::adapter::{CreateFlowArgs, FlowWorkerManager};
|
||||
use crate::error::InternalSnafu;
|
||||
use crate::error::{CreateFlowSnafu, InsertIntoFlowSnafu, InternalSnafu};
|
||||
use crate::metrics::METRIC_FLOW_TASK_COUNT;
|
||||
use crate::repr::{self, DiffRow};
|
||||
|
||||
fn to_meta_err(err: crate::error::Error) -> common_meta::error::Error {
|
||||
// TODO(discord9): refactor this
|
||||
Err::<(), _>(BoxedError::new(err))
|
||||
.with_context(|_| ExternalSnafu)
|
||||
.unwrap_err()
|
||||
/// Returns a function that converts `crate::error::Error` to `common_meta::error::Error`.
|
||||
fn to_meta_err(
|
||||
location: snafu::Location,
|
||||
) -> impl FnOnce(crate::error::Error) -> common_meta::error::Error {
|
||||
move |err: crate::error::Error| -> common_meta::error::Error {
|
||||
common_meta::error::Error::External {
|
||||
location,
|
||||
source: BoxedError::new(err),
|
||||
}
|
||||
}
|
||||
}
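A brief call-site sketch (taken from the pattern used throughout the impl below): the closure captures the caller's location, so the converted error points at the call site rather than at this helper.

//   self.remove_flow(flow_id.id as u64)
//       .await
//       .map_err(to_meta_err(snafu::location!()))?;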
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -75,11 +80,16 @@ impl Flownode for FlowWorkerManager {
|
||||
or_replace,
|
||||
expire_after,
|
||||
comment: Some(comment),
|
||||
sql,
|
||||
sql: sql.clone(),
|
||||
flow_options,
|
||||
query_ctx,
|
||||
};
|
||||
let ret = self.create_flow(args).await.map_err(to_meta_err)?;
|
||||
let ret = self
|
||||
.create_flow(args)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.inc();
|
||||
Ok(FlowResponse {
|
||||
affected_flows: ret
|
||||
@@ -94,7 +104,7 @@ impl Flownode for FlowWorkerManager {
|
||||
})) => {
|
||||
self.remove_flow(flow_id.id as u64)
|
||||
.await
|
||||
.map_err(to_meta_err)?;
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
METRIC_FLOW_TASK_COUNT.dec();
|
||||
Ok(Default::default())
|
||||
}
|
||||
@@ -112,9 +122,15 @@ impl Flownode for FlowWorkerManager {
|
||||
.await
|
||||
.flush_all_sender()
|
||||
.await
|
||||
.map_err(to_meta_err)?;
|
||||
let rows_send = self.run_available(true).await.map_err(to_meta_err)?;
|
||||
let row = self.send_writeback_requests().await.map_err(to_meta_err)?;
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let rows_send = self
|
||||
.run_available(true)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let row = self
|
||||
.send_writeback_requests()
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
|
||||
debug!(
|
||||
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
|
||||
@@ -154,17 +170,41 @@ impl Flownode for FlowWorkerManager {
|
||||
// TODO(discord9): reconsider time assignment mechanism
|
||||
let now = self.tick_manager.tick();
|
||||
|
||||
let fetch_order = {
|
||||
let (table_types, fetch_order) = {
|
||||
let ctx = self.node_context.read().await;
|
||||
let table_col_names = ctx
|
||||
.table_repr
|
||||
.get_by_table_id(&table_id)
|
||||
.map(|r| r.1)
|
||||
.and_then(|id| ctx.schema.get(&id))
|
||||
.map(|desc| &desc.names)
|
||||
.context(UnexpectedSnafu {
|
||||
err_msg: format!("Table not found: {}", table_id),
|
||||
})?;
|
||||
|
||||
// TODO(discord9): also check schema version so that altered table can be reported
|
||||
let table_schema = ctx
|
||||
.table_source
|
||||
.table_from_id(&table_id)
|
||||
.await
|
||||
.map_err(to_meta_err(snafu::location!()))?;
|
||||
let default_vals = table_schema
|
||||
.default_values
|
||||
.iter()
|
||||
.zip(table_schema.relation_desc.typ().column_types.iter())
|
||||
.map(|(v, ty)| {
|
||||
v.as_ref().and_then(|v| {
|
||||
match v.create_default(ty.scalar_type(), ty.nullable()) {
|
||||
Ok(v) => Some(v),
|
||||
Err(err) => {
|
||||
common_telemetry::error!(err; "Failed to create default value");
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect_vec();
|
||||
|
||||
let table_types = table_schema
|
||||
.relation_desc
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|t| t.scalar_type)
|
||||
.collect_vec();
|
||||
let table_col_names = table_schema.relation_desc.names;
|
||||
let table_col_names = table_col_names
|
||||
.iter().enumerate()
|
||||
.map(|(idx,name)| match name {
|
||||
@@ -181,44 +221,80 @@ impl Flownode for FlowWorkerManager {
|
||||
.enumerate()
|
||||
.map(|(i, name)| (&name.column_name, i)),
|
||||
);
|
||||
let fetch_order: Vec<usize> = table_col_names
|
||||
|
||||
let fetch_order: Vec<FetchFromRow> = table_col_names
|
||||
.iter()
|
||||
.map(|names| {
|
||||
name_to_col.get(names).copied().context(UnexpectedSnafu {
|
||||
err_msg: format!("Column not found: {}", names),
|
||||
})
|
||||
.zip(default_vals.into_iter())
|
||||
.map(|(col_name, col_default_val)| {
|
||||
name_to_col
|
||||
.get(col_name)
|
||||
.copied()
|
||||
.map(FetchFromRow::Idx)
|
||||
.or_else(|| col_default_val.clone().map(FetchFromRow::Default))
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Column not found: {}, default_value: {:?}",
|
||||
col_name, col_default_val
|
||||
),
|
||||
})
|
||||
})
|
||||
.try_collect()?;
|
||||
if !fetch_order.iter().enumerate().all(|(i, &v)| i == v) {
|
||||
trace!("Reordering columns: {:?}", fetch_order)
|
||||
}
|
||||
fetch_order
|
||||
|
||||
trace!("Reordering columns: {:?}", fetch_order);
|
||||
(table_types, fetch_order)
|
||||
};
|
||||
|
||||
// TODO(discord9): use column instead of row
|
||||
let rows: Vec<DiffRow> = rows_proto
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let r = repr::Row::from(r);
|
||||
let reordered = fetch_order
|
||||
.iter()
|
||||
.map(|&i| r.inner[i].clone())
|
||||
.collect_vec();
|
||||
let reordered = fetch_order.iter().map(|i| i.fetch(&r)).collect_vec();
|
||||
repr::Row::new(reordered)
|
||||
})
|
||||
.map(|r| (r, now, 1))
|
||||
.collect_vec();
|
||||
let batch_datatypes = insert_schema
|
||||
.iter()
|
||||
.map(from_proto_to_data_type)
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.map_err(to_meta_err)?;
|
||||
self.handle_write_request(region_id.into(), rows, &batch_datatypes)
|
||||
if let Err(err) = self
|
||||
.handle_write_request(region_id.into(), rows, &table_types)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
common_telemetry::error!(err;"Failed to handle write request");
|
||||
to_meta_err(err)
|
||||
})?;
|
||||
{
|
||||
let err = BoxedError::new(err);
|
||||
let flow_ids = self
|
||||
.node_context
|
||||
.read()
|
||||
.await
|
||||
.get_flow_ids(table_id)
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.cloned()
|
||||
.collect_vec();
|
||||
let err = InsertIntoFlowSnafu {
|
||||
region_id,
|
||||
flow_ids,
|
||||
}
|
||||
.into_error(err);
|
||||
common_telemetry::error!(err; "Failed to handle write request");
|
||||
let err = to_meta_err(snafu::location!())(err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
Ok(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple helper enum for fetching a value from a row, with a column default as the fallback.
|
||||
#[derive(Debug, Clone)]
|
||||
enum FetchFromRow {
|
||||
Idx(usize),
|
||||
Default(Value),
|
||||
}
|
||||
|
||||
impl FetchFromRow {
|
||||
/// Panics if the idx is out of bounds.
|
||||
fn fetch(&self, row: &repr::Row) -> Value {
|
||||
match self {
|
||||
FetchFromRow::Idx(idx) => row.get(*idx).unwrap().clone(),
|
||||
FetchFromRow::Default(v) => v.clone(),
|
||||
}
|
||||
}
|
||||
}
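A minimal sketch of the fallback behaviour (illustrative; `repr::Row::new` follows the API already used in this file, and `Value::from(42i64)` assumes the usual `From<i64>` conversion):

fn fetch_from_row_sketch() {
    let row = repr::Row::new(vec![Value::from(42i64)]);
    // Idx picks the value out of the incoming row ...
    assert_eq!(FetchFromRow::Idx(0).fetch(&row), Value::from(42i64));
    // ... while Default supplies the column default when the insert omitted that column.
    assert_eq!(FetchFromRow::Default(Value::Null).fetch(&row), Value::Null);
}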
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::collections::{BTreeMap, BTreeSet, HashMap};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_recordbatch::RecordBatch;
|
||||
use common_telemetry::trace;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use session::context::QueryContext;
|
||||
@@ -25,20 +26,23 @@ use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::{broadcast, mpsc, RwLock};
|
||||
|
||||
use crate::adapter::{FlowId, TableName, TableSource};
|
||||
use crate::adapter::table_source::FlowTableSource;
|
||||
use crate::adapter::{FlowId, ManagedTableSource, TableName};
|
||||
use crate::error::{Error, EvalSnafu, TableNotFoundSnafu};
|
||||
use crate::expr::error::InternalSnafu;
|
||||
use crate::expr::{Batch, GlobalId};
|
||||
use crate::metrics::METRIC_FLOW_INPUT_BUF_SIZE;
|
||||
use crate::plan::TypedPlan;
|
||||
use crate::repr::{DiffRow, RelationDesc, BATCH_SIZE, BROADCAST_CAP, SEND_BUF_CAP};
|
||||
|
||||
/// A context that holds the information of the dataflow
|
||||
#[derive(Default, Debug)]
|
||||
#[derive(Debug)]
|
||||
pub struct FlownodeContext {
|
||||
/// Mapping from source table to tasks, useful for scheduling which task to run when a source table is updated.
|
||||
pub source_to_tasks: BTreeMap<TableId, BTreeSet<FlowId>>,
|
||||
/// mapping from task to sink table, useful for sending data back to the client when a task is done running
|
||||
pub flow_to_sink: BTreeMap<FlowId, TableName>,
|
||||
pub flow_plans: BTreeMap<FlowId, TypedPlan>,
|
||||
pub sink_to_flow: BTreeMap<TableName, FlowId>,
|
||||
/// broadcast sender for source table, any incoming write request will be sent to the source table's corresponding sender
|
||||
///
|
||||
@@ -50,13 +54,33 @@ pub struct FlownodeContext {
|
||||
/// note that the sink receiver should only have one, and we are using broadcast as mpsc channel here
|
||||
pub sink_receiver:
|
||||
BTreeMap<TableName, (mpsc::UnboundedSender<Batch>, mpsc::UnboundedReceiver<Batch>)>,
|
||||
/// the schema of the table, query from metasrv or inferred from TypedPlan
|
||||
pub schema: HashMap<GlobalId, RelationDesc>,
|
||||
/// Can query the schema of the table source from metasrv, with a local cache.
|
||||
pub table_source: Box<dyn FlowTableSource>,
|
||||
/// All the tables that have been registered in the worker
|
||||
pub table_repr: IdToNameMap,
|
||||
pub query_context: Option<Arc<QueryContext>>,
|
||||
}
|
||||
|
||||
impl FlownodeContext {
|
||||
pub fn new(table_source: Box<dyn FlowTableSource>) -> Self {
|
||||
Self {
|
||||
source_to_tasks: Default::default(),
|
||||
flow_to_sink: Default::default(),
|
||||
flow_plans: Default::default(),
|
||||
sink_to_flow: Default::default(),
|
||||
source_sender: Default::default(),
|
||||
sink_receiver: Default::default(),
|
||||
table_source,
|
||||
table_repr: Default::default(),
|
||||
query_context: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_flow_ids(&self, table_id: TableId) -> Option<&BTreeSet<FlowId>> {
|
||||
self.source_to_tasks.get(&table_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// A simple broadcast sender with backpressure: bounded capacity, blocking on send when the send buf is full.
/// Note that it won't evict old data, so it's possible to block forever if the receiver is slow.
|
||||
///
|
||||
@@ -106,7 +130,16 @@ impl SourceSender {
|
||||
// TODO(discord9): send rows instead so it's just moving a point
|
||||
if let Some(batch) = send_buf.recv().await {
|
||||
let len = batch.row_count();
|
||||
self.send_buf_row_cnt.fetch_sub(len, Ordering::SeqCst);
|
||||
if let Err(prev_row_cnt) =
|
||||
self.send_buf_row_cnt
|
||||
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| x.checked_sub(len))
|
||||
{
|
||||
common_telemetry::error!(
|
||||
"send buf row count underflow, prev = {}, len = {}",
|
||||
prev_row_cnt,
|
||||
len
|
||||
);
|
||||
}
|
||||
row_cnt += len;
|
||||
self.sender
|
||||
.send(batch)
|
||||
@@ -138,18 +171,21 @@ impl SourceSender {
|
||||
batch_datatypes: &[ConcreteDataType],
|
||||
) -> Result<usize, Error> {
|
||||
METRIC_FLOW_INPUT_BUF_SIZE.add(rows.len() as _);
|
||||
// important for backpressure. if send buf is full, block until it's not
|
||||
while self.send_buf_row_cnt.load(Ordering::SeqCst) >= BATCH_SIZE * 4 {
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
// row count metrics is approx so relaxed order is ok
|
||||
self.send_buf_row_cnt
|
||||
.fetch_add(rows.len(), Ordering::SeqCst);
|
||||
let batch = Batch::try_from_rows_with_types(
|
||||
rows.into_iter().map(|(row, _, _)| row).collect(),
|
||||
batch_datatypes,
|
||||
)
|
||||
.context(EvalSnafu)?;
|
||||
common_telemetry::trace!("Send one batch to worker with {} rows", batch.row_count());
|
||||
|
||||
self.send_buf_row_cnt
|
||||
.fetch_add(batch.row_count(), Ordering::SeqCst);
|
||||
self.send_buf_tx.send(batch).await.map_err(|e| {
|
||||
crate::error::InternalSnafu {
|
||||
reason: format!("Failed to send row, error = {:?}", e),
|
||||
@@ -159,6 +195,22 @@ impl SourceSender {
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
/// Send a record batch.
|
||||
pub async fn send_record_batch(&self, batch: RecordBatch) -> Result<usize, Error> {
|
||||
let row_cnt = batch.num_rows();
|
||||
let batch = Batch::from(batch);
|
||||
|
||||
self.send_buf_row_cnt.fetch_add(row_cnt, Ordering::SeqCst);
|
||||
|
||||
self.send_buf_tx.send(batch).await.map_err(|e| {
|
||||
crate::error::InternalSnafu {
|
||||
reason: format!("Failed to send batch, error = {:?}", e),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
Ok(row_cnt)
|
||||
}
|
||||
}
|
||||
|
||||
impl FlownodeContext {
|
||||
@@ -180,6 +232,16 @@ impl FlownodeContext {
|
||||
sender.send_rows(rows, batch_datatypes).await
|
||||
}
|
||||
|
||||
pub async fn send_rb(&self, table_id: TableId, batch: RecordBatch) -> Result<usize, Error> {
|
||||
let sender = self
|
||||
.source_sender
|
||||
.get(&table_id)
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: table_id.to_string(),
|
||||
})?;
|
||||
sender.send_record_batch(batch).await
|
||||
}
|
||||
|
||||
/// flush all sender's buf
|
||||
///
|
||||
/// return numbers being sent
|
||||
@@ -215,6 +277,15 @@ impl FlownodeContext {
|
||||
self.sink_to_flow.insert(sink_table_name, task_id);
|
||||
}
|
||||
|
||||
/// add flow plan to worker context
|
||||
pub fn add_flow_plan(&mut self, task_id: FlowId, plan: TypedPlan) {
|
||||
self.flow_plans.insert(task_id, plan);
|
||||
}
|
||||
|
||||
pub fn get_flow_plan(&self, task_id: &FlowId) -> Option<TypedPlan> {
|
||||
self.flow_plans.get(task_id).cloned()
|
||||
}
|
||||
|
||||
/// remove flow from worker context
|
||||
pub fn remove_flow(&mut self, task_id: FlowId) {
|
||||
if let Some(sink_table_name) = self.flow_to_sink.remove(&task_id) {
|
||||
@@ -226,6 +297,7 @@ impl FlownodeContext {
|
||||
self.source_sender.remove(source_table_id);
|
||||
}
|
||||
}
|
||||
self.flow_plans.remove(&task_id);
|
||||
}
|
||||
|
||||
/// try add source sender, if already exist, do nothing
|
||||
@@ -284,7 +356,7 @@ impl FlownodeContext {
|
||||
/// Retrieves a GlobalId and table schema representing a table previously registered by calling the [register_table] function.
|
||||
///
|
||||
/// Returns an error if no table has been registered with the provided names
|
||||
pub fn table(&self, name: &TableName) -> Result<(GlobalId, RelationDesc), Error> {
|
||||
pub async fn table(&self, name: &TableName) -> Result<(GlobalId, RelationDesc), Error> {
|
||||
let id = self
|
||||
.table_repr
|
||||
.get_by_name(name)
|
||||
@@ -292,14 +364,8 @@ impl FlownodeContext {
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: name.join("."),
|
||||
})?;
|
||||
let schema = self
|
||||
.schema
|
||||
.get(&id)
|
||||
.cloned()
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: name.join("."),
|
||||
})?;
|
||||
Ok((id, schema))
|
||||
let schema = self.table_source.table(name).await?;
|
||||
Ok((id, schema.relation_desc))
|
||||
}
|
||||
|
||||
/// Assign a global id to a table, if already assigned, return the existing global id
|
||||
@@ -312,7 +378,7 @@ impl FlownodeContext {
|
||||
/// merely creating a mapping from table id to global id
|
||||
pub async fn assign_global_id_to_table(
|
||||
&mut self,
|
||||
srv_map: &TableSource,
|
||||
srv_map: &ManagedTableSource,
|
||||
mut table_name: Option<TableName>,
|
||||
table_id: Option<TableId>,
|
||||
) -> Result<GlobalId, Error> {
|
||||
@@ -333,9 +399,8 @@ impl FlownodeContext {
|
||||
|
||||
// table id is Some meaning db must have created the table
|
||||
if let Some(table_id) = table_id {
|
||||
let (known_table_name, schema) = srv_map.get_table_name_schema(&table_id).await?;
|
||||
let known_table_name = srv_map.get_table_name(&table_id).await?;
|
||||
table_name = table_name.or(Some(known_table_name));
|
||||
self.schema.insert(global_id, schema);
|
||||
} // if we don't have table id, it means database haven't assign one yet or we don't need it
|
||||
|
||||
// still update the mapping with new global id
|
||||
@@ -344,26 +409,6 @@ impl FlownodeContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assign a schema to a table
|
||||
///
|
||||
pub fn assign_table_schema(
|
||||
&mut self,
|
||||
table_name: &TableName,
|
||||
schema: RelationDesc,
|
||||
) -> Result<(), Error> {
|
||||
let gid = self
|
||||
.table_repr
|
||||
.get_by_name(table_name)
|
||||
.map(|(_, gid)| gid)
|
||||
.context(TableNotFoundSnafu {
|
||||
name: format!("Table not found: {:?} in flownode cache", table_name),
|
||||
})?;
|
||||
|
||||
self.schema.insert(gid, schema);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a new global id
|
||||
pub fn new_global_id(&self) -> GlobalId {
|
||||
GlobalId::User(self.table_repr.global_id_to_name_id.len() as u64)
|
||||
|
||||
433
src/flow/src/adapter/refill.rs
Normal file
433
src/flow/src/adapter/refill.rs
Normal file
@@ -0,0 +1,433 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module contains the refill flow task, which is used to refill flow with given table id and a time range.

use std::collections::BTreeSet;
use std::sync::Arc;

use catalog::CatalogManagerRef;
use common_error::ext::BoxedError;
use common_meta::key::flow::FlowMetadataManagerRef;
use common_recordbatch::{RecordBatch, RecordBatches, SendableRecordBatchStream};
use common_runtime::JoinHandle;
use common_telemetry::error;
use datatypes::value::Value;
use futures::StreamExt;
use query::parser::QueryLanguageParser;
use session::context::QueryContextBuilder;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;

use super::{FlowId, FlowWorkerManager};
use crate::adapter::table_source::ManagedTableSource;
use crate::adapter::FlowWorkerManagerRef;
use crate::error::{FlowNotFoundSnafu, JoinTaskSnafu, UnexpectedSnafu};
use crate::expr::error::ExternalSnafu;
use crate::expr::utils::find_plan_time_window_expr_lower_bound;
use crate::repr::RelationDesc;
use crate::server::get_all_flow_ids;
use crate::{Error, FrontendInvoker};

impl FlowWorkerManager {
/// Create and start refill flow tasks in background
pub async fn create_and_start_refill_flow_tasks(
self: &FlowWorkerManagerRef,
flow_metadata_manager: &FlowMetadataManagerRef,
catalog_manager: &CatalogManagerRef,
) -> Result<(), Error> {
let tasks = self
.create_refill_flow_tasks(flow_metadata_manager, catalog_manager)
.await?;
self.starting_refill_flows(tasks).await?;
Ok(())
}

/// Create a series of tasks to refill flow
pub async fn create_refill_flow_tasks(
&self,
flow_metadata_manager: &FlowMetadataManagerRef,
catalog_manager: &CatalogManagerRef,
) -> Result<Vec<RefillTask>, Error> {
let nodeid = self.node_id.map(|c| c as u64);

let flow_ids = get_all_flow_ids(flow_metadata_manager, catalog_manager, nodeid).await?;
let mut refill_tasks = Vec::new();
'flow_id_loop: for flow_id in flow_ids {
let info = flow_metadata_manager
.flow_info_manager()
.get(flow_id)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(FlowNotFoundSnafu { id: flow_id })?;

// TODO(discord9): also check flow is already running
for src_table in info.source_table_ids() {
// check if source table still exists
if !self.table_info_source.check_table_exist(src_table).await? {
error!(
"Source table id = {:?} not found while refill flow_id={}, consider re-create the flow if necessary",
src_table, flow_id
);
continue 'flow_id_loop;
}
}

let expire_after = info.expire_after();
// TODO(discord9): better way to get last point
let now = self.tick_manager.tick();
let plan = self
.node_context
.read()
.await
.get_flow_plan(&FlowId::from(flow_id))
.context(FlowNotFoundSnafu { id: flow_id })?;
let time_range = if let Some(expire_after) = expire_after {
let low_bound = common_time::Timestamp::new_millisecond(now - expire_after);
let real_low_bound = find_plan_time_window_expr_lower_bound(&plan, low_bound)?;
real_low_bound.map(|l| (l, common_time::Timestamp::new_millisecond(now)))
} else {
None
};

common_telemetry::debug!(
"Time range for refill flow_id={} is {:?}",
flow_id,
time_range
);

for src_table in info.source_table_ids() {
let time_index_col = self
.table_info_source
.get_time_index_column_from_table_id(*src_table)
.await?
.1;
let time_index_name = time_index_col.name;
let task = RefillTask::create(
flow_id as u64,
*src_table,
time_range,
&time_index_name,
&self.table_info_source,
)
.await?;
refill_tasks.push(task);
}
}
Ok(refill_tasks)
}

/// Starting to refill flows, if any error occurs, will rebuild the flow and retry
pub(crate) async fn starting_refill_flows(
self: &FlowWorkerManagerRef,
tasks: Vec<RefillTask>,
) -> Result<(), Error> {
// TODO(discord9): add a back pressure mechanism
let frontend_invoker =
self.frontend_invoker
.read()
.await
.clone()
.context(UnexpectedSnafu {
reason: "frontend invoker is not set",
})?;

for mut task in tasks {
task.start_running(self.clone(), &frontend_invoker).await?;
// TODO(discord9): save refill tasks to a map and check if it's finished when necessary
// i.e. when system table need query it's state
self.refill_tasks
.write()
.await
.insert(task.data.flow_id, task);
}
Ok(())
}
}
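`create_and_start_refill_flow_tasks` is the entry point that ties the pieces above together: enumerate the flows assigned to this node, build one `RefillTask` per source table, then hand the tasks to `starting_refill_flows`. A rough sketch of how a startup path might invoke it; the surrounding function and its error handling are assumptions, only the awaited call itself comes from the code above:

```rust
// Sketch: assumes this lives next to the adapter module so the types are in scope.
// `manager` is a FlowWorkerManagerRef (Arc<FlowWorkerManager>); the metadata and
// catalog managers come from the node's startup wiring. Errors are only logged,
// so a failed refill does not block the node from serving new writes.
async fn bootstrap_refill(
    manager: &FlowWorkerManagerRef,
    flow_metadata_manager: &FlowMetadataManagerRef,
    catalog_manager: &CatalogManagerRef,
) {
    if let Err(err) = manager
        .create_and_start_refill_flow_tasks(flow_metadata_manager, catalog_manager)
        .await
    {
        common_telemetry::error!("Failed to start refill flow tasks: {:?}", err);
    }
}
```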
/// Task to refill flow with given table id and a time range
pub struct RefillTask {
data: TaskData,
state: TaskState<()>,
}

#[derive(Clone)]
struct TaskData {
flow_id: FlowId,
table_id: TableId,
table_schema: RelationDesc,
}

impl TaskData {
/// validate that incoming batch's schema is the same as table schema(by comparing types&names)
fn validate_schema(table_schema: &RelationDesc, rb: &RecordBatch) -> Result<(), Error> {
let rb_schema = &rb.schema;
ensure!(
rb_schema.column_schemas().len() == table_schema.len()?,
UnexpectedSnafu {
reason: format!(
"RecordBatch schema length does not match table schema length, {}!={}",
rb_schema.column_schemas().len(),
table_schema.len()?
)
}
);
for (i, rb_col) in rb_schema.column_schemas().iter().enumerate() {
let (rb_name, rb_ty) = (rb_col.name.as_str(), &rb_col.data_type);
let (table_name, table_ty) = (
table_schema.names[i].as_ref(),
&table_schema.typ().column_types[i].scalar_type,
);
ensure!(
Some(rb_name) == table_name.map(|c| c.as_str()),
UnexpectedSnafu {
reason: format!(
"Mismatch in column names: expected {:?}, found {}",
table_name, rb_name
)
}
);

ensure!(
rb_ty == table_ty,
UnexpectedSnafu {
reason: format!(
"Mismatch in column types for {}: expected {:?}, found {:?}",
rb_name, table_ty, rb_ty
)
}
);
}
Ok(())
}
}

/// Refill task state
enum TaskState<T> {
/// Task is not started
Prepared { sql: String },
/// Task is running
Running {
handle: JoinHandle<Result<T, Error>>,
},
/// Task is finished
Finished { res: Result<T, Error> },
}

impl<T> TaskState<T> {
fn new(sql: String) -> Self {
Self::Prepared { sql }
}
}

mod test_send {
use std::collections::BTreeMap;

use tokio::sync::RwLock;

use super::*;
fn is_send<T: Send + Sync>() {}
fn foo() {
is_send::<TaskState<()>>();
is_send::<RefillTask>();
is_send::<BTreeMap<FlowId, RefillTask>>();
is_send::<RwLock<BTreeMap<FlowId, RefillTask>>>();
}
}

impl TaskState<()> {
/// check if task is finished
async fn is_finished(&mut self) -> Result<bool, Error> {
match self {
Self::Finished { .. } => Ok(true),
Self::Running { handle } => Ok(if handle.is_finished() {
*self = Self::Finished {
res: handle.await.context(JoinTaskSnafu)?,
};
true
} else {
false
}),
_ => Ok(false),
}
}

fn start_running(
&mut self,
task_data: &TaskData,
manager: FlowWorkerManagerRef,
mut output_stream: SendableRecordBatchStream,
) -> Result<(), Error> {
let data = (*task_data).clone();
let handle: JoinHandle<Result<(), Error>> = common_runtime::spawn_global(async move {
while let Some(rb) = output_stream.next().await {
let rb = match rb {
Ok(rb) => rb,
Err(err) => Err(BoxedError::new(err)).context(ExternalSnafu)?,
};
TaskData::validate_schema(&data.table_schema, &rb)?;

// send rb into flow node
manager
.node_context
.read()
.await
.send_rb(data.table_id, rb)
.await?;
}
common_telemetry::info!(
"Refill successful for source table_id={}, flow_id={}",
data.table_id,
data.flow_id
);
Ok(())
});
*self = Self::Running { handle };

Ok(())
}
}

/// Query stream of RefillTask, simply wrap RecordBatches and RecordBatchStream and check output is not `AffectedRows`
enum QueryStream {
Batches { batches: RecordBatches },
Stream { stream: SendableRecordBatchStream },
}

impl TryFrom<common_query::Output> for QueryStream {
type Error = Error;
fn try_from(value: common_query::Output) -> Result<Self, Self::Error> {
match value.data {
common_query::OutputData::Stream(stream) => Ok(QueryStream::Stream { stream }),
common_query::OutputData::RecordBatches(batches) => {
Ok(QueryStream::Batches { batches })
}
_ => UnexpectedSnafu {
reason: format!("Unexpected output data type: {:?}", value.data),
}
.fail(),
}
}
}

impl QueryStream {
fn try_into_stream(self) -> Result<SendableRecordBatchStream, Error> {
match self {
Self::Batches { batches } => Ok(batches.as_stream()),
Self::Stream { stream } => Ok(stream),
}
}
}

impl RefillTask {
/// Query with "select * from table WHERE time >= range_start and time < range_end"
pub async fn create(
flow_id: FlowId,
table_id: TableId,
time_range: Option<(common_time::Timestamp, common_time::Timestamp)>,
time_col_name: &str,
table_src: &ManagedTableSource,
) -> Result<RefillTask, Error> {
let (table_name, table_schema) = table_src.get_table_name_schema(&table_id).await?;
let all_col_names: BTreeSet<_> = table_schema
.relation_desc
.iter_names()
.flatten()
.map(|s| s.as_str())
.collect();

if !all_col_names.contains(time_col_name) {
UnexpectedSnafu {
reason: format!(
"Can't find column {} in table {} while refill flow",
time_col_name,
table_name.join(".")
),
}
.fail()?;
}

let sql = if let Some(time_range) = time_range {
format!(
"select * from {0} where {1} >= {2} and {1} < {3}",
table_name.join("."),
time_col_name,
Value::from(time_range.0),
Value::from(time_range.1),
)
} else {
format!("select * from {0}", table_name.join("."))
};

Ok(RefillTask {
data: TaskData {
flow_id,
table_id,
table_schema: table_schema.relation_desc,
},
state: TaskState::new(sql),
})
}
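For a concrete feel of the query built above: with a hypothetical source table `greptime.public.numbers_with_ts`, time index column `ts`, and a resolved time range, the `format!` yields a plain range scan. The timestamp literals are whatever `Value`'s `Display` produces for the two bounds, so placeholders stand in for them here:

```rust
#[test]
fn refill_query_shape() {
    // Hypothetical names; mirrors the `format!` call in RefillTask::create above.
    let table = "greptime.public.numbers_with_ts";
    let time_col = "ts";
    let (start, end) = ("<range_start>", "<range_end>"); // stand-ins for Value-rendered timestamps
    let sql = format!(
        "select * from {0} where {1} >= {2} and {1} < {3}",
        table, time_col, start, end
    );
    assert_eq!(
        sql,
        "select * from greptime.public.numbers_with_ts where ts >= <range_start> and ts < <range_end>"
    );
}
```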
/// Start running the task in background, non-blocking
pub async fn start_running(
&mut self,
manager: FlowWorkerManagerRef,
invoker: &FrontendInvoker,
) -> Result<(), Error> {
let TaskState::Prepared { sql } = &mut self.state else {
UnexpectedSnafu {
reason: "task is not prepared",
}
.fail()?
};

// we don't need information from query context in this query so a default query context is enough
let query_ctx = Arc::new(
QueryContextBuilder::default()
.current_catalog("greptime".to_string())
.current_schema("public".to_string())
.build(),
);

let stmt_exec = invoker.statement_executor();

let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let plan = stmt_exec
.plan(&stmt, query_ctx.clone())
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;

let output_data = stmt_exec
.exec_plan(plan, query_ctx)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;

let output_stream = QueryStream::try_from(output_data)?;
let output_stream = output_stream.try_into_stream()?;

self.state
.start_running(&self.data, manager, output_stream)?;
Ok(())
}

pub async fn is_finished(&mut self) -> Result<bool, Error> {
self.state.is_finished().await
}
}
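Putting the pieces of `RefillTask` together, a caller creates the task, calls `start_running` to spawn the copy in the background, then polls `is_finished`. A sketch under the assumption that a worker manager, frontend invoker and table source are already at hand; the ids and the poll interval below are made up:

```rust
// Sketch: assumes this sits in the same crate so RefillTask and friends resolve.
async fn refill_one_table(
    manager: FlowWorkerManagerRef,
    invoker: &FrontendInvoker,
    table_src: &ManagedTableSource,
) -> Result<(), Error> {
    let mut task = RefillTask::create(
        42,   // flow id (hypothetical)
        1025, // source table id (hypothetical)
        None, // no expire_after: refill the whole table
        "ts", // time index column name
        table_src,
    )
    .await?;

    task.start_running(manager, invoker).await?; // spawns the background copy

    // Poll until the background handle reports completion.
    while !task.is_finished().await? {
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
    Ok(())
}
```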
@@ -22,7 +22,6 @@ impl FlowWorkerManager {
pub async fn gen_state_report(&self) -> FlowStat {
let mut full_report = BTreeMap::new();
for worker in self.worker_handles.iter() {
let worker = worker.lock().await;
match worker.get_state_size().await {
Ok(state_size) => {
full_report.extend(state_size.into_iter().map(|(k, v)| (k as u32, v)))

@@ -17,6 +17,8 @@
use common_error::ext::BoxedError;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameManager};
use datatypes::schema::ColumnDefaultConstraint;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use table::metadata::TableId;

@@ -27,21 +29,112 @@ use crate::error::{
};
use crate::repr::RelationDesc;

/// mapping of table name <-> table id should be query from tableinfo manager
pub struct TableSource {
/// Table description, include relation desc and default values, which is the minimal information flow needed for table
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TableDesc {
pub relation_desc: RelationDesc,
pub default_values: Vec<Option<ColumnDefaultConstraint>>,
}

impl TableDesc {
pub fn new(
relation_desc: RelationDesc,
default_values: Vec<Option<ColumnDefaultConstraint>>,
) -> Self {
Self {
relation_desc,
default_values,
}
}

pub fn new_no_default(relation_desc: RelationDesc) -> Self {
Self {
relation_desc,
default_values: vec![],
}
}
}

/// Table source but for flow, provide table schema by table name/id
#[async_trait::async_trait]
pub trait FlowTableSource: Send + Sync + std::fmt::Debug {
async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error>;
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error>;

/// Get the table schema by table name
async fn table(&self, name: &TableName) -> Result<TableDesc, Error> {
let id = self.table_id_from_name(name).await?;
self.table_from_id(&id).await
}
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error>;
}
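The trait's provided `table()` method simply chains `table_id_from_name` with `table_from_id`, so an implementor only has to supply the two lookups. A small generic consumer of the trait, sketched here; the function name and logging call are illustrative only:

```rust
// Sketch: works against any FlowTableSource, e.g. ManagedTableSource below or the
// test-only dummy source, since both provide the two id/name lookups.
async fn print_column_count<S: FlowTableSource>(src: &S, name: &TableName) -> Result<(), Error> {
    let desc: TableDesc = src.table(name).await?; // table_id_from_name + table_from_id
    common_telemetry::info!(
        "table {} has {} columns",
        name.join("."),
        desc.relation_desc.len()?
    );
    Ok(())
}
```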
/// managed table source information, query from table info manager and table name manager
#[derive(Clone)]
pub struct ManagedTableSource {
/// for query `TableId -> TableName` mapping
table_info_manager: TableInfoManager,
table_name_manager: TableNameManager,
}

impl TableSource {
#[async_trait::async_trait]
impl FlowTableSource for ManagedTableSource {
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error> {
let table_info_value = self
.get_table_info_value(table_id)
.await?
.with_context(|| TableNotFoundSnafu {
name: format!("TableId = {:?}, Can't found table info", table_id),
})?;
let desc = table_info_value_to_relation_desc(table_info_value)?;

Ok(desc)
}
async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error> {
self.get_table_name(table_id).await
}
async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error> {
self.get_opt_table_id_from_name(name)
.await?
.with_context(|| TableNotFoundSnafu {
name: name.join("."),
})
}
}

impl ManagedTableSource {
pub fn new(table_info_manager: TableInfoManager, table_name_manager: TableNameManager) -> Self {
TableSource {
ManagedTableSource {
table_info_manager,
table_name_manager,
}
}

/// Get the time index column from table id
pub async fn get_time_index_column_from_table_id(
&self,
table_id: TableId,
) -> Result<(usize, datatypes::schema::ColumnSchema), Error> {
let info = self
.table_info_manager
.get(table_id)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.context(UnexpectedSnafu {
reason: format!("Table id = {:?}, couldn't found table info", table_id),
})?;
let raw_schema = &info.table_info.meta.schema;
let Some(ts_index) = raw_schema.timestamp_index else {
UnexpectedSnafu {
reason: format!("Table id = {:?}, couldn't found timestamp index", table_id),
}
.fail()?
};
let col_schema = raw_schema.column_schemas[ts_index].clone();
Ok((ts_index, col_schema))
}

pub async fn get_table_id_from_proto_name(
&self,
name: &greptime_proto::v1::TableName,
@@ -63,7 +156,10 @@ impl TableSource {
}

/// If the table haven't been created in database, the tableId returned would be null
pub async fn get_table_id_from_name(&self, name: &TableName) -> Result<Option<TableId>, Error> {
pub async fn get_opt_table_id_from_name(
&self,
name: &TableName,
) -> Result<Option<TableId>, Error> {
let ret = self
.table_name_manager
.get(TableNameKey::new(&name[0], &name[1], &name[2]))
@@ -107,7 +203,7 @@ impl TableSource {
pub async fn get_table_name_schema(
&self,
table_id: &TableId,
) -> Result<(TableName, RelationDesc), Error> {
) -> Result<(TableName, TableDesc), Error> {
let table_info_value = self
.get_table_info_value(table_id)
.await?
@@ -125,4 +221,130 @@ impl TableSource {
let desc = table_info_value_to_relation_desc(table_info_value)?;
Ok((table_name, desc))
}

pub async fn check_table_exist(&self, table_id: &TableId) -> Result<bool, Error> {
self.table_info_manager
.exists(*table_id)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
}
}

impl std::fmt::Debug for ManagedTableSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("KvBackendTableSource").finish()
}
}
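Before building refill tasks, the manager leans on `check_table_exist` and `get_time_index_column_from_table_id` from this impl. A condensed sketch of that lookup, assuming a `ManagedTableSource` and a `TableId` are already available:

```rust
// Sketch: returns the time index column name, or None when the source table
// was dropped so the caller can skip refilling that flow.
async fn time_index_of(src: &ManagedTableSource, table_id: TableId) -> Result<Option<String>, Error> {
    if !src.check_table_exist(&table_id).await? {
        return Ok(None); // table no longer exists
    }
    let (_idx, col) = src.get_time_index_column_from_table_id(table_id).await?;
    Ok(Some(col.name))
}
```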
#[cfg(test)]
pub(crate) mod test {
use std::collections::HashMap;

use datatypes::data_type::ConcreteDataType as CDT;

use super::*;
use crate::repr::{ColumnType, RelationType};

pub struct FlowDummyTableSource {
pub id_names_to_desc: Vec<(TableId, TableName, TableDesc)>,
id_to_idx: HashMap<TableId, usize>,
name_to_idx: HashMap<TableName, usize>,
}

impl Default for FlowDummyTableSource {
fn default() -> Self {
let id_names_to_desc = vec![
(
1024,
[
"greptime".to_string(),
"public".to_string(),
"numbers".to_string(),
],
TableDesc::new_no_default(
RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)])
.into_named(vec![Some("number".to_string())]),
),
),
(
1025,
[
"greptime".to_string(),
"public".to_string(),
"numbers_with_ts".to_string(),
],
TableDesc::new_no_default(
RelationType::new(vec![
ColumnType::new(CDT::uint32_datatype(), false),
ColumnType::new(CDT::timestamp_millisecond_datatype(), false),
])
.into_named(vec![Some("number".to_string()), Some("ts".to_string())]),
),
),
];
let id_to_idx = id_names_to_desc
.iter()
.enumerate()
.map(|(idx, (id, _name, _desc))| (*id, idx))
.collect();
let name_to_idx = id_names_to_desc
.iter()
.enumerate()
.map(|(idx, (_id, name, _desc))| (name.clone(), idx))
.collect();
Self {
id_names_to_desc,
id_to_idx,
name_to_idx,
}
}
}

#[async_trait::async_trait]
impl FlowTableSource for FlowDummyTableSource {
async fn table_from_id(&self, table_id: &TableId) -> Result<TableDesc, Error> {
let idx = self.id_to_idx.get(table_id).context(TableNotFoundSnafu {
name: format!("Table id = {:?}, couldn't found table desc", table_id),
})?;
let desc = self
.id_names_to_desc
.get(*idx)
.map(|x| x.2.clone())
.context(TableNotFoundSnafu {
name: format!("Table id = {:?}, couldn't found table desc", table_id),
})?;
Ok(desc)
}

async fn table_name_from_id(&self, table_id: &TableId) -> Result<TableName, Error> {
let idx = self.id_to_idx.get(table_id).context(TableNotFoundSnafu {
name: format!("Table id = {:?}, couldn't found table desc", table_id),
})?;
self.id_names_to_desc
.get(*idx)
.map(|x| x.1.clone())
.context(TableNotFoundSnafu {
name: format!("Table id = {:?}, couldn't found table desc", table_id),
})
}

async fn table_id_from_name(&self, name: &TableName) -> Result<TableId, Error> {
for (id, table_name, _desc) in &self.id_names_to_desc {
if name == table_name {
return Ok(*id);
}
}
TableNotFoundSnafu {
name: format!("Table name = {:?}, couldn't found table id", name),
}
.fail()?
}
}

impl std::fmt::Debug for FlowDummyTableSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DummyTableSource").finish()
}
}
}
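The dummy source gives tests a fixed pair of tables (`numbers`, `numbers_with_ts`) without a metadata service. A sketch of a unit test built on it; the `#[tokio::test]` harness is an assumption of this example, and the three-part `TableName` mirrors the array used above:

```rust
#[tokio::test]
async fn dummy_source_resolves_by_name() {
    let src = FlowDummyTableSource::default();
    let name: TableName = [
        "greptime".to_string(),
        "public".to_string(),
        "numbers_with_ts".to_string(),
    ];
    // Name resolves to the second built-in table, whose schema has two columns.
    assert_eq!(src.table_id_from_name(&name).await.unwrap(), 1025);
    let desc = src.table(&name).await.unwrap();
    assert_eq!(desc.relation_desc.len().unwrap(), 2); // number + ts
}
```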
@@ -27,12 +27,28 @@ use session::context::QueryContextBuilder;
use snafu::{OptionExt, ResultExt};
use table::table_reference::TableReference;

use crate::adapter::{TableName, AUTO_CREATED_PLACEHOLDER_TS_COL};
use crate::adapter::table_source::TableDesc;
use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
use crate::repr::{ColumnType, RelationDesc, RelationType};
use crate::FlowWorkerManager;

impl FlowWorkerManager {
/// Get a worker handle for creating flow, using round robin to select a worker
pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
let use_idx = {
let mut selector = self.worker_selector.lock().await;
if *selector >= self.worker_handles.len() {
*selector = 0
};
let use_idx = *selector;
*selector += 1;
use_idx
};
// Safety: selector is always in bound
&self.worker_handles[use_idx]
}
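The selector is reset to 0 whenever it runs past the number of workers, then used and advanced, which yields plain round-robin assignment. The same arithmetic in isolation, without the mutex:

```rust
// Minimal model of the round-robin choice: reset when out of range, use, then advance.
fn next_worker_index(selector: &mut usize, num_workers: usize) -> usize {
    if *selector >= num_workers {
        *selector = 0;
    }
    let use_idx = *selector;
    *selector += 1;
    use_idx
}

#[test]
fn round_robin_wraps() {
    let mut sel = 0;
    let picked: Vec<_> = (0..5).map(|_| next_worker_index(&mut sel, 3)).collect();
    assert_eq!(picked, vec![0, 1, 2, 0, 1]);
}
```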
/// Create table from given schema(will adjust to add auto column if needed), return true if table is created
pub(crate) async fn create_table_from_relation(
&self,
@@ -126,7 +142,7 @@ impl FlowWorkerManager {

pub fn table_info_value_to_relation_desc(
table_info_value: TableInfoValue,
) -> Result<RelationDesc, Error> {
) -> Result<TableDesc, Error> {
let raw_schema = table_info_value.table_info.meta.schema;
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
.column_schemas
@@ -147,8 +163,7 @@ pub fn table_info_value_to_relation_desc(
let keys = vec![crate::repr::Key::from(key)];

let time_index = raw_schema.timestamp_index;

Ok(RelationDesc {
let relation_desc = RelationDesc {
typ: RelationType {
column_types,
keys,
@@ -157,7 +172,14 @@ pub fn table_info_value_to_relation_desc(
auto_columns: vec![],
},
names: col_names,
})
};
let default_values = raw_schema
.column_schemas
.iter()
.map(|c| c.default_constraint().cloned())
.collect_vec();

Ok(TableDesc::new(relation_desc, default_values))
}
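Returning a `TableDesc` here means column defaults now travel alongside the relation schema, with `default_values[i]` corresponding to column `i` and columns without a default carrying `None`. A small helper sketched over that invariant; the function is illustrative, not part of the diff:

```rust
// Sketch: `desc` is assumed to come from table_info_value_to_relation_desc above.
// Returns the indexes of columns that declare a default constraint.
fn defaulted_columns(desc: &TableDesc) -> Vec<usize> {
    desc.default_values
        .iter()
        .enumerate()
        .filter_map(|(idx, d)| d.as_ref().map(|_| idx))
        .collect()
}
```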
pub fn from_proto_to_data_type(