Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-05 12:52:57 +00:00.

Compare commits: `chore/debu` ... `poc-write-` (168 commits)
The compare spans 168 commits, listed here by short SHA only:

68593ae92a 91d755d9b5 2566d254ad 0ec4ed804d cc435234a4 9c4aa81f85 bdbb5435ea fd9940a253
e0bafd661c 99baa86b6a 76d69901ea 764a57b80a 95b388d819 c2b556e321 06ebe6b3fb bec8245e75
3cb2343f7f d10c207371 1a73a40bd9 713a73e9b2 65a88a63db 5ad1436a8f ae59206caf 094d0fcdf5
7170120de6 dba6da4d00 59b31372aa d6b8672e63 deaa1f9578 f378d218e9 5b6279f191 698b28c636
c4d10313e6 f165bfb0af 4111c18d44 5abe4c141a adb5c3743c 7c5ead90ac d870987a65 dce4ed9f1d
bbfbc9f0f8 b107384cc6 2802c8bf28 9b9784a557 1e61d05211 d53b9fbd03 d01bc916f1 8ea463f516
088317fd3a 69881e3bc1 9af4160068 45e68603a1 1eb4b8ed4f 05f21679d6 35b635f639 3ed085459c
51a8d0a726 965a48656f 4259975be9 d2f3f2e24d f74a955504 6f1b5101a3 9f626ec776 0163ce8df9
2ab235ec9d 281d9a5920 385b1bcbb0 5287d46073 64ce9d3744 80790daae0 5daac5fe3d 4323c20d18
f53b6777cc 87c21e2baa d072801ad6 0607b38a20 e0384a7d46 d73815ba84 c78a492863 859717c309
52697a9e66 f8d26b433e 1acfb6ed1c 7eaabb3ca2 3a55f5d17c 8d5d4000e6 a598008ec3 86bd54194a
ccd2b06b7a 0db10a33d0 317fe9eaa5 a4761d6245 758aef39d8 4e3dd04f42 18b77408ae 725d5a9e68
4f29e50ef3 121ec7936f 0185a65905 f0d30a0f26 7e61d1ae27 e56dd20426 b64c075cdb 57f8afcb70
bd37e086c2 66f63ae981 95b20592ac 1855dccdf1 5efcb41310 f5829364a2 87bd12d6df c370b4b40d
3f01f67f94 6eb746d994 03a144fa56 f069ea082f 9ae48010f0 3a996c2f00 45d4065fd6 9e09be7ba6
50583815de 24ea9cf215 78d0fa75c9 0685ba265c be22da775a d33309be2b fdbfebf4be 812a775b3d
751fa4ede9 03a2e6d0c1 815ce59a3a c19a56c79f 7f307a4cac 52eebfce77 e18416a726 d1f8ea7880
2cd1b08ff7 0ee41339aa 369b59c84a c305b2b406 c89ef85902 3d9df822ad bc2f05d949 05f115e047
5cf9d7b6ca a1cd194d0c a56c430db0 6a1ec8db25 04708f10aa ddf36c8324 96b2a5fb28 bbbba29afc
b229c94fba 2ad50332cb 513569ed5d 69d9a2845f 1067357b72 2caf003db0 9bf9aa1082 353c8230db
577d81f14c 856bba5d95 89399131dd d20b592fe8 bcb0f14227 3b27adb3fe 4d6fe31fff 1b0b9add90
Changed files follow. The first file is not named in the listing (apparently the action that builds the Linux release artifacts):

@@ -48,12 +48,11 @@ runs:
         path: /tmp/greptime-*.log
         retention-days: 3

-  - name: Build greptime
-    if: ${{ inputs.dev-mode == 'false' }}
+  - name: Build greptime # Builds standard greptime binary
     uses: ./.github/actions/build-greptime-binary
     with:
       base-image: ubuntu
-      features: servers/dashboard
+      features: servers/dashboard,pg_kvbackend
       cargo-profile: ${{ inputs.cargo-profile }}
       artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
       version: ${{ inputs.version }}
@@ -71,7 +70,7 @@ runs:
     if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
     with:
       base-image: centos
-      features: servers/dashboard
+      features: servers/dashboard,pg_kvbackend
       cargo-profile: ${{ inputs.cargo-profile }}
       artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
       version: ${{ inputs.version }}
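For reference, a roughly equivalent local build with the newly added feature set might look like the sketch below; the exact cargo invocation happens inside the `build-greptime-binary` action and is not part of this diff.

```bash
# Build the greptime binary with the `servers` crate's dashboard feature plus the
# new PostgreSQL-backed metadata store feature, mirroring the
# `features: servers/dashboard,pg_kvbackend` input above.
cargo build --release --locked --bin greptime --features servers/dashboard,pg_kvbackend
```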
The next file is also not named in the listing (an action that downloads previously built artifacts):

@@ -9,8 +9,8 @@ runs:
   steps:
     # Download artifacts from previous jobs, the artifacts will be downloaded to:
     # ${WORKING_DIR}
-    # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
-    # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
+    # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
+    # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
     # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
     # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
     # ...
`.github/actions/upload-artifacts/action.yml` (vendored; 6 changed lines)

@@ -30,9 +30,9 @@ runs:
         done

     # The compressed artifacts will use the following layout:
-    # greptime-linux-amd64-pyo3-v0.3.0sha256sum
-    # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
-    # greptime-linux-amd64-pyo3-v0.3.0
+    # greptime-linux-amd64-v0.3.0sha256sum
+    # greptime-linux-amd64-v0.3.0.tar.gz
+    # greptime-linux-amd64-v0.3.0
     # └── greptime
     - name: Compress artifacts and calculate checksum
       working-directory: ${{ inputs.working-dir }}
`.github/scripts/upload-artifacts-to-s3.sh` (vendored; 8 changed lines)

@@ -27,11 +27,11 @@ function upload_artifacts() {
  # ├── latest-version.txt
  # ├── latest-nightly-version.txt
  # ├── v0.1.0
-  # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
-  # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
+  # │   ├── greptime-darwin-amd64-v0.1.0.sha256sum
+  # │   └── greptime-darwin-amd64-v0.1.0.tar.gz
  # └── v0.2.0
-  #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
-  #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
+  #     ├── greptime-darwin-amd64-v0.2.0.sha256sum
+  #     └── greptime-darwin-amd64-v0.2.0.tar.gz
  find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
    aws s3 cp \
      "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
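The dropped `pyo3` segment changes only the artifact names; the packaging and upload flow stays the same. A minimal sketch of producing one artifact pair in the renamed layout (the version and platform values are illustrative placeholders):

```bash
# Package one artifact directory and compute its checksum, matching the renamed
# layout shown above; the S3 step then copies both files under
# s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/.
VERSION=v0.5.0
ARTIFACT=greptime-darwin-amd64-${VERSION}
tar -zcvf "${ARTIFACT}.tar.gz" "${ARTIFACT}"
shasum -a 256 "${ARTIFACT}.tar.gz" > "${ARTIFACT}.sha256sum"
```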
`.github/workflows/dependency-check.yml` (vendored; 3 changed lines)

@@ -1,9 +1,6 @@
 name: Check Dependencies

 on:
-  push:
-    branches:
-      - main
   pull_request:
     branches:
       - main
`.github/workflows/develop.yml` (vendored; 144 changed lines)

@@ -1,4 +1,6 @@
 on:
+  schedule:
+    - cron: "0 15 * * 1-5"
   merge_group:
   pull_request:
     types: [ opened, synchronize, reopened, ready_for_review ]
@@ -43,7 +45,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ windows-2022, ubuntu-20.04 ]
+        os: [ ubuntu-20.04 ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -57,6 +59,8 @@ jobs:
           # Shares across multiple jobs
           # Shares with `Clippy` job
           shared-key: "check-lint"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Run cargo check
         run: cargo check --locked --workspace --all-targets

@@ -67,11 +71,6 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "check-toml"
       - name: Install taplo
         run: cargo +stable install taplo-cli --version ^0.9 --locked --force
       - name: Run taplo
@@ -94,13 +93,15 @@ jobs:
         with:
           # Shares across multiple jobs
           shared-key: "build-binaries"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Install cargo-gc-bin
         shell: bash
         run: cargo install cargo-gc-bin --force
       - name: Build greptime binaries
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc -- --bin greptime --bin sqlness-runner
+        run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
       - name: Pack greptime binaries
         shell: bash
         run: |
@@ -142,11 +143,6 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "fuzz-test-targets"
       - name: Set Rust Fuzz
         shell: bash
         run: |
@@ -200,11 +196,6 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "fuzz-test-targets"
       - name: Set Rust Fuzz
         shell: bash
         run: |
@@ -255,13 +246,15 @@ jobs:
         with:
           # Shares across multiple jobs
           shared-key: "build-greptime-ci"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Install cargo-gc-bin
         shell: bash
         run: cargo install cargo-gc-bin --force
       - name: Build greptime bianry
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc --profile ci -- --bin greptime
+        run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
       - name: Pack greptime binary
         shell: bash
         run: |
@@ -317,11 +310,6 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "fuzz-test-targets"
       - name: Set Rust Fuzz
         shell: bash
         run: |
@@ -466,11 +454,6 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "fuzz-test-targets"
       - name: Set Rust Fuzz
         shell: bash
         run: |
@@ -573,13 +556,16 @@ jobs:
           - name: "Remote WAL"
             opts: "-w kafka -k 127.0.0.1:9092"
             kafka: true
+          - name: "Pg Kvbackend"
+            opts: "--setup-pg"
+            kafka: false
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
       - if: matrix.mode.kafka
         name: Setup kafka server
-        working-directory: tests-integration/fixtures/kafka
-        run: docker compose -f docker-compose-standalone.yml up -d --wait
+        working-directory: tests-integration/fixtures
+        run: docker compose up -d --wait kafka
       - name: Download pre-built binaries
         uses: actions/download-artifact@v4
         with:
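The new `Pg Kvbackend` matrix entry just forwards `--setup-pg` to the sqlness runner. Assuming the Makefile's `sqlness-test` target (shown later in this diff) passes `SQLNESS_OPTS` through unchanged, a local equivalent might be:

```bash
# Run the sqlness suite against a PostgreSQL-backed metadata store, mirroring the
# new "Pg Kvbackend" matrix entry; this expands to `cargo sqlness --setup-pg`.
SQLNESS_OPTS="--setup-pg" make sqlness-test
```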
Continuing in the same workflow file:

@@ -609,11 +595,6 @@ jobs:
       - uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
           components: rustfmt
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "check-rust-fmt"
       - name: Check format
         run: make fmt-check

@@ -635,55 +616,100 @@ jobs:
           # Shares across multiple jobs
           # Shares with `Check` job
           shared-key: "check-lint"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Run cargo clippy
         run: make clippy

-  coverage:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-20.04-8-cores
+  conflict-check:
+    name: Check for conflict
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Merge Conflict Finder
+        uses: olivernybroe/action-conflict-finder@v4.0
+
+  test:
+    if: github.event_name != 'merge_group'
+    runs-on: ubuntu-22.04-arm
     timeout-minutes: 60
-    needs: [clippy, fmt]
+    needs: [conflict-check, clippy, fmt]
     steps:
       - uses: actions/checkout@v4
       - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: KyleMayes/install-llvm-action@v1
-        with:
-          version: "14.0"
+      - uses: rui314/setup-mold@v1
       - name: Install toolchain
         uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          components: llvm-tools-preview
+          cache: false
       - name: Rust Cache
         uses: Swatinem/rust-cache@v2
         with:
           # Shares cross multiple jobs
           shared-key: "coverage-test"
-      - name: Docker Cache
-        uses: ScribeMD/docker-cache@0.3.7
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - name: Install latest nextest release
+        uses: taiki-e/install-action@nextest
+      - name: Setup external services
+        working-directory: tests-integration/fixtures
+        run: docker compose up -d --wait
+      - name: Run nextest cases
+        run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
+        env:
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
+          RUST_BACKTRACE: 1
+          RUST_MIN_STACK: 8388608 # 8MB
+          CARGO_INCREMENTAL: 0
+          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          GT_MINIO_BUCKET: greptime
+          GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
+          GT_MINIO_ACCESS_KEY: superpower_password
+          GT_MINIO_REGION: us-west-2
+          GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
+          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
+          GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
+          GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
+          UNITTEST_LOG_DIR: "__unittest_logs"
+
+  coverage:
+    if: github.event_name == 'merge_group'
+    runs-on: ubuntu-20.04-8-cores
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+      - uses: arduino/setup-protoc@v3
         with:
-          key: docker-${{ runner.os }}-coverage
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: rui314/setup-mold@v1
+      - name: Install toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: llvm-tools
+          cache: false
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          # Shares cross multiple jobs
+          shared-key: "coverage-test"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Install latest nextest release
         uses: taiki-e/install-action@nextest
       - name: Install cargo-llvm-cov
         uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Setup etcd server
-        working-directory: tests-integration/fixtures/etcd
-        run: docker compose -f docker-compose-standalone.yml up -d --wait
-      - name: Setup kafka server
-        working-directory: tests-integration/fixtures/kafka
-        run: docker compose -f docker-compose-standalone.yml up -d --wait
-      - name: Setup minio
-        working-directory: tests-integration/fixtures/minio
-        run: docker compose -f docker-compose-standalone.yml up -d --wait
-      - name: Setup postgres server
-        working-directory: tests-integration/fixtures/postgres
-        run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Setup external services
+        working-directory: tests-integration/fixtures
+        run: docker compose up -d --wait
       - name: Run nextest cases
         run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
         env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
           RUST_BACKTRACE: 1
           CARGO_INCREMENTAL: 0
           GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
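A rough local approximation of the new `test` job, using only values visible in the workflow above (the S3 variables come from CI secrets and are omitted; mold and the ARM runner are CI specifics). It assumes the consolidated `tests-integration/fixtures` compose file provides the same services the old per-service fixtures did (etcd, PostgreSQL, Kafka, MinIO):

```bash
# Start the shared test fixtures, export the endpoints the tests expect,
# and run the workspace tests with the same feature flags as CI.
(cd tests-integration/fixtures && docker compose up -d --wait)
export GT_ETCD_ENDPOINTS=http://127.0.0.1:2379
export GT_POSTGRES_ENDPOINTS=postgres://greptimedb:admin@127.0.0.1:5432/postgres
export GT_KAFKA_ENDPOINTS=127.0.0.1:9092
export GT_MINIO_ENDPOINT_URL=http://127.0.0.1:9000
export RUST_BACKTRACE=1
cargo nextest run --workspace -F dashboard -F pg_kvbackend
```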
`.github/workflows/docs.yml` (vendored; 5 changed lines)

@@ -66,6 +66,11 @@ jobs:
     steps:
       - run: 'echo "No action required"'

+  test:
+    runs-on: ubuntu-20.04
+    steps:
+      - run: 'echo "No action required"'
+
   sqlness:
     name: Sqlness Test (${{ matrix.mode.name }})
     runs-on: ${{ matrix.os }}
`.github/workflows/nightly-ci.yml` (vendored; 7 changed lines)

@@ -109,14 +109,15 @@ jobs:
           UNITTEST_LOG_DIR: "__unittest_logs"

   cleanbuild-linux-nix:
-    runs-on: ubuntu-latest-8-cores
+    name: Run clean build on Linux
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
       - uses: cachix/install-nix-action@v27
         with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - run: nix-shell --pure --run "cargo build"
+          nix_path: nixpkgs=channel:nixos-24.11
+      - run: nix develop --command cargo build

   check-status:
     name: Check status
`.github/workflows/release.yml` (vendored; 16 changed lines)

@@ -436,6 +436,22 @@ jobs:
           aws-region: ${{ vars.EC2_RUNNER_REGION }}
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

+  bump-doc-version:
+    name: Bump doc version
+    if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
+    needs: [allocate-runners]
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Bump doc version
+        working-directory: cyborg
+        run: pnpm tsx bin/bump-doc-version.ts
+        env:
+          VERSION: ${{ needs.allocate-runners.outputs.version }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+
   notification:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
     name: Send notification to Greptime team
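The new `bump-doc-version` job only runs a script from the `cyborg` directory. Invoked by hand it would look roughly like this (the version value and tokens are placeholders):

```bash
# Run the doc-version bump script the same way the workflow step does.
cd cyborg
VERSION=v0.12.0 GITHUB_TOKEN=<github-token> DOCS_REPO_TOKEN=<docs-repo-token> \
  pnpm tsx bin/bump-doc-version.ts
```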
`Cargo.lock` (generated; 3,397 changed lines). The diff is suppressed because it is too large.
`Cargo.toml` (77 changed lines)

@@ -55,7 +55,6 @@ members = [
     "src/promql",
     "src/puffin",
     "src/query",
-    "src/script",
     "src/servers",
     "src/session",
     "src/sql",
@@ -79,8 +78,6 @@ clippy.dbg_macro = "warn"
 clippy.implicit_clone = "warn"
 clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
-# Remove this after https://github.com/PyO3/pyo3/issues/4094
-rust.non_local_definitions = "allow"
 rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

 [workspace.dependencies]
@@ -91,14 +88,18 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
 # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "51.0.0", features = ["prettyprint"] }
-arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "51.0"
-arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "51.0", features = ["serde"] }
+arrow = { version = "53.0.0", features = ["prettyprint"] }
+arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
+arrow-flight = "53.0"
+arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "53.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
-axum = { version = "0.6", features = ["headers"] }
+# Remember to update axum-extra, axum-macros when updating axum
+axum = "0.8"
+axum-extra = "0.10"
+axum-macros = "0.4"
+backon = "1"
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
@@ -109,29 +110,36 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+deadpool = "0.10"
+deadpool-postgres = "0.12"
 derive_builder = "0.12"
 dotenv = "0.15"
-etcd-client = "0.13"
+etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
+# branch: poc-write-path
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1915576b113a494f5352fd61f211d899b7f87aab" }
 hex = "0.4"
-http = "0.2"
+http = "1"
 humantime = "2.1"
 humantime-serde = "1.1"
+hyper = "1.1"
+hyper-util = "0.1"
 itertools = "0.10"
 jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
+local-ip-address = "0.6"
+loki-api = { git = "https://github.com/shuiyisong/tracing-loki", branch = "chore/prost_version" }
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
 mockall = "0.11.4"
 moka = "0.12"
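After switching the DataFusion crates from the personal fork to the upstream `apache/datafusion` pin, one quick way to confirm which revision the workspace actually resolves is sketched below (an illustrative check, not part of the diff):

```bash
# Show where the datafusion crate now comes from and which revision is locked.
cargo tree --package datafusion --depth 0
grep -A 2 'name = "datafusion"' Cargo.lock | head -n 6
```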
Continuing in `Cargo.toml`:

@@ -139,7 +147,7 @@ nalgebra = "0.33"
 notify = "6.1"
 num_cpus = "1.16"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.5", features = [
+opentelemetry-proto = { version = "0.27", features = [
   "gen-tonic",
   "metrics",
   "trace",
@@ -147,12 +155,12 @@ opentelemetry-proto = { version = "0.5", features = [
   "logs",
 ] }
 parking_lot = "0.12"
-parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
 promql-parser = { version = "0.4.3", features = ["ser"] }
-prost = "0.12"
+prost = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 ratelimit = "0.9"
@@ -171,28 +179,30 @@ rstest = "0.21"
 rstest_reuse = "0.7"
 rust_decimal = "1.33"
 rustc-hash = "2.0"
+rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
-shadow-rs = "0.35"
+shadow-rs = "0.38"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
 sysinfo = "0.30"
-# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
+# on branch v0.52.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
   "visitor",
   "serde",
-] }
+] } # on branch v0.44.x
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
 tokio = { version = "1.40", features = ["full"] }
 tokio-postgres = "0.7"
+tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
 tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
-tower = "0.4"
+tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
+tower = "0.5"
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
@@ -254,7 +264,6 @@ plugins = { path = "src/plugins" }
 promql = { path = "src/promql" }
 puffin = { path = "src/puffin" }
 query = { path = "src/query" }
-script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
@@ -264,9 +273,9 @@ table = { path = "src/table" }

 [patch.crates-io]
 # change all rustls dependencies to use our fork to default to `ring` to make it "just work"
-hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
-rustls = { git = "https://github.com/GreptimeTeam/rustls" }
-tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
+hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
+rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
+tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
 # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
 # see https://github.com/aws/aws-lc-rs/pull/526
 # aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
`Makefile` (7 changed lines)

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -165,15 +165,14 @@ nextest: ## Install nextest tools.
 sqlness-test: ## Run sqlness test.
 	cargo sqlness ${SQLNESS_OPTS}

-# Run fuzz test ${FUZZ_TARGET}.
 RUNS ?= 1
 FUZZ_TARGET ?= fuzz_alter_table
 .PHONY: fuzz
-fuzz:
+fuzz: ## Run fuzz test ${FUZZ_TARGET}.
 	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

 .PHONY: fuzz-ls
-fuzz-ls:
+fuzz-ls: ## List all fuzz targets.
 	cargo fuzz list --fuzz-dir tests-fuzz

 .PHONY: check
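With the help text moved onto the targets themselves, the fuzz targets are used exactly as before, for example:

```bash
# List the available fuzz targets, then run one for a bounded number of runs.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=10
```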
The next hunks appear to be from the project README (file name not shown in the listing):

@@ -13,7 +13,7 @@
   <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
   <a href="https://docs.greptime.com/">User Guide</a> |
   <a href="https://greptimedb.rs/">API Docs</a> |
-  <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
+  <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
 </h4>

 <a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -138,7 +138,8 @@ Check the prerequisite:

 * [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
 * [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
-* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
+* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
+* Python toolchain (optional): Required only if using some test scripts.

 Build GreptimeDB binary:

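On a Debian/Ubuntu system the new C/C++ prerequisite roughly translates to the packages below (an illustrative install line; package names differ across distributions):

```bash
# Build essentials plus glibc headers, as listed in the prerequisites, and the
# protobuf compiler mentioned just above (protoc >= 3.15).
sudo apt-get update
sudo apt-get install -y gcc g++ autoconf libc6-dev protobuf-compiler
```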
From the same file, the acknowledgements list drops the RustPython entry:

@@ -228,4 +229,3 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
 - GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
 - [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
 - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
-- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
The remaining hunks appear to update the configuration reference tables (file name not shown in the listing):

@@ -26,6 +26,8 @@
 | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
 | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
 | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
+| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
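With `http.enable_cors` defaulting to `true`, a request carrying an `Origin` header should come back with CORS response headers. A quick way to eyeball this (illustrative; assumes a local instance on the default `127.0.0.1:4000` and that the `/health` endpoint is enabled):

```bash
# Send a request with an Origin header and look for Access-Control-Allow-* headers.
curl -i -H "Origin: http://example.com" http://127.0.0.1:4000/health
```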
@@ -91,10 +93,12 @@
|
|||||||
| `procedure` | -- | -- | Procedure storage options. |
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
|
| `flow` | -- | -- | flow engine options. |
|
||||||
|
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||||
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
|
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
@@ -132,10 +136,10 @@
|
|||||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
|
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
@@ -143,15 +147,15 @@
|
|||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||||
|
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||||
|
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||||
|
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
@@ -168,6 +172,8 @@
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
+ | `region_engine.metric` | -- | -- | Metric engine options. |
+ | `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
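For illustration, the new metric engine options map onto a `[region_engine.metric]` table in the datanode TOML. A minimal, hedged sketch (the `true` value is only an example; the option defaults to `false`):

```toml
[[region_engine]]
## Metric engine options (new in this change).
[region_engine.metric]
## Experimental; leave at `false` unless you are testing sparse primary key encoding.
experimental_sparse_primary_key_encoding = true
```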
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -212,9 +218,11 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+ | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
+ | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
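Taken together, the HTTP options above correspond to an `[http]` section in the frontend TOML. A hedged sketch using the documented defaults (the allowed origin is a placeholder):

```toml
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
## CORS is on by default; list origins explicitly to restrict it.
enable_cors = true
cors_allowed_origins = ["https://example.com"]
```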
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
- | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
+ | `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
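Note that the default `grpc.hostname` now includes the port. A hedged sketch of advertising an externally reachable address while binding to all interfaces (the `10.0.0.2` address is hypothetical):

```toml
[grpc]
## Bind locally, but advertise the reachable host:port to the metasrv.
addr = "0.0.0.0:4001"
hostname = "10.0.0.2:4001"
```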
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -293,9 +301,11 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
- | `store_addrs` | Array | -- | Store server address default to etcd store. |
+ | `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
- | `backend` | String | `EtcdStore` | The datastore for meta server. |
+ | `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
+ | `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
+ | `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
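Combining the backend-related options above, a metasrv TOML might look like the following sketch; the PostgreSQL connection string is the placeholder value quoted in the table, not a working credential:

```toml
## Default: etcd-backed metadata store.
backend = "etcd_store"
store_addrs = ["127.0.0.1:2379"]

## Alternative: PostgreSQL-backed metadata store.
# backend = "postgres_store"
# store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
# meta_table_name = "greptime_metakv"
# meta_election_lock_id = 1
```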
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
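For example, a cluster that already runs with a remote WAL and shared storage could opt in as below; this is a sketch only, since failover has no effect with a local WAL or local disks:

```toml
## Requires cluster mode, remote WAL, and shared storage (e.g., S3).
enable_region_failover = true
```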
@@ -323,7 +333,7 @@
| `wal.auto_create_topics` | Bool | `true` | Whether to automatically create topics for WAL.<br/>Set to `true` to create topics automatically.<br/>Otherwise, use pre-created topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
- | `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
+ | `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
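A hedged sketch of a metasrv Kafka WAL section that combines the options above. The `provider` and `broker_endpoints` keys are not part of this excerpt and are assumed here; the broker address is a placeholder, and the prefix must match the regular expression shown above:

```toml
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
## Topics become greptimedb_wal_topic_0, greptimedb_wal_topic_1, ...
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
```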
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -378,7 +388,7 @@
|
|||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
@@ -466,10 +476,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
- | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
+ | `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
- | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
+ | `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
- | `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
+ | `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
- | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
+ | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
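Because the `experimental_` prefix was dropped, existing configurations need these options renamed. A hedged sketch of the new spelling (the cache path is a hypothetical example; leaving it empty keeps the `{data_home}` default):

```toml
[[region_engine]]
[region_engine.mito]
## Formerly enable_experimental_write_cache / experimental_write_cache_*.
enable_write_cache = true
write_cache_path = "/var/lib/greptimedb/write_cache"
write_cache_size = "5GiB"
write_cache_ttl = "8h"
```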
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -477,15 +487,15 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
+ | `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
+ | `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
+ | `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
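The three cache options above are new here and live under `region_engine.mito.index` (the equivalent `inverted_index.*` entries are removed further down). A hedged sketch using the documented defaults:

```toml
[[region_engine]]
[region_engine.mito.index]
## Empty means `{data_home}/index_intermediate`.
aux_path = ""
staging_size = "2GB"
metadata_cache_size = "64MiB"
content_cache_size = "128MiB"
content_cache_page_size = "64KiB"
```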
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
@@ -502,6 +512,8 @@
|
|||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
|
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
@@ -534,12 +546,18 @@
| --- | ----- | ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier, which should be unique in the cluster. |
+ | `flow` | -- | -- | Flow engine options. |
+ | `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Leaving this unset (or setting it to 0) uses the number of CPU cores divided by 2. |
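A minimal flownode fragment using the new `[flow]` section; the worker count of 4 is an arbitrary example, and leaving it unset (or 0) falls back to half the CPU cores:

```toml
[flow]
## Defaults to (number of CPU cores) / 2 when unset or 0.
num_workers = 4
```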
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
| `meta_client` | -- | -- | The metasrv client options. |
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ body_limit = "64MB"
|
|||||||
addr = "127.0.0.1:3001"
|
addr = "127.0.0.1:3001"
|
||||||
## The hostname advertised to the metasrv,
|
## The hostname advertised to the metasrv,
|
||||||
## and used for connections from outside the host
|
## and used for connections from outside the host
|
||||||
hostname = "127.0.0.1"
|
hostname = "127.0.0.1:3001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
|
|||||||
## @toml2docs:none-default="Auto"
|
## @toml2docs:none-default="Auto"
|
||||||
#+ selector_result_cache_size = "512MB"
|
#+ selector_result_cache_size = "512MB"
|
||||||
|
|
||||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||||
enable_experimental_write_cache = false
|
enable_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}`.
|
## File system path for write cache, defaults to `{data_home}`.
|
||||||
experimental_write_cache_path = ""
|
write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
experimental_write_cache_size = "5GiB"
|
write_cache_size = "5GiB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
experimental_write_cache_ttl = "8h"
|
write_cache_ttl = "8h"
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
@@ -516,6 +516,15 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -543,15 +552,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "8MiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -622,6 +622,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
@@ -5,6 +5,12 @@ mode = "distributed"
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
node_id = 14
|
node_id = 14
|
||||||
|
|
||||||
|
## flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow worker in flownode.
|
||||||
|
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
@@ -19,6 +25,16 @@ max_recv_message_size = "512MB"
|
|||||||
## The maximum send message size for gRPC server.
|
## The maximum send message size for gRPC server.
|
||||||
max_send_message_size = "512MB"
|
max_send_message_size = "512MB"
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
|
[http]
|
||||||
|
## The address to bind the HTTP server.
|
||||||
|
addr = "127.0.0.1:4000"
|
||||||
|
## HTTP request timeout. Set to 0 to disable timeout.
|
||||||
|
timeout = "30s"
|
||||||
|
## HTTP request body limit.
|
||||||
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
|
## Set to 0 to disable limit.
|
||||||
|
body_limit = "64MB"
|
||||||
|
|
||||||
## The metasrv client options.
|
## The metasrv client options.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
|
|||||||
@@ -31,6 +31,12 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support, it's turned on by default
|
||||||
|
## This allows browser to access http APIs without CORS restrictions
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
@@ -38,7 +44,7 @@ body_limit = "64MB"
|
|||||||
addr = "127.0.0.1:4001"
|
addr = "127.0.0.1:4001"
|
||||||
## The hostname advertised to the metasrv,
|
## The hostname advertised to the metasrv,
|
||||||
## and used for connections from outside the host
|
## and used for connections from outside the host
|
||||||
hostname = "127.0.0.1"
|
hostname = "127.0.0.1:4001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
|
|||||||
@@ -8,13 +8,29 @@ bind_addr = "127.0.0.1:3002"
|
|||||||
server_addr = "127.0.0.1:3002"
|
server_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## Store server address default to etcd store.
|
## Store server address default to etcd store.
|
||||||
|
## For postgres store, the format is:
|
||||||
|
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||||
|
## For etcd store, the format is:
|
||||||
|
## "127.0.0.1:2379"
|
||||||
store_addrs = ["127.0.0.1:2379"]
|
store_addrs = ["127.0.0.1:2379"]
|
||||||
|
|
||||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
store_key_prefix = ""
|
store_key_prefix = ""
|
||||||
|
|
||||||
## The datastore for meta server.
|
## The datastore for meta server.
|
||||||
backend = "EtcdStore"
|
## Available values:
|
||||||
|
## - `etcd_store` (default value)
|
||||||
|
## - `memory_store`
|
||||||
|
## - `postgres_store`
|
||||||
|
backend = "etcd_store"
|
||||||
|
|
||||||
|
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
|
||||||
|
## **Only used when backend is `postgres_store`.**
|
||||||
|
meta_table_name = "greptime_metakv"
|
||||||
|
|
||||||
|
## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
|
||||||
|
## Only used when backend is `postgres_store`.
|
||||||
|
meta_election_lock_id = 1
|
||||||
|
|
||||||
## Datanode selector type.
|
## Datanode selector type.
|
||||||
## - `round_robin` (default value)
|
## - `round_robin` (default value)
|
||||||
@@ -113,6 +129,8 @@ num_topics = 64
|
|||||||
selector_type = "round_robin"
|
selector_type = "round_robin"
|
||||||
|
|
||||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
## Only accepts strings that match the following regular expression pattern:
|
||||||
|
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
||||||
## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||||
topic_name_prefix = "greptimedb_wal_topic"
|
topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
|
||||||
|
|||||||
@@ -39,6 +39,12 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support, it's turned on by default
|
||||||
|
## This allows browser to access http APIs without CORS restrictions
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
@@ -284,6 +290,12 @@ max_retry_times = 3
|
|||||||
## Initial retry delay of procedures, increases exponentially
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
## flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow worker in flownode.
|
||||||
|
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
@@ -337,7 +349,7 @@ data_home = "/tmp/greptimedb/"
type = "File"

## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
- ## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
+ ## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""
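## A hedged illustration (not part of this change): with an object storage
## backend such as S3, the read cache can be pointed at an explicit local
## directory. The path below is hypothetical; leaving `cache_path` unset keeps
## the default location under `{data_home}`.
#+ cache_path = "/var/lib/greptimedb/object_cache/read"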
@@ -518,18 +530,18 @@ auto_flush_interval = "1h"
|
|||||||
## @toml2docs:none-default="Auto"
|
## @toml2docs:none-default="Auto"
|
||||||
#+ selector_result_cache_size = "512MB"
|
#+ selector_result_cache_size = "512MB"
|
||||||
|
|
||||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||||
enable_experimental_write_cache = false
|
enable_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
|
## File system path for write cache, defaults to `{data_home}`.
|
||||||
experimental_write_cache_path = ""
|
write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
experimental_write_cache_size = "5GiB"
|
write_cache_size = "5GiB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
experimental_write_cache_ttl = "8h"
|
write_cache_ttl = "8h"
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
@@ -559,6 +571,15 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -586,15 +607,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "8MiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -665,6 +677,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
cyborg/bin/bump-doc-version.ts (new file, 75 lines)
@@ -0,0 +1,75 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2023 Greptime Team
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import * as core from "@actions/core";
|
||||||
|
import {obtainClient} from "@/common";
|
||||||
|
|
||||||
|
async function triggerWorkflow(workflowId: string, version: string) {
|
||||||
|
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||||
|
try {
|
||||||
|
await docsClient.rest.actions.createWorkflowDispatch({
|
||||||
|
owner: "GreptimeTeam",
|
||||||
|
repo: "docs",
|
||||||
|
workflow_id: workflowId,
|
||||||
|
ref: "main",
|
||||||
|
inputs: {
|
||||||
|
version,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
|
||||||
|
} catch (error) {
|
||||||
|
core.setFailed(`Failed to trigger workflow: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function determineWorkflow(version: string): [string, string] {
|
||||||
|
// Check if it's a nightly version
|
||||||
|
if (version.includes('nightly')) {
|
||||||
|
return ['bump-nightly-version.yml', version];
|
||||||
|
}
|
||||||
|
|
||||||
|
const parts = version.split('.');
|
||||||
|
|
||||||
|
if (parts.length !== 3) {
|
||||||
|
throw new Error('Invalid version format');
|
||||||
|
}
|
||||||
|
|
||||||
|
// If patch version (last number) is 0, it's a major version
|
||||||
|
// Return only major.minor version
|
||||||
|
if (parts[2] === '0') {
|
||||||
|
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise it's a patch version, use full version
|
||||||
|
return ['bump-patch-version.yml', version];
|
||||||
|
}
|
||||||
|
|
||||||
|
const version = process.env.VERSION;
|
||||||
|
if (!version) {
|
||||||
|
core.setFailed("VERSION environment variable is required");
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove 'v' prefix if exists
|
||||||
|
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
|
||||||
|
triggerWorkflow(workflowId, apiVersion);
|
||||||
|
} catch (error) {
|
||||||
|
core.setFailed(`Error processing version: ${error.message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Build the project in release mode.
|
# Build the project in release mode.
|
||||||
RUN --mount=target=.,rw \
|
RUN --mount=target=.,rw \
|
||||||
|
|||||||
@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
|
|||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
# Add PPA for Python 3.10.
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
add-apt-repository ppa:deadsnakes/ppa -y
|
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN --mount=type=cache,target=/var/cache/apt \
|
RUN --mount=type=cache,target=/var/cache/apt \
|
||||||
|
|||||||
@@ -9,16 +9,20 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
|
|||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN apt-get update && apt-get install -y \
|
RUN apt-get update && apt-get install -y \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
protobuf-compiler \
|
|
||||||
curl \
|
curl \
|
||||||
git \
|
git \
|
||||||
|
unzip \
|
||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config
|
||||||
python3 \
|
|
||||||
python3-dev \
|
# Install protoc
|
||||||
python3-pip \
|
ARG PROTOBUF_VERSION=29.3
|
||||||
&& pip3 install --upgrade pip \
|
|
||||||
&& pip3 install pyarrow
|
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
|
||||||
|
|
||||||
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# Trust workdir
|
# Trust workdir
|
||||||
RUN git config --global --add safe.directory /greptimedb
|
RUN git config --global --add safe.directory /greptimedb
|
||||||
|
|||||||
@@ -12,18 +12,21 @@ RUN yum install -y epel-release \
|
|||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
centos-release-scl \
|
centos-release-scl \
|
||||||
rh-python38 \
|
|
||||||
rh-python38-python-devel \
|
|
||||||
which
|
which
|
||||||
|
|
||||||
# Install protoc
|
# Install protoc
|
||||||
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
|
ARG PROTOBUF_VERSION=29.3
|
||||||
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|
||||||
|
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
|
||||||
|
|
||||||
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Install Rust toolchains.
|
# Install Rust toolchains.
|
||||||
ARG RUST_TOOLCHAIN
|
ARG RUST_TOOLCHAIN
|
||||||
|
|||||||
@@ -6,11 +6,8 @@ ARG DOCKER_BUILD_ROOT=.
|
|||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
# Add PPA for Python 3.10.
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
add-apt-repository ppa:deadsnakes/ppa -y
|
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
@@ -20,39 +17,24 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
git \
|
git \
|
||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config
|
||||||
python3.10 \
|
|
||||||
python3.10-dev
|
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN echo "target platform: $TARGETPLATFORM"
|
RUN echo "target platform: $TARGETPLATFORM"
|
||||||
|
|
||||||
|
ARG PROTOBUF_VERSION=29.3
|
||||||
|
|
||||||
# Install protobuf, because the one in the apt is too old (v3.12).
|
# Install protobuf, because the one in the apt is too old (v3.12).
|
||||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
|
||||||
unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
|
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
|
||||||
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
|
||||||
fi
|
fi
|
||||||
RUN mv protoc3/bin/* /usr/local/bin/
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
RUN mv protoc3/include/* /usr/local/include/
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
|
|
||||||
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
|
|
||||||
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
|
|
||||||
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
|
|
||||||
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
|
|
||||||
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
|
|
||||||
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
|
|
||||||
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
|
|
||||||
|
|
||||||
# Remove Python 3.8 and install pip.
|
|
||||||
RUN apt-get -y purge python3.8 && \
|
|
||||||
apt-get -y autoremove && \
|
|
||||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
|
||||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
|
||||||
|
|
||||||
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||||
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||||
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||||
@@ -65,10 +47,6 @@ RUN apt-get -y purge python3.8 && \
|
|||||||
# it can be a different user that have prepared the submodules.
|
# it can be a different user that have prepared the submodules.
|
||||||
RUN git config --global --add safe.directory '*'
|
RUN git config --global --add safe.directory '*'
|
||||||
|
|
||||||
# Install Python dependencies.
|
|
||||||
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
|
||||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
|
||||||
|
|
||||||
# Install Rust.
|
# Install Rust.
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
pkg-config
|
pkg-config
|
||||||
|
|
||||||
# Install protoc.
|
# Install protoc.
|
||||||
ENV PROTOC_VERSION=25.1
|
ENV PROTOC_VERSION=29.3
|
||||||
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
||||||
elif [ "$(uname -m)" = "aarch64" ]; then \
|
elif [ "$(uname -m)" = "aarch64" ]; then \
|
||||||
|
|||||||
@@ -39,14 +39,16 @@ services:
|
|||||||
container_name: metasrv
|
container_name: metasrv
|
||||||
ports:
|
ports:
|
||||||
- 3002:3002
|
- 3002:3002
|
||||||
|
- 3000:3000
|
||||||
command:
|
command:
|
||||||
- metasrv
|
- metasrv
|
||||||
- start
|
- start
|
||||||
- --bind-addr=0.0.0.0:3002
|
- --bind-addr=0.0.0.0:3002
|
||||||
- --server-addr=metasrv:3002
|
- --server-addr=metasrv:3002
|
||||||
- --store-addrs=etcd0:2379
|
- --store-addrs=etcd0:2379
|
||||||
|
- --http-addr=0.0.0.0:3000
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
|
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
timeout: 3s
|
timeout: 3s
|
||||||
retries: 5
|
retries: 5
|
||||||
@@ -73,10 +75,10 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
timeout: 3s
|
timeout: 3s
|
||||||
retries: 5
|
retries: 10
|
||||||
depends_on:
|
depends_on:
|
||||||
metasrv:
|
metasrv:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
@@ -115,6 +117,7 @@ services:
|
|||||||
container_name: flownode0
|
container_name: flownode0
|
||||||
ports:
|
ports:
|
||||||
- 4004:4004
|
- 4004:4004
|
||||||
|
- 4005:4005
|
||||||
command:
|
command:
|
||||||
- flownode
|
- flownode
|
||||||
- start
|
- start
|
||||||
@@ -122,9 +125,15 @@ services:
|
|||||||
- --metasrv-addrs=metasrv:3002
|
- --metasrv-addrs=metasrv:3002
|
||||||
- --rpc-addr=0.0.0.0:4004
|
- --rpc-addr=0.0.0.0:4004
|
||||||
- --rpc-hostname=flownode0:4004
|
- --rpc-hostname=flownode0:4004
|
||||||
|
- --http-addr=0.0.0.0:4005
|
||||||
depends_on:
|
depends_on:
|
||||||
frontend0:
|
frontend0:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 3s
|
||||||
|
retries: 5
|
||||||
networks:
|
networks:
|
||||||
- greptimedb
|
- greptimedb
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
- numpy>=1.24.2
- pandas>=1.5.3
- pyarrow>=11.0.0
- requests>=2.28.2
- scipy>=1.10.1
flake.lock (generated, new file, 100 lines)
@@ -0,0 +1,100 @@
|
|||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"fenix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737613896,
|
||||||
|
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"rev": "303a062fdd8e89f233db05868468975d17855d80",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"inputs": {
|
||||||
|
"systems": "systems"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1731533236,
|
||||||
|
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737569578,
|
||||||
|
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-24.11",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"fenix": "fenix",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rust-analyzer-src": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737581772,
|
||||||
|
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"ref": "nightly",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"systems": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1681028828,
|
||||||
|
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
||||||
flake.nix (56 lines added)
@@ -0,0 +1,56 @@
{
  description = "Development environment flake";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, fenix, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};
        buildInputs = with pkgs; [
          libgit2
          libz
        ];
        lib = nixpkgs.lib;
        rustToolchain = fenix.packages.${system}.fromToolchainName {
          name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
          sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
        };
      in
      {
        devShells.default = pkgs.mkShell {
          nativeBuildInputs = with pkgs; [
            pkg-config
            git
            clang
            gcc
            protobuf
            gnumake
            mold
            (rustToolchain.withComponents [
              "cargo"
              "clippy"
              "rust-src"
              "rustc"
              "rustfmt"
              "rust-analyzer"
              "llvm-tools"
            ])
            cargo-nextest
            cargo-llvm-cov
            taplo
            curl
            gnuplot ## for cargo bench
          ];

          LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
        };
      });
}
File diff suppressed because it is too large
@@ -1,3 +1,2 @@
 [toolchain]
-channel = "nightly-2024-10-19"
+channel = "nightly-2024-12-25"
-components = ["rust-analyzer"]
@@ -14,6 +14,7 @@
 
 import os
 import re
+from multiprocessing import Pool
 
 
 def find_rust_files(directory):
@@ -33,13 +34,11 @@ def extract_branch_names(file_content):
     return pattern.findall(file_content)
 
 
-def check_snafu_in_files(branch_name, rust_files):
+def check_snafu_in_files(branch_name, rust_files_content):
     branch_name_snafu = f"{branch_name}Snafu"
-    for rust_file in rust_files:
-        with open(rust_file, "r") as file:
-            content = file.read()
-            if branch_name_snafu in content:
-                return True
+    for content in rust_files_content.values():
+        if branch_name_snafu in content:
+            return True
     return False
 
 
@@ -49,21 +48,24 @@ def main():
 
     for error_file in error_files:
         with open(error_file, "r") as file:
-            content = file.read()
-            branch_names.extend(extract_branch_names(content))
+            branch_names.extend(extract_branch_names(file.read()))
 
-    unused_snafu = [
-        branch_name
-        for branch_name in branch_names
-        if not check_snafu_in_files(branch_name, other_rust_files)
-    ]
+    # Read all rust files into memory once
+    rust_files_content = {}
+    for rust_file in other_rust_files:
+        with open(rust_file, "r") as file:
+            rust_files_content[rust_file] = file.read()
+
+    with Pool() as pool:
+        results = pool.starmap(
+            check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
+        )
+    unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
 
     if unused_snafu:
         print("Unused error variants:")
         for name in unused_snafu:
             print(name)
 
-    if unused_snafu:
         raise SystemExit(1)
shell.nix (deleted, 27 lines)
@@ -1,27 +0,0 @@
let
  nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
  fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
  pkgs = import nixpkgs { config = {}; overlays = []; };
in

pkgs.mkShell rec {
  nativeBuildInputs = with pkgs; [
    pkg-config
    git
    clang
    gcc
    protobuf
    mold
    (fenix.fromToolchainFile {
      dir = ./.;
    })
    cargo-nextest
    taplo
  ];

  buildInputs = with pkgs; [
    libgit2
  ];

  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}
@@ -33,7 +33,7 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
-        error: prost::DecodeError,
+        error: prost::UnknownEnumValue,
     },
 
     #[snafu(display("Failed to create column datatype from {:?}", from))]
@@ -86,7 +86,7 @@ impl ColumnDataTypeWrapper {
 
     /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
     pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
-        (self.datatype, self.datatype_ext.clone())
+        (self.datatype, self.datatype_ext)
     }
 }
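Dropping the `.clone()` in `to_parts` implies the extension value can now be returned by copy (i.e. the field type, or at least its `Option` wrapper, is `Copy`). A minimal standalone sketch of that pattern with hypothetical stand-in types, not the real `ColumnDataTypeExtension`:

// Minimal sketch with hypothetical stand-in types (not the real greptime types).
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ext(u8);

struct Wrapper {
    ext: Option<Ext>,
}

impl Wrapper {
    // Returning `self.ext` by value from `&self` compiles only because
    // `Option<Ext>` is `Copy`; without `Copy` this line would need `.clone()`.
    fn to_parts(&self) -> Option<Ext> {
        self.ext
    }
}

fn main() {
    let w = Wrapper { ext: Some(Ext(1)) };
    assert_eq!(w.to_parts(), Some(Ext(1)));
    assert_eq!(w.to_parts(), Some(Ext(1))); // `w.ext` is still intact.
}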
@@ -685,14 +685,18 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
|||||||
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
|
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
|
||||||
values.interval_year_month_values,
|
values.interval_year_month_values,
|
||||||
)),
|
)),
|
||||||
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
|
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values(
|
||||||
values.interval_day_time_values,
|
values
|
||||||
|
.interval_day_time_values
|
||||||
|
.iter()
|
||||||
|
.map(|x| IntervalDayTime::from_i64(*x).into()),
|
||||||
)),
|
)),
|
||||||
IntervalType::MonthDayNano(_) => {
|
IntervalType::MonthDayNano(_) => {
|
||||||
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
|
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
|
||||||
values.interval_month_day_nano_values.iter().map(|x| {
|
values
|
||||||
IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
|
.interval_month_day_nano_values
|
||||||
}),
|
.iter()
|
||||||
|
.map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -1495,14 +1499,22 @@ mod tests {
             column.values.as_ref().unwrap().interval_year_month_values
         );
 
-        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
+        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![
+            IntervalDayTime::new(0, 4).into(),
+            IntervalDayTime::new(0, 5).into(),
+            IntervalDayTime::new(0, 6).into(),
+        ]));
         push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![4, 5, 6],
             column.values.as_ref().unwrap().interval_day_time_values
         );
 
-        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
+        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![
+            IntervalMonthDayNano::new(0, 0, 7).into(),
+            IntervalMonthDayNano::new(0, 0, 8).into(),
+            IntervalMonthDayNano::new(0, 0, 9).into(),
+        ]));
         let len = vector.len();
         push_vals(&mut column, 3, vector);
         (0..len).for_each(|i| {
@@ -34,10 +34,8 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
 
 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
-    let data_type = ColumnDataTypeWrapper::try_new(
-        column_def.data_type,
-        column_def.datatype_extension.clone(),
-    )?;
+    let data_type =
+        ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
 
     let constraint = if column_def.default_constraint.is_empty() {
         None
@@ -57,13 +55,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     }
     if let Some(options) = column_def.options.as_ref() {
         if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
-            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
+            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
         }
         if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
-            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
+            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
        }
         if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
-            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
+            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
         }
     }
 
@@ -82,7 +80,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
     if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
         options
             .options
-            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
+            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
     }
     if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
         options
@@ -102,7 +100,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
 pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
     options
         .as_ref()
-        .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
+        .is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
 }
 
 /// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
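The change from `map_or(false, …)` to `is_some_and(…)` here (and in `CachedKvBackend` further down) is the clippy-suggested modernization. A minimal standalone sketch of the equivalence on plain `Option` values, unrelated to the GreptimeDB types:

fn main() {
    let present: Option<i32> = Some(3);
    let absent: Option<i32> = None;

    // `is_some_and` returns false for `None` and applies the predicate otherwise,
    // which is exactly what `map_or(false, ...)` spelled out.
    assert_eq!(present.map_or(false, |v| v > 2), present.is_some_and(|v| v > 2));
    assert_eq!(absent.map_or(false, |v| v > 2), absent.is_some_and(|v| v > 2));
    println!("equivalent");
}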
@@ -181,14 +179,14 @@ mod tests {
         let options = options_from_column_schema(&schema);
         assert!(options.is_none());
 
-        let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
+        let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
             .with_fulltext_options(FulltextOptions {
                 enable: true,
                 analyzer: FulltextAnalyzer::English,
                 case_sensitive: false,
             })
-            .unwrap()
-            .set_inverted_index(true);
+            .unwrap();
+        schema.set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
@@ -122,13 +122,6 @@ pub enum Error {
         source: BoxedError,
     },
 
-    #[snafu(display("Failed to re-compile script due to internal error"))]
-    CompileScriptInternal {
-        #[snafu(implicit)]
-        location: Location,
-        source: BoxedError,
-    },
-
     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
         table_info: String,
@@ -343,9 +336,7 @@ impl ErrorExt for Error {
             Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
 
-            Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
-                source.status_code()
-            }
+            Error::Internal { source, .. } => source.status_code(),
 
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
@@ -303,7 +303,7 @@ impl KvBackend for CachedKvBackend {
             .lock()
             .unwrap()
             .as_ref()
-            .map_or(false, |v| !self.validate_version(*v))
+            .is_some_and(|v| !self.validate_version(*v))
         {
             self.cache.invalidate(key).await;
         }
@@ -41,6 +41,7 @@ pub mod information_schema {
 }
 
 pub mod table_source;
+
 #[async_trait::async_trait]
 pub trait CatalogManager: Send + Sync {
     fn as_any(&self) -> &dyn Any;
@@ -64,6 +64,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `uptime`: the uptime of the peer.
 /// - `active_time`: the time since the last activity of the peer.
 ///
+#[derive(Debug)]
 pub(super) struct InformationSchemaClusterInfo {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,
@@ -45,6 +45,7 @@ use crate::error::{
 use crate::information_schema::Predicates;
 use crate::CatalogManager;
 
+#[derive(Debug)]
 pub(super) struct InformationSchemaColumns {
     schema: SchemaRef,
     catalog_name: String,
@@ -61,7 +61,7 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
 pub const OPTIONS: &str = "options";
 
 /// The `information_schema.flows` to provides information about flows in databases.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaFlows {
     schema: SchemaRef,
     catalog_name: String,
@@ -58,8 +58,11 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
 pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
 /// Fulltext index constraint name
 pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
+/// Skipping index constraint name
+pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
 
 /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
+#[derive(Debug)]
 pub(super) struct InformationSchemaKeyColumnUsage {
     schema: SchemaRef,
     catalog_name: String,
@@ -225,6 +228,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
             let keys = &table_info.meta.primary_key_indices;
             let schema = table.schema();
 
+            // For compatibility, use primary key columns as inverted index columns.
+            let pk_as_inverted_index = !schema
+                .column_schemas()
+                .iter()
+                .any(|c| c.has_inverted_index_key());
+
             for (idx, column) in schema.column_schemas().iter().enumerate() {
                 let mut constraints = vec![];
                 if column.is_time_index() {
@@ -242,14 +251,20 @@ impl InformationSchemaKeyColumnUsageBuilder {
                 // TODO(dimbtp): foreign key constraint not supported yet
                 if keys.contains(&idx) {
                     constraints.push(PRI_CONSTRAINT_NAME);
+
+                    if pk_as_inverted_index {
+                        constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
+                    }
                 }
                 if column.is_inverted_indexed() {
                     constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
                 }
-                if column.has_fulltext_index_key() {
+                if column.is_fulltext_indexed() {
                     constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
                 }
+                if column.is_skipping_indexed() {
+                    constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
+                }
 
                 if !constraints.is_empty() {
                     let aggregated_constraints = constraints.join(", ");
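A self-contained sketch of the compatibility rule introduced above, using simplified stand-in types rather than the real `ColumnSchema` API: when no column declares an inverted index explicitly, the primary key columns are reported with the `INVERTED INDEX` constraint.

// Simplified stand-in for a column schema; only the shape of the check matters.
struct Column {
    name: &'static str,
    has_inverted_index_key: bool,
}

fn main() {
    let columns = [
        Column { name: "host", has_inverted_index_key: false },
        Column { name: "ts", has_inverted_index_key: false },
    ];
    let primary_key = ["host"];

    // No column declares an inverted index, so fall back to the primary key.
    let pk_as_inverted_index = !columns.iter().any(|c| c.has_inverted_index_key);

    for c in &columns {
        let mut constraints = vec![];
        if primary_key.contains(&c.name) {
            constraints.push("PRIMARY KEY");
            if pk_as_inverted_index {
                constraints.push("INVERTED INDEX");
            }
        }
        println!("{}: {}", c.name, constraints.join(", "));
    }
}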
@@ -59,6 +59,7 @@ const INIT_CAPACITY: usize = 42;
 /// The `PARTITIONS` table provides information about partitioned tables.
 /// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
 /// We provide an extral column `greptime_partition_id` for GreptimeDB region id.
+#[derive(Debug)]
 pub(super) struct InformationSchemaPartitions {
     schema: SchemaRef,
     catalog_name: String,
@@ -56,7 +56,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `end_time`: the ending execution time of the procedure.
 /// - `status`: the status of the procedure.
 /// - `lock_keys`: the lock keys of the procedure.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaProcedureInfo {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,
@@ -59,7 +59,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `is_leader`: whether the peer is the leader
 /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
 /// - `down_seconds`: the duration of being offline, in seconds.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionPeers {
     schema: SchemaRef,
     catalog_name: String,
@@ -63,7 +63,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `index_size`: The sst index files size in bytes.
 /// - `engine`: The engine type.
 /// - `region_role`: The region role.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionStatistics {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,
@@ -38,6 +38,7 @@ use store_api::storage::{ScanRequest, TableId};
 use super::{InformationTable, RUNTIME_METRICS};
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 
+#[derive(Debug)]
 pub(super) struct InformationSchemaMetrics {
     schema: SchemaRef,
 }
@@ -49,6 +49,7 @@ pub const SCHEMA_OPTS: &str = "options";
 const INIT_CAPACITY: usize = 42;
 
 /// The `information_schema.schemata` table implementation.
+#[derive(Debug)]
 pub(super) struct InformationSchemaSchemata {
     schema: SchemaRef,
     catalog_name: String,
@@ -43,6 +43,7 @@ use crate::information_schema::Predicates;
 use crate::CatalogManager;
 
 /// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
+#[derive(Debug)]
 pub(super) struct InformationSchemaTableConstraints {
     schema: SchemaRef,
     catalog_name: String,
@@ -71,6 +71,7 @@ const TABLE_ID: &str = "table_id";
 pub const ENGINE: &str = "engine";
 const INIT_CAPACITY: usize = 42;
 
+#[derive(Debug)]
 pub(super) struct InformationSchemaTables {
     schema: SchemaRef,
     catalog_name: String,
@@ -54,6 +54,7 @@ pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
 pub const COLLATION_CONNECTION: &str = "collation_connection";
 
 /// The `information_schema.views` to provides information about views in databases.
+#[derive(Debug)]
 pub(super) struct InformationSchemaViews {
     schema: SchemaRef,
     catalog_name: String,
@@ -33,6 +33,7 @@ use super::SystemTable;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 
 /// A memory table with specified schema and columns.
+#[derive(Debug)]
 pub(crate) struct MemoryTable {
     pub(crate) table_id: TableId,
     pub(crate) table_name: &'static str,
@@ -14,6 +14,7 @@
 
 mod pg_catalog_memory_table;
 mod pg_class;
+mod pg_database;
 mod pg_namespace;
 mod table_names;
 
@@ -26,6 +27,7 @@ use lazy_static::lazy_static;
 use paste::paste;
 use pg_catalog_memory_table::get_schema_columns;
 use pg_class::PGClass;
+use pg_database::PGDatabase;
 use pg_namespace::PGNamespace;
 use session::context::{Channel, QueryContext};
 use table::TableRef;
@@ -113,6 +115,10 @@ impl PGCatalogProvider {
             PG_CLASS.to_string(),
             self.build_table(PG_CLASS).expect(PG_NAMESPACE),
         );
+        tables.insert(
+            PG_DATABASE.to_string(),
+            self.build_table(PG_DATABASE).expect(PG_DATABASE),
+        );
         self.tables = tables;
     }
 }
@@ -135,6 +141,11 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
                 self.catalog_manager.clone(),
                 self.namespace_oid_map.clone(),
             ))),
+            table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+                self.namespace_oid_map.clone(),
+            ))),
             _ => None,
         }
     }
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::fmt;
 use std::sync::{Arc, Weak};
 
 use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -100,6 +101,15 @@ impl PGClass {
     }
 }
 
+impl fmt::Debug for PGClass {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("PGClass")
+            .field("schema", &self.schema)
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 impl SystemTable for PGClass {
     fn table_id(&self) -> table::metadata::TableId {
         PG_CATALOG_PG_CLASS_TABLE_ID
src/catalog/src/system_schema/pg_catalog/pg_database.rs (223 lines added)
@@ -0,0 +1,223 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;

use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;

// === column name ===
pub const DATNAME: &str = "datname";

/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;

/// The `pg_catalog.database` table implementation.
pub(super) struct PGDatabase {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,

    // Workaround to convert schema_name to a numeric id
    namespace_oid_map: PGNamespaceOidMapRef,
}

impl std::fmt::Debug for PGDatabase {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PGDatabase")
            .field("schema", &self.schema)
            .field("catalog_name", &self.catalog_name)
            .finish()
    }
}

impl PGDatabase {
    pub(super) fn new(
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
        namespace_oid_map: PGNamespaceOidMapRef,
    ) -> Self {
        Self {
            schema: Self::schema(),
            catalog_name,
            catalog_manager,
            namespace_oid_map,
        }
    }

    fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            u32_column(OID_COLUMN_NAME),
            string_column(DATNAME),
        ]))
    }

    fn builder(&self) -> PGCDatabaseBuilder {
        PGCDatabaseBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
            self.namespace_oid_map.clone(),
        )
    }
}

impl DfPartitionStream for PGDatabase {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_database(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

impl SystemTable for PGDatabase {
    fn table_id(&self) -> table::metadata::TableId {
        PG_CATALOG_PG_DATABASE_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        PG_DATABASE
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(
        &self,
        request: ScanRequest,
    ) -> Result<common_recordbatch::SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_database(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

/// Builds the `pg_catalog.pg_database` table row by row
/// `oid` use schema name as a workaround since we don't have numeric schema id.
/// `nspname` is the schema name.
struct PGCDatabaseBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
    namespace_oid_map: PGNamespaceOidMapRef,

    oid: UInt32VectorBuilder,
    datname: StringVectorBuilder,
}

impl PGCDatabaseBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
        namespace_oid_map: PGNamespaceOidMapRef,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_manager,
            namespace_oid_map,

            oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
            datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        let predicates = Predicates::from_scan_request(&request);
        for schema_name in catalog_manager
            .schema_names(&catalog_name, query_ctx())
            .await?
        {
            self.add_database(&predicates, &schema_name);
        }
        self.finish()
    }

    fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
        let oid = self.namespace_oid_map.get_oid(schema_name);
        let row: [(&str, &Value); 2] = [
            (OID_COLUMN_NAME, &Value::from(oid)),
            (DATNAME, &Value::from(schema_name)),
        ];

        if !predicates.eval(&row) {
            return;
        }

        self.oid.push(Some(oid));
        self.datname.push(Some(schema_name));
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let columns: Vec<VectorRef> =
            vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}
@@ -17,6 +17,7 @@
 
 pub(super) mod oid_map;
 
+use std::fmt;
 use std::sync::{Arc, Weak};
 
 use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -87,6 +88,15 @@ impl PGNamespace {
     }
 }
 
+impl fmt::Debug for PGNamespace {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("PGNamespace")
+            .field("schema", &self.schema)
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 impl SystemTable for PGNamespace {
     fn schema(&self) -> SchemaRef {
         self.schema.clone()
@@ -12,7 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-pub const PG_DATABASE: &str = "pg_databases";
+// https://www.postgresql.org/docs/current/catalog-pg-database.html
+pub const PG_DATABASE: &str = "pg_database";
+// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
 pub const PG_NAMESPACE: &str = "pg_namespace";
+// https://www.postgresql.org/docs/current/catalog-pg-class.html
 pub const PG_CLASS: &str = "pg_class";
+// https://www.postgresql.org/docs/current/catalog-pg-type.html
 pub const PG_TYPE: &str = "pg_type";
@@ -365,7 +365,7 @@ mod tests {
 Projection: person.id AS a, person.name AS b
   Filter: person.id > Int32(500)
     TableScan: person"#,
-            format!("\n{:?}", source.get_logical_plan().unwrap())
+            format!("\n{}", source.get_logical_plan().unwrap())
         );
     }
 }
@@ -15,12 +15,12 @@
 //! Dummy catalog for region server.
 
 use std::any::Any;
+use std::fmt;
 use std::sync::Arc;
 
 use async_trait::async_trait;
 use common_catalog::format_full_table_name;
-use datafusion::catalog::schema::SchemaProvider;
-use datafusion::catalog::{CatalogProvider, CatalogProviderList};
+use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider};
 use datafusion::datasource::TableProvider;
 use snafu::OptionExt;
 use table::table::adapter::DfTableProviderAdapter;
@@ -41,6 +41,12 @@ impl DummyCatalogList {
     }
 }
 
+impl fmt::Debug for DummyCatalogList {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummyCatalogList").finish()
+    }
+}
+
 impl CatalogProviderList for DummyCatalogList {
     fn as_any(&self) -> &dyn Any {
         self
@@ -91,6 +97,14 @@ impl CatalogProvider for DummyCatalogProvider {
     }
 }
 
+impl fmt::Debug for DummyCatalogProvider {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummyCatalogProvider")
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 /// A dummy schema provider for [DummyCatalogList].
 #[derive(Clone)]
 struct DummySchemaProvider {
@@ -127,3 +141,12 @@ impl SchemaProvider for DummySchemaProvider {
         true
     }
 }
+
+impl fmt::Debug for DummySchemaProvider {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummySchemaProvider")
+            .field("catalog_name", &self.catalog_name)
+            .field("schema_name", &self.schema_name)
+            .finish()
+    }
+}
@@ -4,6 +4,9 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true
 
+[features]
+pg_kvbackend = ["common-meta/pg_kvbackend"]
+
 [lints]
 workspace = true
 
@@ -56,7 +59,6 @@ tokio.workspace = true
 tracing-appender.workspace = true
 
 [dev-dependencies]
-common-test-util.workspace = true
 common-version.workspace = true
 serde.workspace = true
 tempfile.workspace = true
@@ -22,6 +22,9 @@ use clap::Parser;
 use common_error::ext::BoxedError;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::memory::MemoryKvBackend;
+#[cfg(feature = "pg_kvbackend")]
+use common_meta::kv_backend::postgres::PgStore;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::{Region, RegionRoute};
 use common_telemetry::info;
@@ -55,18 +58,34 @@ where
 #[derive(Debug, Default, Parser)]
 pub struct BenchTableMetadataCommand {
     #[clap(long)]
-    etcd_addr: String,
+    etcd_addr: Option<String>,
+    #[cfg(feature = "pg_kvbackend")]
+    #[clap(long)]
+    postgres_addr: Option<String>,
     #[clap(long)]
     count: u32,
 }
 
 impl BenchTableMetadataCommand {
     pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-        let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
-            .await
-            .unwrap();
+        let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
+            info!("Using etcd as kv backend");
+            EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
+        } else {
+            Arc::new(MemoryKvBackend::new())
+        };
 
-        let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
+        #[cfg(feature = "pg_kvbackend")]
+        let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
+            info!("Using postgres as kv backend");
+            PgStore::with_url(postgres_addr, "greptime_metakv", 128)
+                .await
+                .unwrap()
+        } else {
+            kv_backend
+        };
+
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
 
         let tool = BenchTableMetadata {
             table_metadata_manager,
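A standalone sketch of the selection pattern used in `build` above: each optional backend re-binds (shadows) `kv_backend`, so a feature-gated block can override the earlier choice without nesting; plain strings stand in for the real kv-backend handles.

// Self-contained sketch; strings stand in for real backend handles.
fn main() {
    let etcd_addr: Option<&str> = None;
    let postgres_addr: Option<&str> = Some("postgres://localhost:5432");

    let kv_backend = if let Some(addr) = etcd_addr {
        format!("etcd backend at {addr}")
    } else {
        "in-memory backend".to_string()
    };

    // A second (possibly cfg-gated) block can shadow the binding again,
    // falling back to the previous choice when its option is unset.
    let kv_backend = if let Some(addr) = postgres_addr {
        format!("postgres backend at {addr}")
    } else {
        kv_backend
    };

    println!("{kv_backend}");
}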
@@ -17,6 +17,7 @@ use std::time::Duration;
 use base64::engine::general_purpose;
 use base64::Engine;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::ext::BoxedError;
 use humantime::format_duration;
 use serde_json::Value;
 use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
@@ -24,7 +25,9 @@ use servers::http::result::greptime_result_v1::GreptimedbV1Response;
 use servers::http::GreptimeQueryOutput;
 use snafu::ResultExt;
 
-use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
+use crate::error::{
+    BuildClientSnafu, HttpQuerySqlSnafu, ParseProxyOptsSnafu, Result, SerdeJsonSnafu,
+};
 
 #[derive(Debug, Clone)]
 pub struct DatabaseClient {
@@ -32,6 +35,23 @@ pub struct DatabaseClient {
     catalog: String,
     auth_header: Option<String>,
     timeout: Duration,
+    proxy: Option<reqwest::Proxy>,
+}
+
+pub fn parse_proxy_opts(
+    proxy: Option<String>,
+    no_proxy: bool,
+) -> std::result::Result<Option<reqwest::Proxy>, BoxedError> {
+    if no_proxy {
+        return Ok(None);
+    }
+    proxy
+        .map(|proxy| {
+            reqwest::Proxy::all(proxy)
+                .context(ParseProxyOptsSnafu)
+                .map_err(BoxedError::new)
+        })
+        .transpose()
 }
 
 impl DatabaseClient {
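A standalone sketch of the `Option`-to-`Result` flip that `parse_proxy_opts` relies on: `map` yields `Option<Result<…>>` and `transpose` turns it into `Result<Option<…>>`, so `no_proxy` short-circuits to `Ok(None)` and an invalid proxy address becomes an error. `parse_proxy` below is a hypothetical stand-in for `reqwest::Proxy::all`.

// Hypothetical stand-in for reqwest::Proxy::all: parse a proxy URL, may fail.
fn parse_proxy(url: &str) -> Result<String, String> {
    if url.starts_with("http://") || url.starts_with("https://") {
        Ok(url.to_string())
    } else {
        Err(format!("invalid proxy url: {url}"))
    }
}

fn parse_proxy_opts(proxy: Option<String>, no_proxy: bool) -> Result<Option<String>, String> {
    if no_proxy {
        return Ok(None);
    }
    // Option<Result<_, _>> -> Result<Option<_>, _>
    proxy.as_deref().map(parse_proxy).transpose()
}

fn main() {
    assert_eq!(parse_proxy_opts(Some("http://proxy.example:8080".into()), true), Ok(None));
    assert!(parse_proxy_opts(Some("not-a-url".into()), false).is_err());
    assert!(parse_proxy_opts(None, false).unwrap().is_none());
}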
@@ -40,6 +60,7 @@ impl DatabaseClient {
         catalog: String,
         auth_basic: Option<String>,
         timeout: Duration,
+        proxy: Option<reqwest::Proxy>,
     ) -> Self {
         let auth_header = if let Some(basic) = auth_basic {
             let encoded = general_purpose::STANDARD.encode(basic);
@@ -48,11 +69,18 @@ impl DatabaseClient {
             None
         };
 
+        if let Some(ref proxy) = proxy {
+            common_telemetry::info!("Using proxy: {:?}", proxy);
+        } else {
+            common_telemetry::info!("Using system proxy(if any)");
+        }
+
         Self {
             addr,
             catalog,
             auth_header,
             timeout,
+            proxy,
         }
     }
 
@@ -67,7 +95,13 @@ impl DatabaseClient {
             ("db", format!("{}-{}", self.catalog, schema)),
             ("sql", sql.to_string()),
         ];
-        let mut request = reqwest::Client::new()
+        let client = self
+            .proxy
+            .clone()
+            .map(|proxy| reqwest::Client::builder().proxy(proxy).build())
+            .unwrap_or_else(|| Ok(reqwest::Client::new()))
+            .context(BuildClientSnafu)?;
+        let mut request = client
             .post(&url)
             .form(&params)
             .header("Content-Type", "application/x-www-form-urlencoded");
@@ -86,6 +86,22 @@ pub enum Error {
         location: Location,
     },
 
+    #[snafu(display("Failed to parse proxy options: {}", error))]
+    ParseProxyOpts {
+        #[snafu(source)]
+        error: reqwest::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to build reqwest client: {}", error))]
+    BuildClient {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: reqwest::Error,
+    },
+
     #[snafu(display("Invalid REPL command: {reason}"))]
     InvalidReplCommand { reason: String },
 
@@ -278,7 +294,8 @@ impl ErrorExt for Error {
             | Error::InitTimezone { .. }
             | Error::ConnectEtcd { .. }
             | Error::CreateDir { .. }
-            | Error::EmptyResult { .. } => StatusCode::InvalidArguments,
+            | Error::EmptyResult { .. }
+            | Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
 
             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
@@ -298,7 +315,8 @@ impl ErrorExt for Error {
             Error::SerdeJson { .. }
             | Error::FileIo { .. }
             | Error::SpawnThread { .. }
-            | Error::InitTlsProvider { .. } => StatusCode::Unexpected,
+            | Error::InitTlsProvider { .. }
+            | Error::BuildClient { .. } => StatusCode::Unexpected,
 
             Error::Other { source, .. } => source.status_code(),
 
@@ -28,7 +28,7 @@ use tokio::io::{AsyncWriteExt, BufWriter};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;
 
-use crate::database::DatabaseClient;
+use crate::database::{parse_proxy_opts, DatabaseClient};
 use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
 use crate::{database, Tool};
 
@@ -91,19 +91,30 @@ pub struct ExportCommand {
     /// The default behavior will disable server-side default timeout(i.e. `0s`).
     #[clap(long, value_parser = humantime::parse_duration)]
     timeout: Option<Duration>,
 
+    /// The proxy server address to connect, if set, will override the system proxy.
+    ///
+    /// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
+    #[clap(long)]
+    proxy: Option<String>,
+
+    /// Disable proxy server, if set, will not use any proxy.
+    #[clap(long)]
+    no_proxy: bool,
 }
 
 impl ExportCommand {
     pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
         let (catalog, schema) =
             database::split_database(&self.database).map_err(BoxedError::new)?;
+        let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
         let database_client = DatabaseClient::new(
             self.addr.clone(),
             catalog.clone(),
             self.auth_basic.clone(),
             // Treats `None` as `0s` to disable server-side default timeout.
             self.timeout.unwrap_or_default(),
+            proxy,
         );
 
         Ok(Box::new(Export {
|
|||||||
use tokio::sync::Semaphore;
|
use tokio::sync::Semaphore;
|
||||||
use tokio::time::Instant;
|
use tokio::time::Instant;
|
||||||
|
|
||||||
use crate::database::DatabaseClient;
|
use crate::database::{parse_proxy_opts, DatabaseClient};
|
||||||
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||||
use crate::{database, Tool};
|
use crate::{database, Tool};
|
||||||
|
|
||||||
@@ -76,18 +76,30 @@ pub struct ImportCommand {
|
|||||||
/// The default behavior will disable server-side default timeout(i.e. `0s`).
|
/// The default behavior will disable server-side default timeout(i.e. `0s`).
|
||||||
#[clap(long, value_parser = humantime::parse_duration)]
|
#[clap(long, value_parser = humantime::parse_duration)]
|
||||||
timeout: Option<Duration>,
|
timeout: Option<Duration>,
|
||||||
|
|
||||||
|
/// The proxy server address to connect, if set, will override the system proxy.
|
||||||
|
///
|
||||||
|
/// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
|
||||||
|
#[clap(long)]
|
||||||
|
proxy: Option<String>,
|
||||||
|
|
||||||
|
/// Disable proxy server, if set, will not use any proxy.
|
||||||
|
#[clap(long, default_value = "false")]
|
||||||
|
no_proxy: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ImportCommand {
|
impl ImportCommand {
|
||||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||||
let (catalog, schema) =
|
let (catalog, schema) =
|
||||||
database::split_database(&self.database).map_err(BoxedError::new)?;
|
database::split_database(&self.database).map_err(BoxedError::new)?;
|
||||||
|
let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
|
||||||
let database_client = DatabaseClient::new(
|
let database_client = DatabaseClient::new(
|
||||||
self.addr.clone(),
|
self.addr.clone(),
|
||||||
catalog.clone(),
|
catalog.clone(),
|
||||||
self.auth_basic.clone(),
|
self.auth_basic.clone(),
|
||||||
// Treats `None` as `0s` to disable server-side default timeout.
|
// Treats `None` as `0s` to disable server-side default timeout.
|
||||||
self.timeout.unwrap_or_default(),
|
self.timeout.unwrap_or_default(),
|
||||||
|
proxy,
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(Box::new(Import {
|
Ok(Box::new(Import {
|
||||||
|
|||||||
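Note on the two hunks above: both the export and import commands gain identical `--proxy` / `--no-proxy` flags and pass the parsed result into `DatabaseClient::new`. The body of `parse_proxy_opts` is not shown in this diff; the sketch below only illustrates the precedence the flag comments describe (explicit proxy overrides the system proxy, `no_proxy` disables proxying, otherwise fall back to the system proxy). `ProxyChoice` is a hypothetical stand-in for whatever type the real helper returns.

```rust
// Illustrative only: the real parse_proxy_opts lives in crate::database and may
// return a reqwest proxy or an error instead of this stand-in enum.
#[derive(Debug, PartialEq)]
enum ProxyChoice {
    /// Use the explicitly configured proxy URL.
    Explicit(String),
    /// Disable proxying entirely.
    Disabled,
    /// Fall back to the system proxy settings.
    System,
}

fn parse_proxy_opts_sketch(proxy: Option<String>, no_proxy: bool) -> ProxyChoice {
    if no_proxy {
        ProxyChoice::Disabled
    } else if let Some(url) = proxy {
        ProxyChoice::Explicit(url)
    } else {
        ProxyChoice::System
    }
}

fn main() {
    assert_eq!(parse_proxy_opts_sketch(None, true), ProxyChoice::Disabled);
    assert_eq!(parse_proxy_opts_sketch(None, false), ProxyChoice::System);
}
```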
@@ -10,9 +10,8 @@ name = "greptime"
 path = "src/bin/greptime.rs"

 [features]
-default = ["python", "servers/pprof", "servers/mem-prof"]
+default = ["servers/pprof", "servers/mem-prof"]
 tokio-console = ["common-telemetry/tokio-console"]
-python = ["frontend/python"]

 [lints]
 workspace = true
@@ -58,6 +57,7 @@ humantime.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
 meta-srv.workspace = true
+metric-engine.workspace = true
 mito2.workspace = true
 moka.workspace = true
 nu-ansi-term = "0.46"
@@ -51,8 +51,7 @@ impl App for Instance {
     }

     async fn start(&mut self) -> Result<()> {
-        self.start().await.unwrap();
-        Ok(())
+        self.start().await
     }

     fn wait_signal(&self) -> bool {
@@ -62,6 +62,11 @@ impl Instance {
     pub fn datanode(&self) -> &Datanode {
        &self.datanode
     }
+
+    /// allow customizing datanode for downstream projects
+    pub fn datanode_mut(&mut self) -> &mut Datanode {
+        &mut self.datanode
+    }
 }

 #[async_trait]
@@ -271,7 +276,8 @@ impl StartCommand {
         info!("Datanode options: {:#?}", opts);

         let plugin_opts = opts.plugins;
-        let opts = opts.component;
+        let mut opts = opts.component;
+        opts.grpc.detect_hostname();
         let mut plugins = Plugins::new();
         plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
             .await
@@ -345,6 +345,13 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to build wal options allocator"))]
+    BuildWalOptionsAllocator {
+        #[snafu(implicit)]
+        location: Location,
+        source: common_meta::error::Error,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -378,7 +385,8 @@ impl ErrorExt for Error {

             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
-            Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
+            Error::BuildWalOptionsAllocator { source, .. }
+            | Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
             Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
                 StatusCode::Internal
             }
@@ -13,6 +13,7 @@
 // limitations under the License.

 use std::sync::Arc;
+use std::time::Duration;

 use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
 use catalog::information_extension::DistributedInformationExtension;
@@ -66,6 +67,11 @@ impl Instance {
     pub fn flownode(&self) -> &FlownodeInstance {
         &self.flownode
     }
+
+    /// allow customizing flownode for downstream projects
+    pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
+        &mut self.flownode
+    }
 }

 #[async_trait::async_trait]
@@ -137,6 +143,11 @@ struct StartCommand {
     /// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`;
     #[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
     env_prefix: String,
+    #[clap(long)]
+    http_addr: Option<String>,
+    /// HTTP request timeout in seconds.
+    #[clap(long)]
+    http_timeout: Option<u64>,
 }

 impl StartCommand {
@@ -193,6 +204,14 @@ impl StartCommand {
             opts.mode = Mode::Distributed;
         }

+        if let Some(http_addr) = &self.http_addr {
+            opts.http.addr.clone_from(http_addr);
+        }
+
+        if let Some(http_timeout) = self.http_timeout {
+            opts.http.timeout = Duration::from_secs(http_timeout);
+        }
+
         if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
             return MissingConfigSnafu {
                 msg: "Missing node id option",
@@ -217,7 +236,8 @@ impl StartCommand {
         info!("Flownode start command: {:#?}", self);
         info!("Flownode options: {:#?}", opts);

-        let opts = opts.component;
+        let mut opts = opts.component;
+        opts.grpc.detect_hostname();

         // TODO(discord9): make it not optionale after cluster id is required
         let cluster_id = opts.cluster_id.unwrap_or(0);
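Note on the flownode hunks above: the new `--http-addr` and `--http-timeout` flags are folded into the component options before startup, with the timeout given as whole seconds and converted to a `Duration`. A minimal self-contained sketch of that merge step (the `Http`/`Opts` structs here are invented stand-ins for the flownode's real option types):

```rust
use std::time::Duration;

// Stand-in structs; the real types are the flownode's HTTP and component options.
#[derive(Debug, Default)]
struct Http {
    addr: String,
    timeout: Duration,
}

#[derive(Debug, Default)]
struct Opts {
    http: Http,
}

fn apply_cli_overrides(opts: &mut Opts, http_addr: Option<&String>, http_timeout: Option<u64>) {
    if let Some(addr) = http_addr {
        // Same pattern as the diff: clone the CLI value into the option field.
        opts.http.addr.clone_from(addr);
    }
    if let Some(secs) = http_timeout {
        opts.http.timeout = Duration::from_secs(secs);
    }
}

fn main() {
    let mut opts = Opts::default();
    apply_cli_overrides(&mut opts, Some(&"127.0.0.1:4000".to_string()), Some(30));
    assert_eq!(opts.http.timeout, Duration::from_secs(30));
}
```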
@@ -268,7 +268,8 @@ impl StartCommand {
         info!("Frontend options: {:#?}", opts);

         let plugin_opts = opts.plugins;
-        let opts = opts.component;
+        let mut opts = opts.component;
+        opts.grpc.detect_hostname();
         let mut plugins = Plugins::new();
         plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
             .await
@@ -249,8 +249,6 @@ impl StartCommand {

         if let Some(backend) = &self.backend {
             opts.backend.clone_from(backend);
-        } else {
-            opts.backend = BackendImpl::default()
         }

         // Disable dashboard in metasrv.
@@ -274,7 +272,8 @@ impl StartCommand {
         info!("Metasrv options: {:#?}", opts);

         let plugin_opts = opts.plugins;
-        let opts = opts.component;
+        let mut opts = opts.component;
+        opts.detect_server_addr();
         let mut plugins = Plugins::new();
         plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
             .await
@@ -43,7 +43,7 @@ use common_meta::node_manager::NodeManagerRef;
 use common_meta::peer::Peer;
 use common_meta::region_keeper::MemoryRegionKeeper;
 use common_meta::sequence::SequenceBuilder;
-use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
+use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
 use common_procedure::{ProcedureInfo, ProcedureManagerRef};
 use common_telemetry::info;
 use common_telemetry::logging::{LoggingOptions, TracingOptions};
@@ -54,7 +54,7 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use datanode::region_server::RegionServer;
 use file_engine::config::EngineConfig as FileEngineConfig;
-use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
+use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
 use frontend::frontend::FrontendOptions;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -76,10 +76,10 @@ use tokio::sync::{broadcast, RwLock};
 use tracing_appender::non_blocking::WorkerGuard;

 use crate::error::{
-    BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
-    InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, Result,
-    ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
-    StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
+    BuildCacheRegistrySnafu, BuildWalOptionsAllocatorSnafu, CreateDirSnafu, IllegalConfigSnafu,
+    InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu,
+    Result, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu,
+    StartDatanodeSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
     StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
 };
 use crate::options::{GlobalOptions, GreptimeOptions};
@@ -145,6 +145,7 @@ pub struct StandaloneOptions {
     pub storage: StorageConfig,
     pub metadata_store: KvBackendConfig,
     pub procedure: ProcedureConfig,
+    pub flow: FlowConfig,
     pub logging: LoggingOptions,
     pub user_provider: Option<String>,
     /// Options for different store engines.
@@ -173,6 +174,7 @@ impl Default for StandaloneOptions {
             storage: StorageConfig::default(),
             metadata_store: KvBackendConfig::default(),
             procedure: ProcedureConfig::default(),
+            flow: FlowConfig::default(),
             logging: LoggingOptions::default(),
             export_metrics: ExportMetricsOption::default(),
             user_provider: None,
@@ -461,7 +463,8 @@ impl StartCommand {

         let mut plugins = Plugins::new();
         let plugin_opts = opts.plugins;
-        let opts = opts.component;
+        let mut opts = opts.component;
+        opts.grpc.detect_hostname();
         let fe_opts = opts.frontend_options();
         let dn_opts = opts.datanode_options();

@@ -522,8 +525,12 @@ impl StartCommand {
         Self::create_table_metadata_manager(kv_backend.clone()).await?;

         let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
+        let flownode_options = FlownodeOptions {
+            flow: opts.flow.clone(),
+            ..Default::default()
+        };
         let flow_builder = FlownodeBuilder::new(
-            Default::default(),
+            flownode_options,
             plugins.clone(),
             table_metadata_manager.clone(),
             catalog_manager.clone(),
@@ -562,10 +569,11 @@ impl StartCommand {
                 .step(10)
                 .build(),
         );
-        let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
-            opts.wal.clone().into(),
-            kv_backend.clone(),
-        ));
+        let kafka_options = opts.wal.clone().into();
+        let wal_options_allocator = build_wal_options_allocator(&kafka_options, kv_backend.clone())
+            .await
+            .context(BuildWalOptionsAllocatorSnafu)?;
+        let wal_options_allocator = Arc::new(wal_options_allocator);
         let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
             table_id_sequence,
             wal_options_allocator.clone(),
@@ -25,14 +25,16 @@ use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_E
 use common_wal::config::raft_engine::RaftEngineConfig;
 use common_wal::config::DatanodeWalConfig;
 use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
-use file_engine::config::EngineConfig;
+use file_engine::config::EngineConfig as FileEngineConfig;
 use frontend::frontend::FrontendOptions;
 use meta_client::MetaClientOptions;
 use meta_srv::metasrv::MetasrvOptions;
 use meta_srv::selector::SelectorType;
+use metric_engine::config::EngineConfig as MetricEngineConfig;
 use mito2::config::MitoConfig;
 use servers::export_metrics::ExportMetricsOption;
 use servers::grpc::GrpcOptions;
+use servers::http::HttpOptions;

 #[allow(deprecated)]
 #[test]
@@ -69,10 +71,13 @@ fn test_load_datanode_example_config() {
             region_engine: vec![
                 RegionEngineConfig::Mito(MitoConfig {
                     auto_flush_interval: Duration::from_secs(3600),
-                    experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+                    write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
                     ..Default::default()
                 }),
-                RegionEngineConfig::File(EngineConfig {}),
+                RegionEngineConfig::File(FileEngineConfig {}),
+                RegionEngineConfig::Metric(MetricEngineConfig {
+                    experimental_sparse_primary_key_encoding: false,
+                }),
             ],
             logging: LoggingOptions {
                 level: Some("info".to_string()),
@@ -85,7 +90,9 @@ fn test_load_datanode_example_config() {
                 remote_write: Some(Default::default()),
                 ..Default::default()
             },
-            grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
+            grpc: GrpcOptions::default()
+                .with_addr("127.0.0.1:3001")
+                .with_hostname("127.0.0.1:3001"),
             rpc_addr: Some("127.0.0.1:3001".to_string()),
             rpc_hostname: Some("127.0.0.1".to_string()),
             rpc_runtime_size: Some(8),
@@ -137,6 +144,11 @@ fn test_load_frontend_example_config() {
                 remote_write: Some(Default::default()),
                 ..Default::default()
             },
+            grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
+            http: HttpOptions {
+                cors_allowed_origins: vec!["https://example.com".to_string()],
+                ..Default::default()
+            },
             ..Default::default()
         },
         ..Default::default()
@@ -154,6 +166,7 @@ fn test_load_metasrv_example_config() {
         component: MetasrvOptions {
             selector: SelectorType::default(),
             data_home: "/tmp/metasrv/".to_string(),
+            server_addr: "127.0.0.1:3002".to_string(),
             logging: LoggingOptions {
                 dir: "/tmp/greptimedb/logs".to_string(),
                 level: Some("info".to_string()),
@@ -203,10 +216,13 @@ fn test_load_standalone_example_config() {
             region_engine: vec![
                 RegionEngineConfig::Mito(MitoConfig {
                     auto_flush_interval: Duration::from_secs(3600),
-                    experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+                    write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
                     ..Default::default()
                 }),
-                RegionEngineConfig::File(EngineConfig {}),
+                RegionEngineConfig::File(FileEngineConfig {}),
+                RegionEngineConfig::Metric(MetricEngineConfig {
+                    experimental_sparse_primary_key_encoding: false,
+                }),
             ],
             storage: StorageConfig {
                 data_home: "/tmp/greptimedb/".to_string(),
@@ -223,6 +239,10 @@ fn test_load_standalone_example_config() {
                 remote_write: Some(Default::default()),
                 ..Default::default()
             },
+            http: HttpOptions {
+                cors_allowed_origins: vec!["https://example.com".to_string()],
+                ..Default::default()
+            },
             ..Default::default()
         },
         ..Default::default()
@@ -4,6 +4,9 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+testing = []
+
 [lints]
 workspace = true

@@ -17,6 +17,7 @@ use std::io;
 use std::ops::Range;
 use std::path::Path;
 use std::pin::Pin;
+use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
 use std::task::{Context, Poll};

@@ -33,19 +34,22 @@ pub struct Metadata {
     pub content_length: u64,
 }

-/// `RangeReader` reads a range of bytes from a source.
-#[async_trait]
-pub trait RangeReader: Send + Unpin {
+/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
+pub trait SizeAwareRangeReader: RangeReader {
     /// Sets the file size hint for the reader.
     ///
     /// It's used to optimize the reading process by reducing the number of remote requests.
     fn with_file_size_hint(&mut self, file_size_hint: u64);
+}

+/// `RangeReader` reads a range of bytes from a source.
+#[async_trait]
+pub trait RangeReader: Sync + Send + Unpin {
     /// Returns the metadata of the source.
-    async fn metadata(&mut self) -> io::Result<Metadata>;
+    async fn metadata(&self) -> io::Result<Metadata>;

     /// Reads the bytes in the given range.
-    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;
+    async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;

     /// Reads the bytes in the given range into the buffer.
     ///
@@ -53,18 +57,14 @@ pub trait RangeReader: Send + Unpin {
     /// - If the buffer is insufficient to hold the bytes, it will either:
     ///   - Allocate additional space (e.g., for `Vec<u8>`)
     ///   - Panic (e.g., for `&mut [u8]`)
-    async fn read_into(
-        &mut self,
-        range: Range<u64>,
-        buf: &mut (impl BufMut + Send),
-    ) -> io::Result<()> {
+    async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
         let bytes = self.read(range).await?;
         buf.put_slice(&bytes);
         Ok(())
     }

     /// Reads the bytes in the given ranges.
-    async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+    async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
         let mut result = Vec::with_capacity(ranges.len());
         for range in ranges {
             result.push(self.read(range.clone()).await?);
@@ -74,25 +74,20 @@ pub trait RangeReader: Send + Unpin {
 }

 #[async_trait]
-impl<R: ?Sized + RangeReader> RangeReader for &mut R {
-    fn with_file_size_hint(&mut self, file_size_hint: u64) {
-        (*self).with_file_size_hint(file_size_hint)
-    }
-
-    async fn metadata(&mut self) -> io::Result<Metadata> {
+impl<R: ?Sized + RangeReader> RangeReader for &R {
+    async fn metadata(&self) -> io::Result<Metadata> {
         (*self).metadata().await
     }
-    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+    async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
         (*self).read(range).await
     }
-    async fn read_into(
-        &mut self,
-        range: Range<u64>,
-        buf: &mut (impl BufMut + Send),
-    ) -> io::Result<()> {
+    async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
         (*self).read_into(range, buf).await
     }
-    async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+    async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
         (*self).read_vec(ranges).await
     }
 }
@@ -120,7 +115,7 @@ pub struct AsyncReadAdapter<R> {

 impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
     pub async fn new(inner: R) -> io::Result<Self> {
-        let mut inner = inner;
+        let inner = inner;
         let metadata = inner.metadata().await?;
         Ok(AsyncReadAdapter {
             inner: Arc::new(Mutex::new(inner)),
@@ -160,7 +155,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
         let range = *this.position..(*this.position + size);
         let inner = this.inner.clone();
         let fut = async move {
-            let mut inner = inner.lock().await;
+            let inner = inner.lock().await;
             inner.read(range).await
         };

@@ -195,27 +190,24 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {

 #[async_trait]
 impl RangeReader for Vec<u8> {
-    fn with_file_size_hint(&mut self, _file_size_hint: u64) {
-        // do nothing
-    }
-
-    async fn metadata(&mut self) -> io::Result<Metadata> {
+    async fn metadata(&self) -> io::Result<Metadata> {
         Ok(Metadata {
             content_length: self.len() as u64,
         })
     }

-    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+    async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
         let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
         Ok(bytes)
     }
 }

+// TODO(weny): considers replacing `tokio::fs::File` with opendal reader.
 /// `FileReader` is a `RangeReader` for reading a file.
 pub struct FileReader {
     content_length: u64,
-    position: u64,
-    file: tokio::fs::File,
+    position: AtomicU64,
+    file: Mutex<tokio::fs::File>,
 }

 impl FileReader {
@@ -225,32 +217,36 @@ impl FileReader {
         let metadata = file.metadata().await?;
         Ok(FileReader {
             content_length: metadata.len(),
-            position: 0,
-            file,
+            position: AtomicU64::new(0),
+            file: Mutex::new(file),
         })
     }
 }

+impl SizeAwareRangeReader for FileReader {
+    fn with_file_size_hint(&mut self, _file_size_hint: u64) {
+        // do nothing
+    }
+}
+
 #[async_trait]
 impl RangeReader for FileReader {
-    fn with_file_size_hint(&mut self, _file_size_hint: u64) {
-        // do nothing
-    }
-
-    async fn metadata(&mut self) -> io::Result<Metadata> {
+    async fn metadata(&self) -> io::Result<Metadata> {
         Ok(Metadata {
             content_length: self.content_length,
         })
     }

-    async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
-        if range.start != self.position {
-            self.file.seek(io::SeekFrom::Start(range.start)).await?;
-            self.position = range.start;
+    async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
+        let mut file = self.file.lock().await;
+        if range.start != self.position.load(Ordering::Relaxed) {
+            file.seek(io::SeekFrom::Start(range.start)).await?;
+            self.position.store(range.start, Ordering::Relaxed);
         }

         range.end = range.end.min(self.content_length);
-        if range.end <= self.position {
+        if range.end <= self.position.load(Ordering::Relaxed) {
             return Err(io::Error::new(
                 io::ErrorKind::UnexpectedEof,
                 "Start of range is out of bounds",
@@ -259,8 +255,8 @@ impl RangeReader for FileReader {

         let mut buf = vec![0; (range.end - range.start) as usize];

-        self.file.read_exact(&mut buf).await?;
-        self.position = range.end;
+        file.read_exact(&mut buf).await?;
+        self.position.store(range.end, Ordering::Relaxed);

         Ok(Bytes::from(buf))
     }
@@ -301,7 +297,7 @@ mod tests {
         let data = b"hello world";
         tokio::fs::write(path, data).await.unwrap();

-        let mut reader = FileReader::new(path).await.unwrap();
+        let reader = FileReader::new(path).await.unwrap();
         let metadata = reader.metadata().await.unwrap();
         assert_eq!(metadata.content_length, data.len() as u64);

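Note on the `RangeReader` hunks above: the trait now takes `&self` everywhere, and the file-size-hint setter moves into a separate `SizeAwareRangeReader` trait, so implementors keep their mutable state behind interior mutability (the new `FileReader` wraps its file handle in a `Mutex` and the seek position in an `AtomicU64`). The sketch below shows the same interior-mutability pattern in a synchronous, std-only form; it is not the crate's real type, just an illustration of why `&self` methods can still seek and read.

```rust
// Std-only illustration of the pattern the diff adopts for FileReader:
// expose `&self` in the public API and keep the mutable state behind
// AtomicU64 / Mutex so concurrent callers can share one reader.
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;

struct SharedFileReader {
    content_length: u64,
    position: AtomicU64,
    file: Mutex<File>,
}

impl SharedFileReader {
    fn open(path: &str) -> io::Result<Self> {
        let file = File::open(path)?;
        let content_length = file.metadata()?.len();
        Ok(Self {
            content_length,
            position: AtomicU64::new(0),
            file: Mutex::new(file),
        })
    }

    // Note: `&self`, not `&mut self`, mirroring the new RangeReader signature.
    fn read_range(&self, start: u64, end: u64) -> io::Result<Vec<u8>> {
        let mut file = self.file.lock().unwrap();
        let end = end.min(self.content_length);
        if end <= start {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "range out of bounds"));
        }
        if start != self.position.load(Ordering::Relaxed) {
            file.seek(SeekFrom::Start(start))?;
        }
        let mut buf = vec![0; (end - start) as usize];
        file.read_exact(&mut buf)?;
        self.position.store(end, Ordering::Relaxed);
        Ok(buf)
    }
}

fn main() -> io::Result<()> {
    std::fs::write("/tmp/range_reader_demo.txt", b"hello world")?;
    let reader = SharedFileReader::open("/tmp/range_reader_demo.txt")?;
    assert_eq!(reader.read_range(0, 5)?, b"hello".to_vec());
    Ok(())
}
```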
@@ -109,6 +109,7 @@ pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
 pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
 pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
 pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;
+pub const PG_CATALOG_PG_DATABASE_TABLE_ID: u32 = 259;

 // ----- End of pg_catalog tables -----

@@ -73,14 +73,21 @@ pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
             layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
         }

-        let opts = layered_config
+        let mut opts: Self = layered_config
             .build()
             .and_then(|x| x.try_deserialize())
             .context(LoadLayeredConfigSnafu)?;

+        opts.validate_sanitize()?;
+
         Ok(opts)
     }

+    /// Validate(and possibly sanitize) the configuration.
+    fn validate_sanitize(&mut self) -> Result<()> {
+        Ok(())
+    }
+
     /// List of toml keys that should be parsed as a list.
     fn env_list_keys() -> Option<&'static [&'static str]> {
         None
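Note on the `Configurable` hunk above: `load_layered_options` now calls a `validate_sanitize` hook (a no-op by default) right after deserializing, so every options type has one place to reject or normalize bad values. A minimal stand-alone sketch of the idea, with an invented `Options` struct and error type rather than GreptimeDB's real `Configurable` trait:

```rust
// Stand-alone illustration of the validate-and-sanitize hook added in the diff.
// `Options` and `ConfigError` are invented for the example.
#[derive(Debug)]
struct ConfigError(String);

#[derive(Debug, Default)]
struct Options {
    http_addr: String,
    worker_threads: usize,
}

trait ValidateSanitize: Sized {
    /// Validate (and possibly sanitize) the configuration after it is deserialized.
    fn validate_sanitize(&mut self) -> Result<(), ConfigError> {
        Ok(()) // default: accept everything, like the trait's default method in the diff
    }
}

impl ValidateSanitize for Options {
    fn validate_sanitize(&mut self) -> Result<(), ConfigError> {
        if self.http_addr.is_empty() {
            return Err(ConfigError("http_addr must not be empty".into()));
        }
        // Sanitize: clamp a zero thread count to a usable default.
        if self.worker_threads == 0 {
            self.worker_threads = 4;
        }
        Ok(())
    }
}

fn main() {
    let mut opts = Options { http_addr: "127.0.0.1:4000".into(), worker_threads: 0 };
    opts.validate_sanitize().expect("valid config");
    assert_eq!(opts.worker_threads, 4);
}
```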
@@ -31,7 +31,7 @@ derive_builder.workspace = true
 futures.workspace = true
 lazy_static.workspace = true
 object-store.workspace = true
-orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599", default-features = false, features = [
+orc-rust = { version = "0.5", default-features = false, features = [
     "async",
 ] }
 parquet.workspace = true
@@ -180,7 +180,7 @@ pub enum Error {

     #[snafu(display("Failed to parse format {} with value: {}", key, value))]
     ParseFormat {
-        key: &'static str,
+        key: String,
         value: String,
         #[snafu(implicit)]
         location: Location,
@@ -126,8 +126,7 @@ impl ArrowDecoder for arrow::csv::reader::Decoder {
     }
 }

-#[allow(deprecated)]
-impl ArrowDecoder for arrow::json::RawDecoder {
+impl ArrowDecoder for arrow::json::reader::Decoder {
     fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
         self.decode(buf)
     }
@@ -17,8 +17,7 @@ use std::str::FromStr;
 use std::sync::Arc;

 use arrow::csv;
-#[allow(deprecated)]
-use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
+use arrow::csv::reader::Format;
 use arrow::record_batch::RecordBatch;
 use arrow_schema::{Schema, SchemaRef};
 use async_trait::async_trait;
@@ -161,7 +160,6 @@ impl FileOpener for CsvOpener {
     }
 }

-#[allow(deprecated)]
 #[async_trait]
 impl FileFormat for CsvFormat {
     async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
@@ -188,9 +186,12 @@ impl FileFormat for CsvFormat {
         common_runtime::spawn_blocking_global(move || {
             let reader = SyncIoBridge::new(decoded);

-            let (schema, _records_read) =
-                infer_csv_schema(reader, delimiter, schema_infer_max_record, has_header)
-                    .context(error::InferSchemaSnafu)?;
+            let format = Format::default()
+                .with_delimiter(delimiter)
+                .with_header(has_header);
+            let (schema, _records_read) = format
+                .infer_schema(reader, schema_infer_max_record)
+                .context(error::InferSchemaSnafu)?;
             Ok(schema)
         })
         .await
@@ -253,7 +254,7 @@ mod tests {
             "c7: Int64: NULL",
             "c8: Int64: NULL",
             "c9: Int64: NULL",
-            "c10: Int64: NULL",
+            "c10: Utf8: NULL",
             "c11: Float64: NULL",
             "c12: Float64: NULL",
             "c13: Utf8: NULL"
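Note on the CSV hunk above: the deprecated `infer_reader_schema` helper is replaced by arrow's builder-style `Format`, which carries the delimiter and header settings and then infers a schema from at most N records. A small stand-alone sketch of that API against an in-memory reader, assuming a recent `arrow-csv` where `infer_schema` takes an `Option<usize>` record cap and returns the schema plus the number of records scanned:

```rust
// Sketch only: mirrors the Format calls used in the hunk above; requires the
// `arrow` crate as a dependency.
use std::io::Cursor;

use arrow::csv::reader::Format;

fn main() -> Result<(), arrow::error::ArrowError> {
    let data = "id,name,score\n1,alice,9.5\n2,bob,7.0\n";
    let format = Format::default()
        .with_delimiter(b',')
        .with_header(true);
    // Scan at most 100 records while inferring column types.
    let (schema, records_read) = format.infer_schema(Cursor::new(data), Some(100))?;
    println!("read {records_read} records, schema: {schema:?}");
    Ok(())
}
```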
@@ -20,8 +20,7 @@ use std::sync::Arc;
 use arrow::datatypes::SchemaRef;
 use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
 use arrow::json::writer::LineDelimited;
-#[allow(deprecated)]
-use arrow::json::{self, RawReaderBuilder};
+use arrow::json::{self, ReaderBuilder};
 use arrow::record_batch::RecordBatch;
 use arrow_schema::Schema;
 use async_trait::async_trait;
@@ -140,7 +139,6 @@ impl JsonOpener {
     }
 }

-#[allow(deprecated)]
 impl FileOpener for JsonOpener {
     fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
         open_with_decoder(
@@ -148,7 +146,7 @@ impl FileOpener for JsonOpener {
             meta.location().to_string(),
             self.compression_type,
             || {
-                RawReaderBuilder::new(self.projected_schema.clone())
+                ReaderBuilder::new(self.projected_schema.clone())
                     .with_batch_size(self.batch_size)
                     .build_decoder()
                     .map_err(DataFusionError::from)
@@ -42,7 +42,7 @@ struct Test<'a, T: FileOpener> {
     expected: Vec<&'a str>,
 }

-impl<'a, T: FileOpener> Test<'a, T> {
+impl<T: FileOpener> Test<'_, T> {
     pub async fn run(self) {
         let result = FileStream::new(
             &self.config,
@@ -35,10 +35,23 @@ data = {
     "bigint_other": [5, -5, 1, 5, 5],
     "utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
     "utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
-    "timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
-    "date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
+    "timestamp_simple": [
+        datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
+        datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
+        datetime.datetime(2023, 1, 1),
+        datetime.datetime(2023, 2, 1),
+        datetime.datetime(2023, 3, 1),
+    ],
+    "date_simple": [
+        datetime.date(2023, 4, 1),
+        datetime.date(2023, 3, 1),
+        datetime.date(2023, 1, 1),
+        datetime.date(2023, 2, 1),
+        datetime.date(2023, 3, 1),
+    ],
 }


 def infer_schema(data):
     schema = "struct<"
     for key, value in data.items():
@@ -56,7 +69,7 @@ def infer_schema(data):
         elif key.startswith("date"):
             dt = "date"
         else:
-            print(key,value,dt)
+            print(key, value, dt)
             raise NotImplementedError
         if key.startswith("double"):
             dt = "double"
@@ -68,7 +81,6 @@ def infer_schema(data):
     return schema


-
 def _write(
     schema: str,
     data,
@@ -164,7 +164,7 @@ impl FromStr for Decimal128 {
     type Err = Error;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let len = s.as_bytes().len();
+        let len = s.len();
        if len <= BYTES_TO_OVERFLOW_RUST_DECIMAL {
             let rd = RustDecimal::from_str_exact(s).context(ParseRustDecimalStrSnafu { raw: s })?;
             Ok(Self::from(rd))
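Note on the `Decimal128` hunk above: for a `&str`, `s.len()` and `s.as_bytes().len()` are the same quantity (both count UTF-8 bytes, not characters), so the change is purely cosmetic. For the record:

```rust
fn main() {
    let s = "中文ab";
    assert_eq!(s.len(), s.as_bytes().len()); // byte length: 3 + 3 + 1 + 1 = 8
    assert_eq!(s.chars().count(), 4);        // character count is a different measure
}
```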
@@ -25,7 +25,6 @@ use crate::scalars::expression::ExpressionFunction;
 use crate::scalars::json::JsonFunction;
 use crate::scalars::matches::MatchesFunction;
 use crate::scalars::math::MathFunction;
-use crate::scalars::numpy::NumpyFunction;
 use crate::scalars::timestamp::TimestampFunction;
 use crate::scalars::vector::VectorFunction;
 use crate::system::SystemFunction;
@@ -103,7 +102,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {

     // Utility functions
     MathFunction::register(&function_registry);
-    NumpyFunction::register(&function_registry);
     TimestampFunction::register(&function_registry);
     DateFunction::register(&function_registry);
     ExpressionFunction::register(&function_registry);
@@ -20,7 +20,6 @@ pub mod geo;
 pub mod json;
 pub mod matches;
 pub mod math;
-pub mod numpy;
 pub mod vector;

 #[cfg(test)]
@@ -32,6 +32,7 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
 pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;

 use crate::function_registry::FunctionRegistry;
+use crate::scalars::vector::product::VectorProductCreator;
 use crate::scalars::vector::sum::VectorSumCreator;

 /// A function creates `AggregateFunctionCreator`.
@@ -93,6 +94,7 @@ impl AggregateFunctions {
         register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
         register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
         register_aggr_func!("vec_sum", 1, VectorSumCreator);
+        register_aggr_func!("vec_product", 1, VectorProductCreator);

         #[cfg(feature = "geo")]
         register_aggr_func!(
@@ -91,6 +91,7 @@ mod tests {
     use std::sync::Arc;

     use common_query::prelude::{TypeSignature, Volatility};
+    use datatypes::arrow::datatypes::IntervalDayTime;
     use datatypes::prelude::ConcreteDataType;
     use datatypes::value::Value;
     use datatypes::vectors::{
@@ -134,7 +135,12 @@ mod tests {

         let times = vec![Some(123), None, Some(42), None];
         // Intervals in milliseconds
-        let intervals = vec![1000, 2000, 3000, 1000];
+        let intervals = vec![
+            IntervalDayTime::new(0, 1000),
+            IntervalDayTime::new(0, 2000),
+            IntervalDayTime::new(0, 3000),
+            IntervalDayTime::new(0, 1000),
+        ];
         let results = [Some(124), None, Some(45), None];

         let time_vector = TimestampSecondVector::from(times.clone());
@@ -91,6 +91,7 @@ mod tests {
     use std::sync::Arc;

     use common_query::prelude::{TypeSignature, Volatility};
+    use datatypes::arrow::datatypes::IntervalDayTime;
     use datatypes::prelude::ConcreteDataType;
     use datatypes::value::Value;
     use datatypes::vectors::{
@@ -139,7 +140,12 @@ mod tests {

         let times = vec![Some(123), None, Some(42), None];
         // Intervals in milliseconds
-        let intervals = vec![1000, 2000, 3000, 1000];
+        let intervals = vec![
+            IntervalDayTime::new(0, 1000),
+            IntervalDayTime::new(0, 2000),
+            IntervalDayTime::new(0, 3000),
+            IntervalDayTime::new(0, 1000),
+        ];
         let results = [Some(122), None, Some(39), None];

         let time_vector = TimestampSecondVector::from(times.clone());
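Note on the two test hunks above: the interval vectors switch from bare `i64` milliseconds to arrow's `IntervalDayTime`, whose constructor takes a day component and a millisecond component. A quick illustration of how the values above map onto that type, assuming the `arrow` crate re-exported as `datatypes::arrow` in this repository:

```rust
// Assumes arrow-rs's IntervalDayTime as used by the tests above.
use arrow::datatypes::IntervalDayTime;

fn main() {
    // One second, expressed as zero days plus 1000 milliseconds.
    let one_second = IntervalDayTime::new(0, 1000);
    assert_eq!(one_second.days, 0);
    assert_eq!(one_second.milliseconds, 1000);
}
```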
@@ -21,10 +21,9 @@ use common_query::error::{
 };
 use datafusion::common::tree_node::{Transformed, TreeNode, TreeNodeIterator, TreeNodeRecursion};
 use datafusion::common::{DFSchema, Result as DfResult};
-use datafusion::execution::context::SessionState;
+use datafusion::execution::SessionStateBuilder;
 use datafusion::logical_expr::{self, Expr, Volatility};
 use datafusion::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
-use datafusion::prelude::SessionConfig;
 use datatypes::arrow::array::RecordBatch;
 use datatypes::arrow::datatypes::{DataType, Field};
 use datatypes::prelude::VectorRef;
@@ -104,8 +103,7 @@ impl MatchesFunction {
         let like_expr = ast.into_like_expr(col_name);

         let input_schema = Self::input_schema();
-        let session_state =
-            SessionState::new_with_config_rt(SessionConfig::default(), Arc::default());
+        let session_state = SessionStateBuilder::new().with_default_features().build();
         let planner = DefaultPhysicalPlanner::default();
         let physical_expr = planner
             .create_physical_expr(&like_expr, &input_schema, &session_state)
@@ -131,7 +129,7 @@ impl MatchesFunction {
     }

     fn input_schema() -> DFSchema {
-        DFSchema::from_unqualifed_fields(
+        DFSchema::from_unqualified_fields(
             [Arc::new(Field::new("data", DataType::Utf8, true))].into(),
             HashMap::new(),
         )
@@ -725,7 +723,8 @@ struct Tokenizer {
 impl Tokenizer {
     pub fn tokenize(mut self, pattern: &str) -> Result<Vec<Token>> {
         let mut tokens = vec![];
-        while self.cursor < pattern.len() {
+        let char_len = pattern.chars().count();
+        while self.cursor < char_len {
             // TODO: collect pattern into Vec<char> if this tokenizer is bottleneck in the future
             let c = pattern.chars().nth(self.cursor).unwrap();
             match c {
@@ -794,7 +793,8 @@ impl Tokenizer {
         let mut phase = String::new();
         let mut is_quote_present = false;

-        while self.cursor < pattern.len() {
+        let char_len = pattern.chars().count();
+        while self.cursor < char_len {
             let mut c = pattern.chars().nth(self.cursor).unwrap();

             match c {
@@ -899,6 +899,26 @@ mod test {
                     Phase("c".to_string()),
                 ],
             ),
+            (
+                r#"中文 测试"#,
+                vec![Phase("中文".to_string()), Phase("测试".to_string())],
+            ),
+            (
+                r#"中文 AND 测试"#,
+                vec![Phase("中文".to_string()), And, Phase("测试".to_string())],
+            ),
+            (
+                r#"中文 +测试"#,
+                vec![Phase("中文".to_string()), Must, Phase("测试".to_string())],
+            ),
+            (
+                r#"中文 -测试"#,
+                vec![
+                    Phase("中文".to_string()),
+                    Negative,
+                    Phase("测试".to_string()),
+                ],
+            ),
         ];

         for (query, expected) in cases {
@@ -1030,6 +1050,61 @@ mod test {
                 ],
             },
             ),
+            (
+                r#"中文 测试"#,
+                PatternAst::Binary {
+                    op: BinaryOp::Or,
+                    children: vec![
+                        PatternAst::Literal {
+                            op: UnaryOp::Optional,
+                            pattern: "中文".to_string(),
+                        },
+                        PatternAst::Literal {
+                            op: UnaryOp::Optional,
+                            pattern: "测试".to_string(),
+                        },
+                    ],
+                },
+            ),
+            (
+                r#"中文 AND 测试"#,
+                PatternAst::Binary {
+                    op: BinaryOp::And,
+                    children: vec![
+                        PatternAst::Literal {
+                            op: UnaryOp::Optional,
+                            pattern: "中文".to_string(),
+                        },
+                        PatternAst::Literal {
+                            op: UnaryOp::Optional,
+                            pattern: "测试".to_string(),
+                        },
+                    ],
+                },
+            ),
+            (
+                r#"中文 +测试"#,
+                PatternAst::Literal {
+                    op: UnaryOp::Must,
+                    pattern: "测试".to_string(),
+                },
+            ),
+            (
+                r#"中文 -测试"#,
+                PatternAst::Binary {
+                    op: BinaryOp::And,
+                    children: vec![
+                        PatternAst::Literal {
+                            op: UnaryOp::Negative,
+                            pattern: "测试".to_string(),
+                        },
+                        PatternAst::Literal {
+                            op: UnaryOp::Optional,
+                            pattern: "中文".to_string(),
+                        },
+                    ],
+                },
+            ),
         ];

         for (query, expected) in cases {
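Note on the tokenizer hunks above: the loop bound changes from `pattern.len()` (a byte count) to `pattern.chars().count()` because the cursor indexes characters via `chars().nth(...)`; with multi-byte input such as the new `中文` test cases the byte count overshoots the character sequence and the old `.unwrap()` on `nth` would panic. A tiny demonstration of the mismatch:

```rust
fn main() {
    let pattern = "中文 测试";
    let byte_len = pattern.len();            // 13 bytes: four 3-byte chars plus one space
    let char_len = pattern.chars().count();  // 5 characters
    assert_eq!(byte_len, 13);
    assert_eq!(char_len, 5);
    // Indexing by character position past char_len yields None, so a loop bounded by
    // byte_len would hit `.unwrap()` on None long before reaching its end.
    assert!(pattern.chars().nth(char_len).is_none());
    assert!(pattern.chars().nth(byte_len - 1).is_none());
}
```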
@@ -1,298 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::sync::Arc;

use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::ArrowPrimitiveType;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::*;
use datatypes::vectors::PrimitiveVector;
use paste::paste;

use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};

/// numpy.clip function, <https://numpy.org/doc/stable/reference/generated/numpy.clip.html>
#[derive(Clone, Debug, Default)]
pub struct ClipFunction;

macro_rules! define_eval {
    ($O: ident) => {
        paste! {
            fn [<eval_ $O>](columns: &[VectorRef]) -> Result<VectorRef> {
                fn cast_vector(input: &VectorRef) -> VectorRef {
                    Arc::new(PrimitiveVector::<<$O as WrapperType>::LogicalType>::try_from_arrow_array(
                        compute::cast(&input.to_arrow_array(), &<<<$O as WrapperType>::LogicalType as LogicalPrimitiveType>::ArrowPrimitive as ArrowPrimitiveType>::DATA_TYPE).unwrap()
                    ).unwrap()) as _
                }
                let operator_1 = cast_vector(&columns[0]);
                let operator_2 = cast_vector(&columns[1]);
                let operator_3 = cast_vector(&columns[2]);

                // clip(a, min, max) is equivalent to min(max(a, min), max)
                let col: VectorRef = Arc::new(scalar_binary_op::<$O, $O, $O, _>(
                    &operator_1,
                    &operator_2,
                    scalar_max,
                    &mut EvalContext::default(),
                )?);
                let col = scalar_binary_op::<$O, $O, $O, _>(
                    &col,
                    &operator_3,
                    scalar_min,
                    &mut EvalContext::default(),
                )?;
                Ok(Arc::new(col))
            }
        }
    };
}

define_eval!(i64);
define_eval!(u64);
define_eval!(f64);

impl Function for ClipFunction {
    fn name(&self) -> &str {
        "clip"
    }

    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        if input_types.iter().all(ConcreteDataType::is_signed) {
            Ok(ConcreteDataType::int64_datatype())
        } else if input_types.iter().all(ConcreteDataType::is_unsigned) {
            Ok(ConcreteDataType::uint64_datatype())
        } else {
            Ok(ConcreteDataType::float64_datatype())
        }
    }

    fn signature(&self) -> Signature {
        Signature::uniform(3, ConcreteDataType::numerics(), Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.iter().all(|v| v.data_type().is_signed()) {
            eval_i64(columns)
        } else if columns.iter().all(|v| v.data_type().is_unsigned()) {
            eval_u64(columns)
        } else {
            eval_f64(columns)
        }
    }
}

#[inline]
pub fn min<T: PartialOrd>(input: T, min: T) -> T {
    if input < min {
        input
    } else {
        min
    }
}

#[inline]
pub fn max<T: PartialOrd>(input: T, max: T) -> T {
    if input > max {
        input
    } else {
        max
    }
}

#[inline]
fn scalar_min<O>(left: Option<O>, right: Option<O>, _ctx: &mut EvalContext) -> Option<O>
where
    O: Scalar + Copy + PartialOrd,
{
    match (left, right) {
        (Some(left), Some(right)) => Some(min(left, right)),
        _ => None,
    }
}

#[inline]
fn scalar_max<O>(left: Option<O>, right: Option<O>, _ctx: &mut EvalContext) -> Option<O>
where
    O: Scalar + Copy + PartialOrd,
{
    match (left, right) {
        (Some(left), Some(right)) => Some(max(left, right)),
        _ => None,
    }
}

impl fmt::Display for ClipFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CLIP")
    }
}

#[cfg(test)]
mod tests {
    use common_query::prelude::TypeSignature;
    use datatypes::value::Value;
    use datatypes::vectors::{
        ConstantVector, Float32Vector, Int16Vector, Int32Vector, Int8Vector, UInt16Vector,
        UInt32Vector, UInt8Vector,
    };

    use super::*;

    #[test]
    fn test_clip_signature() {
        let clip = ClipFunction;

        assert_eq!("clip", clip.name());
        assert_eq!(
            ConcreteDataType::int64_datatype(),
            clip.return_type(&[]).unwrap()
        );

        assert_eq!(
            ConcreteDataType::int64_datatype(),
            clip.return_type(&[
                ConcreteDataType::int16_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::int8_datatype()
            ])
            .unwrap()
        );
        assert_eq!(
            ConcreteDataType::uint64_datatype(),
            clip.return_type(&[
                ConcreteDataType::uint16_datatype(),
                ConcreteDataType::uint64_datatype(),
                ConcreteDataType::uint8_datatype()
            ])
            .unwrap()
        );
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            clip.return_type(&[
                ConcreteDataType::uint16_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::uint8_datatype()
            ])
            .unwrap()
        );

        assert!(matches!(clip.signature(),
            Signature {
                type_signature: TypeSignature::Uniform(3, valid_types),
                volatility: Volatility::Immutable
            } if valid_types == ConcreteDataType::numerics()
        ));
    }

    #[test]
    fn test_clip_fn_signed() {
        // eval with signed integers
        let args: Vec<VectorRef> = vec![
            Arc::new(Int32Vector::from_values(0..10)),
            Arc::new(ConstantVector::new(
                Arc::new(Int8Vector::from_vec(vec![3])),
                10,
            )),
            Arc::new(ConstantVector::new(
                Arc::new(Int16Vector::from_vec(vec![6])),
                10,
            )),
        ];

        let vector = ClipFunction
            .eval(FunctionContext::default(), &args)
            .unwrap();
        assert_eq!(10, vector.len());

        // clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
        for i in 0..10 {
            if i <= 3 {
                assert!(matches!(vector.get(i), Value::Int64(v) if v == 3));
            } else if i <= 6 {
                assert!(matches!(vector.get(i), Value::Int64(v) if v == (i as i64)));
            } else {
                assert!(matches!(vector.get(i), Value::Int64(v) if v == 6));
            }
        }
    }

    #[test]
    fn test_clip_fn_unsigned() {
        // eval with unsigned integers
        let args: Vec<VectorRef> = vec![
            Arc::new(UInt8Vector::from_values(0..10)),
            Arc::new(ConstantVector::new(
                Arc::new(UInt32Vector::from_vec(vec![3])),
                10,
            )),
            Arc::new(ConstantVector::new(
                Arc::new(UInt16Vector::from_vec(vec![6])),
                10,
            )),
        ];

        let vector = ClipFunction
            .eval(FunctionContext::default(), &args)
            .unwrap();
        assert_eq!(10, vector.len());

        // clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
        for i in 0..10 {
            if i <= 3 {
                assert!(matches!(vector.get(i), Value::UInt64(v) if v == 3));
            } else if i <= 6 {
                assert!(matches!(vector.get(i), Value::UInt64(v) if v == (i as u64)));
            } else {
                assert!(matches!(vector.get(i), Value::UInt64(v) if v == 6));
            }
        }
    }

    #[test]
    fn test_clip_fn_float() {
        // eval with floats
        let args: Vec<VectorRef> = vec![
            Arc::new(Int8Vector::from_values(0..10)),
            Arc::new(ConstantVector::new(
                Arc::new(UInt32Vector::from_vec(vec![3])),
                10,
            )),
            Arc::new(ConstantVector::new(
                Arc::new(Float32Vector::from_vec(vec![6f32])),
                10,
            )),
        ];

        let vector = ClipFunction
            .eval(FunctionContext::default(), &args)
            .unwrap();
        assert_eq!(10, vector.len());

        // clip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 6) = [3, 3, 3, 3, 4, 5, 6, 6, 6, 6]
        for i in 0..10 {
            if i <= 3 {
                assert!(matches!(vector.get(i), Value::Float64(v) if v == 3.0));
            } else if i <= 6 {
                assert!(matches!(vector.get(i), Value::Float64(v) if v == (i as f64)));
            } else {
                assert!(matches!(vector.get(i), Value::Float64(v) if v == 6.0));
            }
        }
    }
}
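
For reference, the deleted `clip` function above reduces to the element-wise composition `min(max(a, lo), hi)`, with the result promoted to `Int64`, `UInt64`, or `Float64` depending on the argument types. A minimal plain-Rust sketch of that scalar semantics (illustrative only, not GreptimeDB's vector API):

// Illustrative sketch: clip(a, lo, hi) == min(max(a, lo), hi), applied per element.
fn clip<T: PartialOrd + Copy>(a: T, lo: T, hi: T) -> T {
    let floored = if a > lo { a } else { lo }; // max(a, lo)
    if floored < hi { floored } else { hi }    // min(floored, hi)
}

fn main() {
    let clipped: Vec<i64> = (0..10).map(|v| clip(v, 3, 6)).collect();
    // Same expectation as test_clip_fn_signed above.
    assert_eq!(clipped, vec![3, 3, 3, 3, 4, 5, 6, 6, 6, 6]);
}
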
@@ -1,360 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::error::{self, Result};
use datatypes::arrow::compute::cast;
use datatypes::arrow::datatypes::DataType as ArrowDataType;
use datatypes::data_type::DataType;
use datatypes::prelude::ScalarVector;
use datatypes::value::Value;
use datatypes::vectors::{Float64Vector, Vector, VectorRef};
use datatypes::with_match_primitive_type_id;
use snafu::{ensure, ResultExt};

/// Searches for the index of the largest element in `xp` that is not greater than `x`.
fn linear_search_ascending_vector(x: Value, xp: &Float64Vector) -> usize {
    for i in 0..xp.len() {
        if x < xp.get(i) {
            return i - 1;
        }
    }
    xp.len() - 1
}

/// Searches for the index of the largest element in `xp` that is not greater than `key`.
fn binary_search_ascending_vector(key: Value, xp: &Float64Vector) -> usize {
    let mut left = 0;
    let mut right = xp.len();
    /* If len <= 4 use linear search. */
    if xp.len() <= 4 {
        return linear_search_ascending_vector(key, xp);
    }
    /* find index by bisection */
    while left < right {
        let mid = left + ((right - left) >> 1);
        if key >= xp.get(mid) {
            left = mid + 1;
        } else {
            right = mid;
        }
    }
    left - 1
}

fn concrete_type_to_primitive_vector(arg: &VectorRef) -> Result<Float64Vector> {
    with_match_primitive_type_id!(arg.data_type().logical_type_id(), |$S| {
        let tmp = arg.to_arrow_array();
        let array = cast(&tmp, &ArrowDataType::Float64).context(error::TypeCastSnafu {
            typ: ArrowDataType::Float64,
        })?;
        // Safety: array has been cast to Float64Array.
        Ok(Float64Vector::try_from_arrow_array(array).unwrap())
    },{
        unreachable!()
    })
}

/// One-dimensional linear interpolation for monotonically increasing sample points. Refers to
/// <https://github.com/numpy/numpy/blob/b101756ac02e390d605b2febcded30a1da50cc2c/numpy/core/src/multiarray/compiled_base.c#L491>
#[allow(unused)]
pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
    let mut left = None;
    let mut right = None;

    ensure!(
        args.len() >= 3,
        error::InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of the args is not enough, expect at least: {}, have: {}",
                3,
                args.len()
            ),
        }
    );

    let x = concrete_type_to_primitive_vector(&args[0])?;
    let xp = concrete_type_to_primitive_vector(&args[1])?;
    let fp = concrete_type_to_primitive_vector(&args[2])?;

    // make sure the args.len() is 3 or 5
    if args.len() > 3 {
        ensure!(
            args.len() == 5,
            error::InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly: {}, have: {}",
                    5,
                    args.len()
                ),
            }
        );

        left = concrete_type_to_primitive_vector(&args[3])
            .unwrap()
            .get_data(0);
        right = concrete_type_to_primitive_vector(&args[4])
            .unwrap()
            .get_data(0);
    }

    ensure!(
        x.len() != 0,
        error::InvalidFuncArgsSnafu {
            err_msg: "The sample x is empty",
        }
    );
    ensure!(
        xp.len() != 0,
        error::InvalidFuncArgsSnafu {
            err_msg: "The sample xp is empty",
        }
    );
    ensure!(
        fp.len() != 0,
        error::InvalidFuncArgsSnafu {
            err_msg: "The sample fp is empty",
        }
    );
    ensure!(
        xp.len() == fp.len(),
        error::InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of xp: {} does not match the length of fp: {}",
                xp.len(),
                fp.len()
            ),
        }
    );

    /* Get left and right fill values. */
    let left = match left {
        Some(left) => Some(left),
        _ => fp.get_data(0),
    };

    let right = match right {
        Some(right) => Some(right),
        _ => fp.get_data(fp.len() - 1),
    };

    let res;
    if xp.len() == 1 {
        let data = x
            .iter_data()
            .map(|x| {
                if Value::from(x) < xp.get(0) {
                    left
                } else if Value::from(x) > xp.get(xp.len() - 1) {
                    right
                } else {
                    fp.get_data(0)
                }
            })
            .collect::<Vec<_>>();
        res = Float64Vector::from(data);
    } else {
        let mut j = 0;
        /* only pre-calculate slopes if there are relatively few of them. */
        let mut slopes: Option<Vec<_>> = None;
        if x.len() >= xp.len() {
            let mut slopes_tmp = Vec::with_capacity(xp.len() - 1);
            for i in 0..xp.len() - 1 {
                let slope = match (
                    fp.get_data(i + 1),
                    fp.get_data(i),
                    xp.get_data(i + 1),
                    xp.get_data(i),
                ) {
                    (Some(fp1), Some(fp2), Some(xp1), Some(xp2)) => {
                        if xp1 == xp2 {
                            None
                        } else {
                            Some((fp1 - fp2) / (xp1 - xp2))
                        }
                    }
                    _ => None,
                };
                slopes_tmp.push(slope);
            }
            slopes = Some(slopes_tmp);
        }
        let data = x
            .iter_data()
            .map(|x| match x {
                Some(xi) => {
                    if Value::from(xi) > xp.get(xp.len() - 1) {
                        right
                    } else if Value::from(xi) < xp.get(0) {
                        left
                    } else {
                        j = binary_search_ascending_vector(Value::from(xi), &xp);
                        if j == xp.len() - 1 || xp.get(j) == Value::from(xi) {
                            fp.get_data(j)
                        } else {
                            let slope = match &slopes {
                                Some(slopes) => slopes[j],
                                _ => match (
                                    fp.get_data(j + 1),
                                    fp.get_data(j),
                                    xp.get_data(j + 1),
                                    xp.get_data(j),
                                ) {
                                    (Some(fp1), Some(fp2), Some(xp1), Some(xp2)) => {
                                        if xp1 == xp2 {
                                            None
                                        } else {
                                            Some((fp1 - fp2) / (xp1 - xp2))
                                        }
                                    }
                                    _ => None,
                                },
                            };

                            /* If we get nan in one direction, try the other */
                            let ans = match (slope, xp.get_data(j), fp.get_data(j)) {
                                (Some(slope), Some(xp), Some(fp)) => Some(slope * (xi - xp) + fp),
                                _ => None,
                            };

                            let ans = match ans {
                                Some(ans) => Some(ans),
                                _ => match (slope, xp.get_data(j + 1), fp.get_data(j + 1)) {
                                    (Some(slope), Some(xp), Some(fp)) => {
                                        Some(slope * (xi - xp) + fp)
                                    }
                                    _ => None,
                                },
                            };
                            let ans = match ans {
                                Some(ans) => Some(ans),
                                _ => {
                                    if fp.get_data(j) == fp.get_data(j + 1) {
                                        fp.get_data(j)
                                    } else {
                                        None
                                    }
                                }
                            };
                            ans
                        }
                    }
                }
                _ => None,
            })
            .collect::<Vec<_>>();
        res = Float64Vector::from(data);
    }
    Ok(Arc::new(res) as _)
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{Int32Vector, Int64Vector};

    use super::*;

    #[test]
    fn test_basic_interp() {
        // x xp fp
        let x = 2.5;
        let xp = vec![1i32, 2i32, 3i32];
        let fp = vec![3i64, 2i64, 0i64];

        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(vec![x])),
            Arc::new(Int32Vector::from_vec(xp.clone())),
            Arc::new(Int64Vector::from_vec(fp.clone())),
        ];
        let vector = interp(&args).unwrap();
        assert_eq!(vector.len(), 1);

        assert!(matches!(vector.get(0), Value::Float64(v) if v == 1.0));

        let x = vec![0.0, 1.0, 1.5, 3.2];
        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(x)),
            Arc::new(Int32Vector::from_vec(xp)),
            Arc::new(Int64Vector::from_vec(fp)),
        ];
        let vector = interp(&args).unwrap();
        assert_eq!(4, vector.len());
        let res = [3.0, 3.0, 2.5, 0.0];
        for (i, item) in res.iter().enumerate().take(vector.len()) {
            assert!(matches!(vector.get(i), Value::Float64(v) if v == *item));
        }
    }

    #[test]
    fn test_left_right() {
        let x = vec![0.0, 1.0, 1.5, 2.0, 3.0, 4.0];
        let xp = vec![1i32, 2i32, 3i32];
        let fp = vec![3i64, 2i64, 0i64];
        let left = vec![-1];
        let right = vec![2];

        let expect = [-1.0, 3.0, 2.5, 2.0, 0.0, 2.0];

        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(x)),
            Arc::new(Int32Vector::from_vec(xp)),
            Arc::new(Int64Vector::from_vec(fp)),
            Arc::new(Int32Vector::from_vec(left)),
            Arc::new(Int32Vector::from_vec(right)),
        ];
        let vector = interp(&args).unwrap();

        for (i, item) in expect.iter().enumerate().take(vector.len()) {
            assert!(matches!(vector.get(i), Value::Float64(v) if v == *item));
        }
    }

    #[test]
    fn test_scalar_interpolation_point() {
        // x=0 output:0
        let x = vec![0];
        let xp = vec![0, 1, 5];
        let fp = vec![0, 1, 5];
        let args: Vec<VectorRef> = vec![
            Arc::new(Int64Vector::from_vec(x.clone())),
            Arc::new(Int64Vector::from_vec(xp.clone())),
            Arc::new(Int64Vector::from_vec(fp.clone())),
        ];
        let vector = interp(&args).unwrap();
        assert!(matches!(vector.get(0), Value::Float64(v) if v == x[0] as f64));

        // x=0.3 output:0.3
        let x = vec![0.3];
        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(x.clone())),
            Arc::new(Int64Vector::from_vec(xp.clone())),
            Arc::new(Int64Vector::from_vec(fp.clone())),
        ];
        let vector = interp(&args).unwrap();
        assert!(matches!(vector.get(0), Value::Float64(v) if v == x[0]));

        // x=None output:Null
        let input = vec![None, Some(0.0), Some(0.3)];
        let x = Float64Vector::from(input);
        let args: Vec<VectorRef> = vec![
            Arc::new(x),
            Arc::new(Int64Vector::from_vec(xp)),
            Arc::new(Int64Vector::from_vec(fp)),
        ];
        let vector = interp(&args).unwrap();
        assert!(matches!(vector.get(0), Value::Null));
    }
}
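
The deleted `interp` above applies the usual piecewise-linear formula f(x) = fp[j] + (x - xp[j]) * (fp[j+1] - fp[j]) / (xp[j+1] - xp[j]), where j indexes the last sample point not greater than x, with the `left`/`right` fill values used outside the sample range. A minimal scalar sketch of that step (illustrative only; the real function works on GreptimeDB vectors and handles nulls and custom fill values):

// Illustrative sketch of one interpolation step; mirrors test_basic_interp.
fn lerp_at(x: f64, xp: &[f64], fp: &[f64]) -> f64 {
    assert!(xp.len() == fp.len() && xp.len() >= 2);
    if x <= xp[0] {
        return fp[0]; // "left" fill defaults to fp[0]
    }
    if x >= xp[xp.len() - 1] {
        return fp[fp.len() - 1]; // "right" fill defaults to fp[last]
    }
    // index of the last sample point not greater than x
    let j = xp.iter().rposition(|&v| v <= x).unwrap();
    let slope = (fp[j + 1] - fp[j]) / (xp[j + 1] - xp[j]);
    fp[j] + slope * (x - xp[j])
}

fn main() {
    // xp = [1, 2, 3], fp = [3, 2, 0], x = 2.5 -> slope = -2, result = 1.0.
    assert_eq!(lerp_at(2.5, &[1.0, 2.0, 3.0], &[3.0, 2.0, 0.0]), 1.0);
}
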
@@ -14,14 +14,17 @@
 mod convert;
 mod distance;
+mod elem_product;
 mod elem_sum;
 pub mod impl_conv;
+pub(crate) mod product;
 mod scalar_add;
 mod scalar_mul;
 mod sub;
 pub(crate) mod sum;
 mod vector_div;
 mod vector_mul;
+mod vector_norm;
 
 use std::sync::Arc;
 
@@ -46,8 +49,10 @@ impl VectorFunction {
 
         // vector calculation
         registry.register(Arc::new(vector_mul::VectorMulFunction));
+        registry.register(Arc::new(vector_norm::VectorNormFunction));
         registry.register(Arc::new(vector_div::VectorDivFunction));
         registry.register(Arc::new(sub::SubFunction));
         registry.register(Arc::new(elem_sum::ElemSumFunction));
+        registry.register(Arc::new(elem_product::ElemProductFunction));
     }
 }
142  src/common/function/src/scalars/vector/elem_product.rs  Normal file
@@ -0,0 +1,142 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::fmt::Display;

use common_query::error::InvalidFuncArgsSnafu;
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};

const NAME: &str = "vec_elem_product";

/// Multiplies all elements of the vector, returns a scalar.
///
/// # Example
///
/// ```sql
/// SELECT vec_elem_product(parse_vec('[1.0, 2.0, 3.0, 4.0]'));
///
/// +-----------------------------------------------------------+
/// | vec_elem_product(parse_vec(Utf8("[1.0, 2.0, 3.0, 4.0]"))) |
/// +-----------------------------------------------------------+
/// | 24.0                                                      |
/// +-----------------------------------------------------------+
/// ```
#[derive(Debug, Clone, Default)]
pub struct ElemProductFunction;

impl Function for ElemProductFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(
        &self,
        _input_types: &[ConcreteDataType],
    ) -> common_query::error::Result<ConcreteDataType> {
        Ok(ConcreteDataType::float32_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::one_of(
            vec![
                TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
                TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(
        &self,
        _func_ctx: FunctionContext,
        columns: &[VectorRef],
    ) -> common_query::error::Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                )
            }
        );
        let arg0 = &columns[0];

        let len = arg0.len();
        let mut result = Float32VectorBuilder::with_capacity(len);
        if len == 0 {
            return Ok(result.to_vector());
        }

        let arg0_const = as_veclit_if_const(arg0)?;

        for i in 0..len {
            let arg0 = match arg0_const.as_ref() {
                Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
                None => as_veclit(arg0.get_ref(i))?,
            };
            let Some(arg0) = arg0 else {
                result.push_null();
                continue;
            };
            result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).product()));
        }

        Ok(result.to_vector())
    }
}

impl Display for ElemProductFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::StringVector;

    use super::*;
    use crate::function::FunctionContext;

    #[test]
    fn test_elem_product() {
        let func = ElemProductFunction;

        let input0 = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            None,
        ]));

        let result = func.eval(FunctionContext::default(), &[input0]).unwrap();

        let result = result.as_ref();
        assert_eq!(result.len(), 3);
        assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
        assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(120.0));
        assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
    }
}
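
One design point worth noting in `eval` above: when the input column is a constant vector, `as_veclit_if_const` decodes the vector literal once and every row borrows it, rather than re-parsing the same literal per row. A rough sketch of that hoisting pattern with hypothetical stand-in helpers (`parse_veclit` below is not a GreptimeDB API):

use std::borrow::Cow;

// Hypothetical stand-in for as_veclit: decode "[1.0,2.0]"-style text into f32s.
fn parse_veclit(cell: Option<&str>) -> Option<Vec<f32>> {
    cell.map(|s| {
        s.trim_matches(|c| c == '[' || c == ']')
            .split(',')
            .filter_map(|t| t.trim().parse().ok())
            .collect()
    })
}

// Decode once when the whole column is a single constant; per row otherwise.
fn elem_product(rows: &[Option<&str>], const_cell: Option<&str>) -> Vec<Option<f32>> {
    let hoisted = const_cell.and_then(|s| parse_veclit(Some(s)));
    rows.iter()
        .map(|cell| {
            let vec: Option<Cow<[f32]>> = match hoisted.as_ref() {
                Some(v) => Some(Cow::Borrowed(v.as_slice())),
                None => parse_veclit(*cell).map(Cow::Owned),
            };
            vec.map(|v| v.iter().product())
        })
        .collect()
}

fn main() {
    let rows = vec![Some("[1.0,2.0,3.0]"), Some("[4.0,5.0,6.0]"), None];
    assert_eq!(elem_product(&rows, None), vec![Some(6.0), Some(120.0), None]);
}
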
211  src/common/function/src/scalars/vector/product.rs  Normal file
@@ -0,0 +1,211 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::AccumulatorCreatorFunction;
use datatypes::prelude::{ConcreteDataType, Value, *};
use datatypes::vectors::VectorRef;
use nalgebra::{Const, DVectorView, Dyn, OVector};
use snafu::ensure;

use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};

/// Aggregates by multiplying elements across the same dimension, returns a vector.
#[derive(Debug, Default)]
pub struct VectorProduct {
    product: Option<OVector<f32, Dyn>>,
    has_null: bool,
}

#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct VectorProductCreator {}

impl AggregateFunctionCreator for VectorProductCreator {
    fn creator(&self) -> AccumulatorCreatorFunction {
        let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
            ensure!(
                types.len() == 1,
                InvalidFuncArgsSnafu {
                    err_msg: format!(
                        "The length of the args is not correct, expect exactly one, have: {}",
                        types.len()
                    )
                }
            );
            let input_type = &types[0];
            match input_type {
                ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
                    Ok(Box::new(VectorProduct::default()))
                }
                _ => {
                    let err_msg = format!(
                        "\"VEC_PRODUCT\" aggregate function does not support data type {:?}",
                        input_type.logical_type_id(),
                    );
                    CreateAccumulatorSnafu { err_msg }.fail()?
                }
            }
        });
        creator
    }

    fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
        Ok(ConcreteDataType::binary_datatype())
    }

    fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
        Ok(vec![self.output_type()?])
    }
}

impl VectorProduct {
    fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
        self.product.get_or_insert_with(|| {
            OVector::from_iterator_generic(Dyn(len), Const::<1>, (0..len).map(|_| 1.0))
        })
    }

    fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
        if values.is_empty() || self.has_null {
            return Ok(());
        };
        let column = &values[0];
        let len = column.len();

        match as_veclit_if_const(column)? {
            Some(column) => {
                let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
                *self.inner(vec_column.len()) =
                    (*self.inner(vec_column.len())).component_mul(&vec_column);
            }
            None => {
                for i in 0..len {
                    let Some(arg0) = as_veclit(column.get_ref(i))? else {
                        if is_update {
                            self.has_null = true;
                            self.product = None;
                        }
                        return Ok(());
                    };
                    let vec_column = DVectorView::from_slice(&arg0, arg0.len());
                    *self.inner(vec_column.len()) =
                        (*self.inner(vec_column.len())).component_mul(&vec_column);
                }
            }
        }
        Ok(())
    }
}

impl Accumulator for VectorProduct {
    fn state(&self) -> common_query::error::Result<Vec<Value>> {
        self.evaluate().map(|v| vec![v])
    }

    fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
        self.update(values, true)
    }

    fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
        self.update(states, false)
    }

    fn evaluate(&self) -> common_query::error::Result<Value> {
        match &self.product {
            None => Ok(Value::Null),
            Some(vector) => {
                let v = vector.as_slice();
                Ok(Value::from(veclit_to_binlit(v)))
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{ConstantVector, StringVector};

    use super::*;

    #[test]
    fn test_update_batch() {
        // test update empty batch, expect not updating anything
        let mut vec_product = VectorProduct::default();
        vec_product.update_batch(&[]).unwrap();
        assert!(vec_product.product.is_none());
        assert!(!vec_product.has_null);
        assert_eq!(Value::Null, vec_product.evaluate().unwrap());

        // test update one not-null value
        let mut vec_product = VectorProduct::default();
        let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
            "[1.0,2.0,3.0]".to_string(),
        )]))];
        vec_product.update_batch(&v).unwrap();
        assert_eq!(
            Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
            vec_product.evaluate().unwrap()
        );

        // test update one null value
        let mut vec_product = VectorProduct::default();
        let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
        vec_product.update_batch(&v).unwrap();
        assert_eq!(Value::Null, vec_product.evaluate().unwrap());

        // test update no null-value batch
        let mut vec_product = VectorProduct::default();
        let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
        ]))];
        vec_product.update_batch(&v).unwrap();
        assert_eq!(
            Value::from(veclit_to_binlit(&[28.0, 80.0, 162.0])),
            vec_product.evaluate().unwrap()
        );

        // test update null-value batch
        let mut vec_product = VectorProduct::default();
        let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            None,
            Some("[7.0,8.0,9.0]".to_string()),
        ]))];
        vec_product.update_batch(&v).unwrap();
        assert_eq!(Value::Null, vec_product.evaluate().unwrap());

        // test update with constant vector
        let mut vec_product = VectorProduct::default();
        let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
            Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
            4,
        ))];

        vec_product.update_batch(&v).unwrap();

        assert_eq!(
            Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
            vec_product.evaluate().unwrap()
        );
    }
}
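
For orientation, the accumulator above keeps a running element-wise product across rows; with the batch from `test_update_batch`, [1,2,3] times [4,5,6] times [7,8,9], taken per dimension, gives [28, 80, 162]. A plain-Rust restatement of that accumulation (illustrative only; the real code goes through `as_veclit`, nalgebra, and the null and constant-column handling shown above):

// Illustrative sketch of the element-wise running product kept by VectorProduct.
fn vec_product(rows: &[Vec<f32>]) -> Option<Vec<f32>> {
    let mut acc: Option<Vec<f32>> = None;
    for row in rows {
        let acc = acc.get_or_insert_with(|| vec![1.0; row.len()]);
        for (a, x) in acc.iter_mut().zip(row) {
            *a *= *x; // multiply across the same dimension
        }
    }
    acc
}

fn main() {
    let rows = vec![
        vec![1.0, 2.0, 3.0],
        vec![4.0, 5.0, 6.0],
        vec![7.0, 8.0, 9.0],
    ];
    // Matches test_update_batch: [1*4*7, 2*5*8, 3*6*9].
    assert_eq!(vec_product(&rows), Some(vec![28.0, 80.0, 162.0]));
}
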
Some files were not shown because too many files have changed in this diff.