Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-04 04:12:55 +00:00)

Compare commits: 40 commits, v0.12.0-ni ... chore/debu
Commits compared (SHA1):

1e37847f48, 2b89970d45, 53d006292d, d18c8b5e16, e0949c4a11, 5cf931c417, cc5b1d42b0, 55b7656956,
75e4f307c9, 89f2e15ffb, 13ed10556a, d1108ab581, 1287d4cb9f, 109fe04d17, f1eb76f489, 11bab0c47c,
588f6755f0, dad8ac6f71, ef13c52814, 7471f55c2e, f4b2d393be, 0cf44e1e47, 00ad27dd2e, 5ba8bd09fb,
a9f21915ef, 039989f77b, abf34b845c, 4051be4214, 5e88c80394, 6a46f391cc, c96903e60c, a23f269bb1,
f33b378e45, 267941bbb5, 074846bbc2, 88d46a38ae, de0beabf34, 68dd2916fb, d51b65a8bf, 2082c4b6e4
@@ -54,7 +54,7 @@ runs:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/$PROFILE_TARGET/greptime
target-files: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

@@ -72,6 +72,6 @@ runs:
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
target-files: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
4 .github/actions/build-images/action.yml vendored

@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}
14 .github/actions/build-linux-artifacts/action.yml vendored
@@ -48,19 +48,7 @@ runs:
path: /tmp/greptime-*.log
retention-days: 3

- name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime without pyo3
- name: Build greptime
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:

@@ -90,5 +90,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
@@ -33,15 +33,6 @@ runs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2

- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy

- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:

@@ -76,5 +67,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
version: ${{ inputs.version }}
14 .github/actions/upload-artifacts/action.yml vendored

@@ -4,8 +4,8 @@ inputs:
artifacts-dir:
description: Directory to store artifacts
required: true
target-file:
description: The path of the target artifact
target-files:
description: The multiple target files to upload, separated by comma
required: false
version:
description: Version of the artifact

@@ -18,12 +18,16 @@ runs:
using: composite
steps:
- name: Create artifacts directory
if: ${{ inputs.target-file != '' }}
if: ${{ inputs.target-files != '' }}
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
mkdir -p ${{ inputs.artifacts-dir }} && \
cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
set -e
mkdir -p ${{ inputs.artifacts-dir }}
IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
for file in "${FILES[@]}"; do
cp "$file" ${{ inputs.artifacts-dir }}/
done

# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
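For reference, the new `target-files` handling is just a comma split followed by one copy per entry; a minimal sketch of the same logic in Rust (the input strings below are made-up examples, not values from this change):

```rust
use std::fs;
use std::path::Path;

/// Copy each file from a comma-separated list into `artifacts_dir`,
/// mirroring the bash loop in upload-artifacts/action.yml.
fn copy_artifacts(target_files: &str, artifacts_dir: &str) -> std::io::Result<()> {
    fs::create_dir_all(artifacts_dir)?; // `mkdir -p`
    for file in target_files.split(',').map(str::trim).filter(|f| !f.is_empty()) {
        let name = Path::new(file).file_name().expect("file path has a name");
        fs::copy(file, Path::new(artifacts_dir).join(name))?; // `cp "$file" dir/`
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    // Hypothetical inputs, analogous to the action's `target-files` and `artifacts-dir`.
    copy_artifacts("target/release/greptime,target/release/greptime.pdb", "artifacts")
}
```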
34 .github/workflows/develop.yml vendored
@@ -10,17 +10,6 @@ on:
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
- 'grafana/**'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'config/**'
|
||||
- '**.md'
|
||||
- '.dockerignore'
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
- 'grafana/**'
|
||||
workflow_dispatch:
|
||||
|
||||
name: CI
|
||||
@@ -84,7 +73,7 @@ jobs:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-toml"
|
||||
- name: Install taplo
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
||||
- name: Run taplo
|
||||
run: taplo format --check
|
||||
|
||||
@@ -107,7 +96,7 @@ jobs:
|
||||
shared-key: "build-binaries"
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
@@ -163,7 +152,7 @@ jobs:
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -220,7 +209,7 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||
cargo install cargo-fuzz cargo-gc-bin
|
||||
cargo install cargo-fuzz cargo-gc-bin --force
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -268,7 +257,7 @@ jobs:
|
||||
shared-key: "build-greptime-ci"
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime bianry
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
@@ -338,7 +327,7 @@ jobs:
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -487,7 +476,7 @@ jobs:
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -653,6 +642,7 @@ jobs:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
needs: [clippy, fmt]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
@@ -678,12 +668,6 @@ jobs:
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow numpy
|
||||
- name: Setup etcd server
|
||||
working-directory: tests-integration/fixtures/etcd
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
@@ -697,7 +681,7 @@ jobs:
|
||||
working-directory: tests-integration/fixtures/postgres
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
10 .github/workflows/nightly-ci.yml vendored
@@ -1,6 +1,6 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 23 * * 1-5"
|
||||
- cron: "0 23 * * 1-4"
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
@@ -91,18 +91,12 @@ jobs:
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow numpy
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
run: cargo nextest run -F dashboard
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
12 .github/workflows/release.yml vendored
@@ -222,18 +222,10 @@ jobs:
|
||||
arch: aarch64-apple-darwin
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: pyo3_backend,servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
|
||||
@@ -271,10 +263,6 @@ jobs:
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
|
||||
|
||||
54 Cargo.lock generated
@@ -2016,6 +2016,7 @@ dependencies = [
|
||||
name = "common-error"
|
||||
version = "0.12.0"
|
||||
dependencies = [
|
||||
"http 0.2.12",
|
||||
"snafu 0.8.5",
|
||||
"strum 0.25.0",
|
||||
"tonic 0.11.0",
|
||||
@@ -2350,6 +2351,8 @@ dependencies = [
|
||||
"snafu 0.8.5",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-metrics",
|
||||
"tokio-metrics-collector",
|
||||
"tokio-test",
|
||||
"tokio-util",
|
||||
]
|
||||
@@ -4061,6 +4064,7 @@ dependencies = [
|
||||
"get-size-derive2",
|
||||
"get-size2",
|
||||
"greptime-proto",
|
||||
"http 0.2.12",
|
||||
"hydroflow",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
@@ -4152,6 +4156,7 @@ dependencies = [
|
||||
"futures",
|
||||
"humantime-serde",
|
||||
"lazy_static",
|
||||
"log-query",
|
||||
"log-store",
|
||||
"meta-client",
|
||||
"opentelemetry-proto 0.5.0",
|
||||
@@ -5285,6 +5290,7 @@ dependencies = [
|
||||
"futures",
|
||||
"greptime-proto",
|
||||
"mockall",
|
||||
"parquet",
|
||||
"pin-project",
|
||||
"prost 0.12.6",
|
||||
"rand",
|
||||
@@ -6122,6 +6128,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"serde",
|
||||
"snafu 0.8.5",
|
||||
"table",
|
||||
]
|
||||
@@ -7470,8 +7477,7 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
|
||||
[[package]]
|
||||
name = "opendal"
|
||||
version = "0.50.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cb28bb6c64e116ceaf8dd4e87099d3cfea4a58e85e62b104fef74c91afba0f44"
|
||||
source = "git+https://github.com/GreptimeTeam/opendal.git?rev=c82605177f2feec83e49dcaa537c505639d94024#c82605177f2feec83e49dcaa537c505639d94024"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -8160,7 +8166,7 @@ dependencies = [
|
||||
"rand",
|
||||
"ring 0.17.8",
|
||||
"rust_decimal",
|
||||
"thiserror 2.0.4",
|
||||
"thiserror 2.0.6",
|
||||
"tokio",
|
||||
"tokio-rustls 0.26.0",
|
||||
"tokio-util",
|
||||
@@ -9098,8 +9104,10 @@ dependencies = [
|
||||
"humantime",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
"log-query",
|
||||
"meter-core",
|
||||
"meter-macros",
|
||||
"nalgebra 0.33.2",
|
||||
"num",
|
||||
"num-traits",
|
||||
"object-store",
|
||||
@@ -10952,6 +10960,7 @@ dependencies = [
|
||||
"json5",
|
||||
"jsonb",
|
||||
"lazy_static",
|
||||
"log-query",
|
||||
"loki-api",
|
||||
"mime_guess",
|
||||
"mysql_async",
|
||||
@@ -12434,11 +12443,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "2.0.4"
|
||||
version = "2.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490"
|
||||
checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
|
||||
dependencies = [
|
||||
"thiserror-impl 2.0.4",
|
||||
"thiserror-impl 2.0.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -12454,9 +12463,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "2.0.4"
|
||||
version = "2.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061"
|
||||
checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -12612,9 +12621,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.40.0"
|
||||
version = "1.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
|
||||
checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
@@ -12650,6 +12659,31 @@ dependencies = [
|
||||
"syn 2.0.90",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-metrics"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112"
|
||||
dependencies = [
|
||||
"futures-util",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-metrics-collector"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8092b7a97ed5dac2f44892db190eca8f476ede0fa585bc87664de4151cd0b64"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"parking_lot 0.12.3",
|
||||
"prometheus",
|
||||
"tokio",
|
||||
"tokio-metrics",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres"
|
||||
version = "0.7.12"
|
||||
|
||||
@@ -126,6 +126,7 @@ futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
|
||||
hex = "0.4"
|
||||
http = "0.2"
|
||||
humantime = "2.1"
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
@@ -134,6 +135,7 @@ lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
nalgebra = "0.33"
|
||||
notify = "6.1"
|
||||
num_cpus = "1.16"
|
||||
once_cell = "1.18"
|
||||
@@ -238,6 +240,7 @@ file-engine = { path = "src/file-engine" }
|
||||
flow = { path = "src/flow" }
|
||||
frontend = { path = "src/frontend", default-features = false }
|
||||
index = { path = "src/index" }
|
||||
log-query = { path = "src/log-query" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
|
||||
16 README.md
@@ -70,23 +70,23 @@ Our core developers have been building time-series data platforms for years. Bas
|
||||
|
||||
* **Unified Processing of Metrics, Logs, and Events**
|
||||
|
||||
GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
|
||||
GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
|
||||
|
||||
* **Cloud-native Distributed Database**
|
||||
|
||||
Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.
|
||||
Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.
|
||||
|
||||
* **Performance and Cost-effective**
|
||||
|
||||
Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).
|
||||
Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).
|
||||
|
||||
* **Cloud-Edge Collaboration**
|
||||
|
||||
GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
|
||||
GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
|
||||
|
||||
* **Multi-protocol Ingestion, SQL & PromQL Ready**
|
||||
|
||||
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
|
||||
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
|
||||
|
||||
For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
|
||||
|
||||
@@ -138,7 +138,7 @@ Check the prerequisite:
|
||||
|
||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
||||
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
||||
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
||||
|
||||
Build GreptimeDB binary:
|
||||
|
||||
@@ -154,6 +154,10 @@ cargo run -- standalone start
|
||||
|
||||
## Tools & Extensions
|
||||
|
||||
### Kubernetes
|
||||
|
||||
- [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
|
||||
|
||||
### Dashboard
|
||||
|
||||
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
@@ -156,6 +157,11 @@
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
@@ -195,6 +201,7 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
@@ -421,7 +428,7 @@
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
@@ -460,7 +467,7 @@
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
@@ -484,6 +491,11 @@
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
|
||||
@@ -294,7 +294,7 @@ data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
@@ -478,7 +478,7 @@ auto_flush_interval = "1h"
|
||||
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
@@ -576,6 +576,30 @@ apply_on_query = "auto"
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## The options for bloom filter index in Mito engine.
|
||||
[region_engine.mito.bloom_filter_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for the index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The maximum in-flight write bytes.
|
||||
## @toml2docs:none-default
|
||||
#+ max_in_flight_write_bytes = "500MB"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
|
||||
@@ -18,6 +18,10 @@ max_concurrent_queries = 0
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
## The maximum in-flight write bytes.
|
||||
## @toml2docs:none-default
|
||||
#+ max_in_flight_write_bytes = "500MB"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
@@ -615,6 +619,30 @@ apply_on_query = "auto"
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## The options for bloom filter in Mito engine.
|
||||
[region_engine.mito.bloom_filter_index]
|
||||
|
||||
## Whether to create the bloom filter on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the bloom filter on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the bloom filter on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for bloom filter creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
|
||||
@@ -13,8 +13,6 @@ RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
# Install protoc
|
||||
@@ -43,8 +41,6 @@ RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
WORKDIR /greptime
|
||||
|
||||
@@ -20,10 +20,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip
|
||||
pkg-config
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
@@ -46,15 +43,8 @@ ARG OUTPUT_DIR
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
|
||||
-y install ca-certificates \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip \
|
||||
curl
|
||||
|
||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
@@ -7,9 +7,7 @@ RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel
|
||||
centos-release-scl
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
|
||||
@@ -8,15 +8,8 @@ ARG TARGET_BIN=greptime
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip \
|
||||
curl
|
||||
|
||||
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
@@ -20,3 +20,31 @@ Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
```

## Using `perf`

First find the pid of GreptimeDB:

Using `perf record` to profile GreptimeDB, at the sampling frequency of 99 hertz, and a duration of 60 seconds:

```bash
perf record -p <pid> --call-graph dwarf -F 99 -- sleep 60
```

The result will be saved to file `perf.data`.

Then

```bash
perf script --no-inline > perf.out
```

Produce a flame graph out of it:

```bash
git clone https://github.com/brendangregg/FlameGraph

FlameGraph/stackcollapse-perf.pl perf.out > perf.folded

FlameGraph/flamegraph.pl perf.folded > perf.svg
```
@@ -25,6 +25,7 @@ pub enum PermissionReq<'a> {
|
||||
GrpcRequest(&'a Request),
|
||||
SqlStatement(&'a Statement),
|
||||
PromQuery,
|
||||
LogQuery,
|
||||
Opentsdb,
|
||||
LineProtocol,
|
||||
PromStoreWrite,
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::CacheNotGet;
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||
@@ -37,6 +37,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::metrics::{
|
||||
METRIC_CATALOG_KV_BATCH_GET, METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET,
|
||||
METRIC_META_CLIENT_GET,
|
||||
};
|
||||
|
||||
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
|
||||
@@ -292,7 +293,7 @@ impl KvBackend for CachedKvBackend {
|
||||
}
|
||||
.map_err(|e| {
|
||||
GetKvCacheSnafu {
|
||||
err_msg: e.to_string(),
|
||||
err_msg: e.output_msg(),
|
||||
}
|
||||
.build()
|
||||
});
|
||||
@@ -445,6 +446,8 @@ impl KvBackend for MetaKvBackend {
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
|
||||
let _timer = METRIC_META_CLIENT_GET.start_timer();
|
||||
|
||||
let mut response = self
|
||||
.client
|
||||
.range(RangeRequest::new().with_key(key))
|
||||
|
||||
@@ -38,7 +38,7 @@ pub fn new_table_cache(
|
||||
) -> TableCache {
|
||||
let init = init_factory(table_info_cache, table_name_cache);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
|
||||
@@ -34,4 +34,6 @@ lazy_static! {
        register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
    pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
        register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
    pub static ref METRIC_META_CLIENT_GET: Histogram =
        register_histogram!("greptime_meta_client_get", "meta client get").unwrap();
}
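The `METRIC_META_CLIENT_GET` histogram added here is used via `start_timer()` in the `MetaKvBackend::get` change above; a minimal sketch of that pattern with the `prometheus` and `lazy_static` crates (the metric name below is made up):

```rust
use lazy_static::lazy_static;
use prometheus::{register_histogram, Histogram};

lazy_static! {
    // Hypothetical metric, registered the same way as METRIC_META_CLIENT_GET.
    static ref METRIC_DEMO_GET: Histogram =
        register_histogram!("demo_get_duration_seconds", "demo get latency").unwrap();
}

fn timed_get() {
    // The timer observes the elapsed time into the histogram when dropped.
    let _timer = METRIC_DEMO_GET.start_timer();
    // ... the operation being measured ...
}

fn main() {
    timed_get();
    println!("observed {} samples", METRIC_DEMO_GET.get_sample_count());
}
```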
@@ -15,7 +15,7 @@ cache.workspace = true
|
||||
catalog.workspace = true
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
client.workspace = true
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
@@ -56,7 +56,6 @@ tokio.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
common-version.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -62,6 +62,13 @@ impl Instance {
|
||||
pub fn datanode(&self) -> &Datanode {
|
||||
&self.datanode
|
||||
}
|
||||
|
||||
/// Get mutable Datanode instance for changing some internal state, before starting it.
|
||||
// Useful for wrapping Datanode instance. Please do not remove this method even if you find
|
||||
// nowhere it is called.
|
||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||
&mut self.datanode
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
@@ -22,6 +22,7 @@ use catalog::information_schema::InformationExtension;
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use client::api::v1::meta::RegionRole;
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_base::Plugins;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
@@ -152,6 +153,7 @@ pub struct StandaloneOptions {
|
||||
pub tracing: TracingOptions,
|
||||
pub init_regions_in_background: bool,
|
||||
pub init_regions_parallelism: usize,
|
||||
pub max_in_flight_write_bytes: Option<ReadableSize>,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -181,6 +183,7 @@ impl Default for StandaloneOptions {
|
||||
tracing: TracingOptions::default(),
|
||||
init_regions_in_background: false,
|
||||
init_regions_parallelism: 16,
|
||||
max_in_flight_write_bytes: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -218,6 +221,7 @@ impl StandaloneOptions {
|
||||
user_provider: cloned_opts.user_provider,
|
||||
// Handle the export metrics task run by standalone to frontend for execution
|
||||
export_metrics: cloned_opts.export_metrics,
|
||||
max_in_flight_write_bytes: cloned_opts.max_in_flight_write_bytes,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ license.workspace = true
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
http.workspace = true
|
||||
snafu.workspace = true
|
||||
strum.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
@@ -18,9 +18,30 @@ pub mod ext;
pub mod mock;
pub mod status_code;

use http::{HeaderMap, HeaderValue};
pub use snafu;

// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
// please define in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";

/// Create a http header map from error code and message.
/// using `GREPTIME_DB_HEADER_ERROR_CODE` and `GREPTIME_DB_HEADER_ERROR_MSG` as keys.
pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
    let mut header = HeaderMap::new();

    let msg = HeaderValue::from_str(msg).unwrap_or_else(|_| {
        HeaderValue::from_bytes(
            &msg.as_bytes()
                .iter()
                .flat_map(|b| std::ascii::escape_default(*b))
                .collect::<Vec<u8>>(),
        )
        .expect("Already escaped string should be valid ascii")
    });

    header.insert(GREPTIME_DB_HEADER_ERROR_CODE, code.into());
    header.insert(GREPTIME_DB_HEADER_ERROR_MSG, msg);
    header
}
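A small sketch of how the returned header map behaves, assuming the `common-error` and `http` crates are available as dependencies (the error code and message below are made-up examples):

```rust
use common_error::{
    from_err_code_msg_to_header, GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG,
};

fn main() {
    // 1004 and the message are arbitrary examples, not a specific GreptimeDB status code.
    let headers = from_err_code_msg_to_header(1004, "table not found\ncaused by: demo");

    // The numeric code is always a valid header value.
    assert_eq!(headers[GREPTIME_DB_HEADER_ERROR_CODE].to_str().unwrap(), "1004");

    // The embedded newline is not a valid header byte, so the fallback path escaped
    // every byte with `std::ascii::escape_default`, turning the newline into the two
    // characters `\` and `n`.
    let msg = headers[GREPTIME_DB_HEADER_ERROR_MSG].to_str().unwrap();
    assert_eq!(msg, "table not found\\ncaused by: demo");
}
```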
@@ -33,7 +33,7 @@ geo-types = { version = "0.7", optional = true }
|
||||
geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
jsonb.workspace = true
|
||||
nalgebra = "0.33"
|
||||
nalgebra.workspace = true
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
|
||||
@@ -26,3 +26,4 @@ pub mod function_registry;
|
||||
pub mod handlers;
|
||||
pub mod helper;
|
||||
pub mod state;
|
||||
pub mod utils;
|
||||
|
||||
@@ -32,6 +32,7 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
|
||||
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::vector::sum::VectorSumCreator;
|
||||
|
||||
/// A function creates `AggregateFunctionCreator`.
|
||||
/// "Aggregator" *is* AggregatorFunction. Since the later one is long, we named an short alias for it.
|
||||
@@ -91,6 +92,7 @@ impl AggregateFunctions {
|
||||
register_aggr_func!("argmin", 1, ArgminAccumulatorCreator);
|
||||
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
|
||||
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
|
||||
register_aggr_func!("vec_sum", 1, VectorSumCreator);
|
||||
|
||||
#[cfg(feature = "geo")]
|
||||
register_aggr_func!(
|
||||
|
||||
@@ -204,20 +204,10 @@ impl PatternAst {
    fn convert_literal(column: &str, pattern: &str) -> Expr {
        logical_expr::col(column).like(logical_expr::lit(format!(
            "%{}%",
            Self::escape_pattern(pattern)
            crate::utils::escape_like_pattern(pattern)
        )))
    }

    fn escape_pattern(pattern: &str) -> String {
        pattern
            .chars()
            .flat_map(|c| match c {
                '\\' | '%' | '_' => vec!['\\', c],
                _ => vec![c],
            })
            .collect::<String>()
    }

    /// Transform this AST with preset rules to make it correct.
    fn transform_ast(self) -> Result<Self> {
        self.transform_up(Self::collapse_binary_branch_fn)
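The replacement helper `crate::utils::escape_like_pattern` is not shown in this diff; presumably it keeps the behavior of the removed `escape_pattern`, escaping `\`, `%` and `_` so they match literally inside a SQL `LIKE` pattern. A standalone sketch of that behavior:

```rust
/// Escape `\`, `%` and `_` so they are treated literally inside a LIKE pattern.
/// Sketch only; the actual `crate::utils::escape_like_pattern` is not shown in this diff.
fn escape_like_pattern(pattern: &str) -> String {
    pattern
        .chars()
        .flat_map(|c| match c {
            '\\' | '%' | '_' => vec!['\\', c],
            _ => vec![c],
        })
        .collect()
}

fn main() {
    // `100%_done` must not be interpreted as LIKE wildcards.
    assert_eq!(escape_like_pattern("100%_done"), "100\\%\\_done");
    // Wrapping with `%...%` afterwards gives the substring-match pattern used above.
    let like = format!("%{}%", escape_like_pattern("100%_done"));
    assert_eq!(like, "%100\\%\\_done%");
}
```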
@@ -14,9 +14,14 @@
|
||||
|
||||
mod convert;
|
||||
mod distance;
|
||||
pub(crate) mod impl_conv;
|
||||
mod elem_sum;
|
||||
pub mod impl_conv;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
mod sub;
|
||||
pub(crate) mod sum;
|
||||
mod vector_div;
|
||||
mod vector_mul;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -38,5 +43,11 @@ impl VectorFunction {
|
||||
// scalar calculation
|
||||
registry.register(Arc::new(scalar_add::ScalarAddFunction));
|
||||
registry.register(Arc::new(scalar_mul::ScalarMulFunction));
|
||||
|
||||
// vector calculation
|
||||
registry.register(Arc::new(vector_mul::VectorMulFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(sub::SubFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
}
|
||||
}
|
||||
|
||||
129 src/common/function/src/scalars/vector/elem_sum.rs Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_elem_sum";
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ElemSumFunction;
|
||||
|
||||
impl Function for ElemSumFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
|
||||
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).sum()));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ElemSumFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
|
||||
#[test]
|
||||
fn test_elem_sum() {
|
||||
let func = ElemSumFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(15.0));
|
||||
assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
|
||||
}
|
||||
}
|
||||
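The numeric core of `vec_elem_sum` above is the `DVectorView::from_slice(..).sum()` call; a minimal standalone sketch with `nalgebra`, leaving out the GreptimeDB vector parsing and builder types:

```rust
use nalgebra::DVectorView;

fn main() {
    // The parsed vector literal, e.g. from the string "[1.0,2.0,3.0]".
    let elems: [f32; 3] = [1.0, 2.0, 3.0];

    // View the slice as a dynamic column vector and sum its elements.
    let sum = DVectorView::from_slice(&elems, elems.len()).sum();

    assert_eq!(sum, 6.0);
    println!("vec_elem_sum([1.0,2.0,3.0]) = {sum}");
}
```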
223 src/common/function/src/scalars/vector/sub.rs Normal file
@@ -0,0 +1,223 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_sub";
|
||||
|
||||
/// Subtracts corresponding elements of two vectors, returns a vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_sub("[1.0, 1.0]", "[1.0, 2.0]")) as result;
|
||||
///
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_sub(Utf8("[1.0, 1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | [0,-1] |
|
||||
/// +---------------------------------------------------------------+
|
||||
///
|
||||
/// -- Negative scalar to simulate subtraction
|
||||
/// SELECT vec_to_string(vec_sub('[-1.0, -1.0]', '[1.0, 2.0]'));
|
||||
///
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_sub(Utf8("[-1.0, -1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | [-2,-3] |
|
||||
/// +-----------------------------------------------------------------+
|
||||
///
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct SubFunction;
|
||||
|
||||
impl Function for SubFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
let (Some(arg0), Some(arg1)) = (arg0, arg1) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
|
||||
let vec_res = vec0 - vec1;
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for SubFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sub() {
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[0.0, 1.0, 2.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[-2.0, 0.0, 2.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sub_error() {
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0, input1]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 4, args 1: 3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
202
src/common/function/src/scalars/vector/sum.rs
Normal file
202
src/common/function/src/scalars/vector/sum.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
|
||||
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
|
||||
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
|
||||
use common_query::prelude::AccumulatorCreatorFunction;
|
||||
use datatypes::prelude::{ConcreteDataType, Value, *};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use nalgebra::{Const, DVectorView, Dyn, OVector};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VectorSum {
|
||||
sum: Option<OVector<f32, Dyn>>,
|
||||
has_null: bool,
|
||||
}
|
||||
|
||||
#[as_aggr_func_creator]
|
||||
#[derive(Debug, Default, AggrFuncTypeStore)]
|
||||
pub struct VectorSumCreator {}
|
||||
|
||||
impl AggregateFunctionCreator for VectorSumCreator {
|
||||
fn creator(&self) -> AccumulatorCreatorFunction {
|
||||
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
|
||||
ensure!(
|
||||
types.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let input_type = &types[0];
|
||||
match input_type {
|
||||
ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
|
||||
Ok(Box::new(VectorSum::default()))
|
||||
}
|
||||
_ => {
|
||||
let err_msg = format!(
|
||||
"\"VEC_SUM\" aggregate function not support data type {:?}",
|
||||
input_type.logical_type_id(),
|
||||
);
|
||||
CreateAccumulatorSnafu { err_msg }.fail()?
|
||||
}
|
||||
}
|
||||
});
|
||||
creator
|
||||
}
|
||||
|
||||
fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
|
||||
Ok(vec![self.output_type()?])
|
||||
}
|
||||
}
|
||||
|
||||
impl VectorSum {
|
||||
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
|
||||
self.sum
|
||||
.get_or_insert_with(|| OVector::zeros_generic(Dyn(len), Const::<1>))
|
||||
}
|
||||
|
||||
fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
|
||||
if values.is_empty() || self.has_null {
|
||||
return Ok(());
|
||||
};
|
||||
let column = &values[0];
|
||||
let len = column.len();
|
||||
|
||||
match as_veclit_if_const(column)? {
|
||||
Some(column) => {
|
||||
let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
|
||||
*self.inner(vec_column.len()) += vec_column;
|
||||
}
|
||||
None => {
|
||||
for i in 0..len {
|
||||
let Some(arg0) = as_veclit(column.get_ref(i))? else {
|
||||
if is_update {
|
||||
self.has_null = true;
|
||||
self.sum = None;
|
||||
}
|
||||
return Ok(());
|
||||
};
|
||||
let vec_column = DVectorView::from_slice(&arg0, arg0.len());
|
||||
*self.inner(vec_column.len()) += vec_column;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for VectorSum {
|
||||
fn state(&self) -> common_query::error::Result<Vec<Value>> {
|
||||
self.evaluate().map(|v| vec![v])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(values, true)
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
|
||||
self.update(states, false)
|
||||
}
|
||||
|
||||
fn evaluate(&self) -> common_query::error::Result<Value> {
|
||||
match &self.sum {
|
||||
None => Ok(Value::Null),
|
||||
Some(vector) => Ok(Value::from(veclit_to_binlit(vector.as_slice()))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{ConstantVector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_update_batch() {
|
||||
// test update empty batch, expect not updating anything
|
||||
let mut vec_sum = VectorSum::default();
|
||||
vec_sum.update_batch(&[]).unwrap();
|
||||
assert!(vec_sum.sum.is_none());
|
||||
assert!(!vec_sum.has_null);
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update one not-null value
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
|
||||
"[1.0,2.0,3.0]".to_string(),
|
||||
)]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update one null value
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update no null-value batch
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[12.0, 15.0, 18.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
|
||||
// test update null-value batch
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
None,
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
]))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(Value::Null, vec_sum.evaluate().unwrap());
|
||||
|
||||
// test update with constant vector
|
||||
let mut vec_sum = VectorSum::default();
|
||||
let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
|
||||
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
|
||||
4,
|
||||
))];
|
||||
vec_sum.update_batch(&v).unwrap();
|
||||
assert_eq!(
|
||||
Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
|
||||
vec_sum.evaluate().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
218
src/common/function/src/scalars/vector/vector_div.rs
Normal file
218
src/common/function/src/scalars/vector/vector_div.rs
Normal file
@@ -0,0 +1,218 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_div";
|
||||
|
||||
/// Divides corresponding elements of two vectors.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_div("[2, 4, 6]", "[2, 2, 2]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [1,2,3] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorDivFunction;
|
||||
|
||||
impl Function for VectorDivFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(arg0), Some(arg1)) = (arg0, arg1) {
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the vectors must match for division, have: {} vs {}",
|
||||
arg0.len(),
|
||||
arg1.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec0.component_div(&vec1);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorDivFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vector_mul() {
|
||||
let func = VectorDivFunction;
|
||||
|
||||
let vec0 = vec![1.0, 2.0, 3.0];
|
||||
let vec1 = vec![1.0, 1.0];
|
||||
let (len0, len1) = (vec0.len(), vec1.len());
|
||||
let input0 = Arc::new(StringVector::from(vec![Some(format!("{vec0:?}"))]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some(format!("{vec1:?}"))]));
|
||||
|
||||
let err = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!(
|
||||
"The length of the vectors must match for division, have: {} vs {}",
|
||||
len0, len1
|
||||
)
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[8.0,10.0,12.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[2.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
Some("[3.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[1.0, 2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[4.0, 5.0, 6.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,-2.0]".to_string())]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some("[0.0,0.0]".to_string())]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[f64::INFINITY as f32, f64::NEG_INFINITY as f32]).as_slice())
|
||||
);
|
||||
}
|
||||
}
|
||||
205
src/common/function/src/scalars/vector/vector_mul.rs
Normal file
205
src/common/function/src/scalars/vector/vector_mul.rs
Normal file
@@ -0,0 +1,205 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_mul";
|
||||
|
||||
/// Multiplies corresponding elements of two vectors.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_mul("[1, 2, 3]", "[1, 2, 3]")) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [1,4,9] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorMulFunction;
|
||||
|
||||
impl Function for VectorMulFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(arg0), Some(arg1)) = (arg0, arg1) {
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the vectors must match for multiplying, have: {} vs {}",
|
||||
arg0.len(),
|
||||
arg1.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
let vec_res = vec1.component_mul(&vec0);
|
||||
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorMulFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vector_mul() {
|
||||
let func = VectorMulFunction;
|
||||
|
||||
let vec0 = vec![1.0, 2.0, 3.0];
|
||||
let vec1 = vec![1.0, 1.0];
|
||||
let (len0, len1) = (vec0.len(), vec1.len());
|
||||
let input0 = Arc::new(StringVector::from(vec![Some(format!("{vec0:?}"))]));
|
||||
let input1 = Arc::new(StringVector::from(vec![Some(format!("{vec1:?}"))]));
|
||||
|
||||
let err = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!(
|
||||
"The length of the vectors must match for multiplying, have: {} vs {}",
|
||||
len0, len1
|
||||
)
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[8.0,10.0,12.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[2.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
Some("[3.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[1.0, 2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[16.0, 20.0, 24.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
}
|
||||
58
src/common/function/src/utils.rs
Normal file
58
src/common/function/src/utils.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Escapes special characters in the provided pattern string for `LIKE`.
|
||||
///
|
||||
/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
|
||||
/// characters with an additional backslash to ensure they are treated literally.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// let escaped = escape_pattern("100%_some\\path");
|
||||
/// assert_eq!(escaped, "100\\%\\_some\\\\path");
|
||||
/// ```
|
||||
pub fn escape_like_pattern(pattern: &str) -> String {
|
||||
pattern
|
||||
.chars()
|
||||
.flat_map(|c| match c {
|
||||
'\\' | '%' | '_' => vec!['\\', c],
|
||||
_ => vec![c],
|
||||
})
|
||||
.collect::<String>()
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_escape_like_pattern() {
|
||||
assert_eq!(
|
||||
escape_like_pattern("100%_some\\path"),
|
||||
"100\\%\\_some\\\\path"
|
||||
);
|
||||
assert_eq!(escape_like_pattern(""), "");
|
||||
assert_eq!(escape_like_pattern("hello"), "hello");
|
||||
assert_eq!(escape_like_pattern("\\%_"), "\\\\\\%\\_");
|
||||
assert_eq!(escape_like_pattern("%%__\\\\"), "\\%\\%\\_\\_\\\\\\\\");
|
||||
assert_eq!(escape_like_pattern("abc123"), "abc123");
|
||||
assert_eq!(escape_like_pattern("%_\\"), "\\%\\_\\\\");
|
||||
assert_eq!(
|
||||
escape_like_pattern("%%__\\\\another%string"),
|
||||
"\\%\\%\\_\\_\\\\\\\\another\\%string"
|
||||
);
|
||||
assert_eq!(escape_like_pattern("foo%bar_"), "foo\\%bar\\_");
|
||||
assert_eq!(escape_like_pattern("\\_\\%"), "\\\\\\_\\\\\\%");
|
||||
}
|
||||
}
|
||||
44
src/common/meta/src/cache/container.rs
vendored
44
src/common/meta/src/cache/container.rs
vendored
@@ -43,7 +43,7 @@ pub struct CacheContainer<K, V, CacheToken> {
|
||||
cache: Cache<K, V>,
|
||||
invalidator: Invalidator<K, V, CacheToken>,
|
||||
initializer: Initializer<K, V>,
|
||||
token_filter: TokenFilter<CacheToken>,
|
||||
token_filter: fn(&CacheToken) -> bool,
|
||||
}
|
||||
|
||||
impl<K, V, CacheToken> CacheContainer<K, V, CacheToken>
|
||||
@@ -58,7 +58,7 @@ where
|
||||
cache: Cache<K, V>,
|
||||
invalidator: Invalidator<K, V, CacheToken>,
|
||||
initializer: Initializer<K, V>,
|
||||
token_filter: TokenFilter<CacheToken>,
|
||||
token_filter: fn(&CacheToken) -> bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
name,
|
||||
@@ -206,10 +206,13 @@ mod tests {
|
||||
name: &'a str,
|
||||
}
|
||||
|
||||
fn always_true_filter(_: &String) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get() {
|
||||
let cache: Cache<NameKey, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<NameKey, String> = Arc::new(move |_| {
|
||||
@@ -219,7 +222,13 @@ mod tests {
|
||||
let invalidator: Invalidator<NameKey, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let key = NameKey { name: "key" };
|
||||
let value = adv_cache.get(key).await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
@@ -233,7 +242,6 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_get_by_ref() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -243,7 +251,13 @@ mod tests {
|
||||
let invalidator: Invalidator<String, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
@@ -257,13 +271,18 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_get_value_not_exits() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let init: Initializer<String, String> =
|
||||
Arc::new(move |_| Box::pin(async { error::ValueNotExistSnafu {}.fail() }));
|
||||
let invalidator: Invalidator<String, String, String> =
|
||||
Box::new(|_, _| Box::pin(async { Ok(()) }));
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap();
|
||||
assert!(value.is_none());
|
||||
}
|
||||
@@ -271,7 +290,6 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_invalidate() {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<String> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -285,7 +303,13 @@ mod tests {
|
||||
})
|
||||
});
|
||||
|
||||
let adv_cache = CacheContainer::new("test".to_string(), cache, invalidator, init, filter);
|
||||
let adv_cache = CacheContainer::new(
|
||||
"test".to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
);
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
assert_eq!(value, "hi");
|
||||
let value = adv_cache.get_by_ref("foo").await.unwrap().unwrap();
|
||||
|
||||
@@ -45,7 +45,7 @@ pub fn new_table_flownode_set_cache(
|
||||
let table_flow_manager = Arc::new(TableFlowManager::new(kv_backend));
|
||||
let init = init_factory(table_flow_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId, FlownodeSet> {
|
||||
|
||||
22
src/common/meta/src/cache/registry.rs
vendored
22
src/common/meta/src/cache/registry.rs
vendored
@@ -151,12 +151,15 @@ mod tests {
|
||||
use crate::cache::*;
|
||||
use crate::instruction::CacheIdent;
|
||||
|
||||
fn always_true_filter(_: &CacheIdent) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn test_cache(
|
||||
name: &str,
|
||||
invalidator: Invalidator<String, String, CacheIdent>,
|
||||
) -> CacheContainer<String, String, CacheIdent> {
|
||||
let cache: Cache<String, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<String, String> = Arc::new(move |_| {
|
||||
@@ -164,7 +167,13 @@ mod tests {
|
||||
Box::pin(async { Ok(Some("hi".to_string())) })
|
||||
});
|
||||
|
||||
CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
|
||||
CacheContainer::new(
|
||||
name.to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
)
|
||||
}
|
||||
|
||||
fn test_i32_cache(
|
||||
@@ -172,7 +181,6 @@ mod tests {
|
||||
invalidator: Invalidator<i32, String, CacheIdent>,
|
||||
) -> CacheContainer<i32, String, CacheIdent> {
|
||||
let cache: Cache<i32, String> = CacheBuilder::new(128).build();
|
||||
let filter: TokenFilter<CacheIdent> = Box::new(|_| true);
|
||||
let counter = Arc::new(AtomicI32::new(0));
|
||||
let moved_counter = counter.clone();
|
||||
let init: Initializer<i32, String> = Arc::new(move |_| {
|
||||
@@ -180,7 +188,13 @@ mod tests {
|
||||
Box::pin(async { Ok(Some("foo".to_string())) })
|
||||
});
|
||||
|
||||
CacheContainer::new(name.to_string(), cache, invalidator, init, filter)
|
||||
CacheContainer::new(
|
||||
name.to_string(),
|
||||
cache,
|
||||
invalidator,
|
||||
init,
|
||||
always_true_filter,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
2
src/common/meta/src/cache/table/schema.rs
vendored
2
src/common/meta/src/cache/table/schema.rs
vendored
@@ -36,7 +36,7 @@ pub fn new_schema_cache(
|
||||
let schema_manager = SchemaManager::new(kv_backend.clone());
|
||||
let init = init_factory(schema_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(schema_manager: SchemaManager) -> Initializer<SchemaName, Arc<SchemaNameValue>> {
|
||||
|
||||
@@ -41,7 +41,7 @@ pub fn new_table_info_cache(
|
||||
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend));
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_info_manager: TableInfoManagerRef) -> Initializer<TableId, Arc<TableInfo>> {
|
||||
|
||||
@@ -41,7 +41,7 @@ pub fn new_table_name_cache(
|
||||
let table_name_manager = Arc::new(TableNameManager::new(kv_backend));
|
||||
let init = init_factory(table_name_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_name_manager: TableNameManagerRef) -> Initializer<TableName, TableId> {
|
||||
|
||||
@@ -65,7 +65,7 @@ pub fn new_table_route_cache(
|
||||
let table_info_manager = Arc::new(TableRouteManager::new(kv_backend));
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
|
||||
@@ -40,7 +40,7 @@ pub fn new_table_schema_cache(
|
||||
let table_info_manager = TableInfoManager::new(kv_backend);
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(table_info_manager: TableInfoManager) -> Initializer<TableId, Arc<SchemaName>> {
|
||||
|
||||
2
src/common/meta/src/cache/table/view_info.rs
vendored
2
src/common/meta/src/cache/table/view_info.rs
vendored
@@ -40,7 +40,7 @@ pub fn new_view_info_cache(
|
||||
let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
|
||||
let init = init_factory(view_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, filter)
|
||||
}
|
||||
|
||||
fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use futures::stream::BoxStream;
|
||||
@@ -146,7 +145,7 @@ impl CatalogManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(catalog_decoder),
|
||||
catalog_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
@@ -156,6 +155,8 @@ impl CatalogManager {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -166,7 +165,7 @@ impl DatanodeTableManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(datanode_table_value_decoder),
|
||||
datanode_table_value_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
@@ -201,7 +199,7 @@ impl FlowNameManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flow_name_decoder),
|
||||
flow_name_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
@@ -179,7 +177,7 @@ impl FlowRouteManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flow_route_decoder),
|
||||
flow_route_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use futures::TryStreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
@@ -179,7 +177,7 @@ impl FlownodeFlowManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(flownode_flow_key_decoder),
|
||||
flownode_flow_key_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -206,7 +206,7 @@ impl TableFlowManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(table_flow_decoder),
|
||||
table_flow_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -28,13 +28,10 @@ pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;
|
||||
pub struct SchemaMetadataManager {
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
}
|
||||
|
||||
impl SchemaMetadataManager {
|
||||
/// Creates a new database meta
|
||||
#[cfg(not(any(test, feature = "testing")))]
|
||||
pub fn new(table_id_schema_cache: TableSchemaCacheRef, schema_cache: SchemaCacheRef) -> Self {
|
||||
Self {
|
||||
table_id_schema_cache,
|
||||
@@ -42,20 +39,6 @@ impl SchemaMetadataManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new database meta
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn new(
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets schema options by table id.
|
||||
pub async fn get_schema_options_by_table_id(
|
||||
&self,
|
||||
@@ -80,6 +63,7 @@ impl SchemaMetadataManager {
|
||||
schema_name: &str,
|
||||
catalog_name: &str,
|
||||
schema_value: Option<crate::key::schema_name::SchemaNameValue>,
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
) {
|
||||
use table::metadata::{RawTableInfo, TableType};
|
||||
let value = crate::key::table_info::TableInfoValue::new(RawTableInfo {
|
||||
@@ -91,19 +75,18 @@ impl SchemaMetadataManager {
|
||||
meta: Default::default(),
|
||||
table_type: TableType::Base,
|
||||
});
|
||||
let table_info_manager =
|
||||
crate::key::table_info::TableInfoManager::new(self.kv_backend.clone());
|
||||
let table_info_manager = crate::key::table_info::TableInfoManager::new(kv_backend.clone());
|
||||
let (txn, _) = table_info_manager
|
||||
.build_create_txn(table_id, &value)
|
||||
.unwrap();
|
||||
let resp = self.kv_backend.txn(txn).await.unwrap();
|
||||
let resp = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(resp.succeeded, "Failed to create table metadata");
|
||||
let key = crate::key::schema_name::SchemaNameKey {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
};
|
||||
|
||||
crate::key::schema_name::SchemaManager::new(self.kv_backend.clone())
|
||||
crate::key::schema_name::SchemaManager::new(kv_backend.clone())
|
||||
.create(key, schema_value, false)
|
||||
.await
|
||||
.expect("Failed to create schema metadata");
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_time::DatabaseTimeToLive;
|
||||
@@ -30,6 +29,7 @@ use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
|
||||
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::metrics::METRIC_META_SCHEMA_INFO_GET;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
use crate::rpc::KeyValue;
|
||||
@@ -210,6 +210,8 @@ impl SchemaManager {
|
||||
&self,
|
||||
schema: SchemaNameKey<'_>,
|
||||
) -> Result<Option<DeserializedValueWithBytes<SchemaNameValue>>> {
|
||||
let _timer = METRIC_META_SCHEMA_INFO_GET.start_timer();
|
||||
|
||||
let raw_key = schema.to_bytes();
|
||||
self.kv_backend
|
||||
.get(&raw_key)
|
||||
@@ -283,7 +285,7 @@ impl SchemaManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(schema_decoder),
|
||||
schema_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
@@ -308,6 +310,7 @@ impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -29,6 +29,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::metrics::METRIC_META_TABLE_INFO_GET;
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
|
||||
/// The key stores the metadata of the table.
|
||||
@@ -194,6 +195,8 @@ impl TableInfoManager {
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
|
||||
let _timer = METRIC_META_TABLE_INFO_GET.start_timer();
|
||||
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.to_bytes();
|
||||
self.kv_backend
|
||||
|
||||
@@ -269,7 +269,7 @@ impl TableNameManager {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(table_decoder),
|
||||
table_decoder,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::any::Any;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::error;
|
||||
use snafu::ResultExt;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::{Client, NoTls};
|
||||
@@ -97,7 +98,11 @@ impl PgStore {
|
||||
let (client, conn) = tokio_postgres::connect(url, NoTls)
|
||||
.await
|
||||
.context(ConnectPostgresSnafu)?;
|
||||
tokio::spawn(async move { conn.await.context(ConnectPostgresSnafu) });
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = conn.await {
|
||||
error!(e; "connection error");
|
||||
}
|
||||
});
|
||||
Self::with_pg_client(client).await
|
||||
}
|
||||
|
||||
|
||||
@@ -108,4 +108,9 @@ lazy_static! {
|
||||
&["name"]
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
pub static ref METRIC_META_TABLE_INFO_GET: Histogram =
|
||||
register_histogram!("greptime_meta_table_info_get", "get table info from kvbackend").unwrap();
|
||||
pub static ref METRIC_META_SCHEMA_INFO_GET: Histogram =
|
||||
register_histogram!("greptime_meta_schema_info_get", "get schema info from kvbackend").unwrap();
|
||||
}
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_telemetry::debug;
|
||||
use futures::Stream;
|
||||
@@ -148,7 +146,7 @@ impl PaginationStreamFactory {
|
||||
}
|
||||
|
||||
pub struct PaginationStream<T> {
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
decoder_fn: fn(KeyValue) -> Result<T>,
|
||||
factory: PaginationStreamFactory,
|
||||
}
|
||||
|
||||
@@ -158,7 +156,7 @@ impl<T> PaginationStream<T> {
|
||||
kv: KvBackendRef,
|
||||
req: RangeRequest,
|
||||
page_size: usize,
|
||||
decoder_fn: Arc<KeyValueDecoderFn<T>>,
|
||||
decoder_fn: fn(KeyValue) -> Result<T>,
|
||||
) -> Self {
|
||||
Self {
|
||||
decoder_fn,
|
||||
@@ -191,6 +189,7 @@ mod tests {
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::TryStreamExt;
|
||||
|
||||
@@ -250,7 +249,7 @@ mod tests {
|
||||
..Default::default()
|
||||
},
|
||||
DEFAULT_PAGE_SIZE,
|
||||
Arc::new(decoder),
|
||||
decoder,
|
||||
)
|
||||
.into_stream();
|
||||
let kv = stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
@@ -290,7 +289,7 @@ mod tests {
|
||||
..Default::default()
|
||||
},
|
||||
2,
|
||||
Arc::new(decoder),
|
||||
decoder,
|
||||
);
|
||||
let kv = stream
|
||||
.into_stream()
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::{DeleteStatesSnafu, ListStateSnafu, PutStateSnafu};
|
||||
@@ -171,7 +169,7 @@ impl StateStore for KvStateStore {
|
||||
self.kv_backend.clone(),
|
||||
req,
|
||||
self.max_num_per_range_request.unwrap_or_default(),
|
||||
Arc::new(decode_kv),
|
||||
decode_kv,
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
|
||||
@@ -39,3 +39,7 @@ tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
|
||||
[target.'cfg(tokio_unstable)'.dependencies]
|
||||
tokio-metrics = { version = "0.3" }
|
||||
tokio-metrics-collector = { version = "0.2" }
|
||||
|
||||
@@ -224,7 +224,6 @@ impl DatanodeBuilder {
|
||||
cache_registry.get().context(MissingCacheSnafu)?;
|
||||
|
||||
let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(
|
||||
kv_backend.clone(),
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
));
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_telemetry::{info, warn};
|
||||
use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer};
|
||||
use object_store::services::Fs;
|
||||
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, OBJECT_CACHE_DIR};
|
||||
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
|
||||
@@ -147,12 +147,10 @@ async fn build_cache_layer(
|
||||
};
|
||||
|
||||
// Enable object cache by default
|
||||
// Set the cache_path to be `${data_home}/object_cache/read/{name}` by default
|
||||
// Set the cache_path to be `${data_home}` by default
|
||||
// if it's not present
|
||||
if cache_path.is_none() {
|
||||
let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR);
|
||||
let read_cache_path = join_dir(&object_cache_path, "read");
|
||||
let read_cache_path = join_dir(&read_cache_path, &name.to_lowercase());
|
||||
let read_cache_path = data_home.to_string();
|
||||
tokio::fs::create_dir_all(Path::new(&read_cache_path))
|
||||
.await
|
||||
.context(CreateDirSnafu {
|
||||
|
||||
@@ -29,7 +29,7 @@ use crate::error::{self, DuplicateColumnSnafu, Error, ProjectArrowSchemaSnafu, R
|
||||
use crate::prelude::ConcreteDataType;
|
||||
pub use crate::schema::column_schema::{
|
||||
ColumnSchema, FulltextAnalyzer, FulltextOptions, Metadata, SkippingIndexOptions,
|
||||
COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
|
||||
SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
|
||||
COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
|
||||
COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
||||
SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
|
||||
|
||||
@@ -543,7 +543,7 @@ pub struct SkippingIndexOptions {
|
||||
pub granularity: u32,
|
||||
/// The type of the skip index.
|
||||
#[serde(default)]
|
||||
pub index_type: SkipIndexType,
|
||||
pub index_type: SkippingIndexType,
|
||||
}
|
||||
|
||||
impl fmt::Display for SkippingIndexOptions {
|
||||
@@ -556,15 +556,15 @@ impl fmt::Display for SkippingIndexOptions {
|
||||
|
||||
/// Skip index types.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
|
||||
pub enum SkipIndexType {
|
||||
pub enum SkippingIndexType {
|
||||
#[default]
|
||||
BloomFilter,
|
||||
}
|
||||
|
||||
impl fmt::Display for SkipIndexType {
|
||||
impl fmt::Display for SkippingIndexType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SkipIndexType::BloomFilter => write!(f, "BLOOM"),
|
||||
SkippingIndexType::BloomFilter => write!(f, "BLOOM"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -587,7 +587,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
// Parse index type with default value BloomFilter
|
||||
let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
|
||||
Some(typ) => match typ.to_ascii_uppercase().as_str() {
|
||||
"BLOOM" => SkipIndexType::BloomFilter,
|
||||
"BLOOM" => SkippingIndexType::BloomFilter,
|
||||
_ => {
|
||||
return error::InvalidSkippingIndexOptionSnafu {
|
||||
msg: format!("Invalid index type: {typ}, expected: 'BLOOM'"),
|
||||
@@ -595,7 +595,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
|
||||
.fail();
|
||||
}
|
||||
},
|
||||
None => SkipIndexType::default(),
|
||||
None => SkippingIndexType::default(),
|
||||
};
|
||||
|
||||
Ok(SkippingIndexOptions {
|
||||
|
||||
@@ -45,6 +45,7 @@ get-size2 = "0.1.2"
|
||||
greptime-proto.workspace = true
|
||||
# This fork of hydroflow is simply for keeping our dependency in our org, and pin the version
|
||||
# otherwise it is the same with upstream repo
|
||||
http.workspace = true
|
||||
hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "main" }
|
||||
itertools.workspace = true
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -30,7 +30,7 @@ use common_telemetry::{debug, info, trace};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use datatypes::value::Value;
|
||||
use greptime_proto::v1;
|
||||
use itertools::Itertools;
|
||||
use itertools::{EitherOrBoth, Itertools};
|
||||
use meta_client::MetaClientOptions;
|
||||
use query::QueryEngine;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -46,17 +46,19 @@ use tokio::sync::{broadcast, watch, Mutex, RwLock};
|
||||
|
||||
pub(crate) use crate::adapter::node_context::FlownodeContext;
|
||||
use crate::adapter::table_source::TableSource;
|
||||
use crate::adapter::util::column_schemas_to_proto;
|
||||
use crate::adapter::util::{
|
||||
relation_desc_to_column_schemas_with_fallback, table_info_value_to_relation_desc,
|
||||
};
|
||||
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
|
||||
use crate::compute::ErrCollector;
|
||||
use crate::df_optimizer::sql_to_flow_plan;
|
||||
use crate::error::{
|
||||
EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, TableNotFoundSnafu,
|
||||
EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, InvalidQuerySnafu,
|
||||
UnexpectedSnafu,
|
||||
};
|
||||
use crate::expr::{Batch, GlobalId};
|
||||
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_RUN_INTERVAL_MS};
|
||||
use crate::repr::{self, DiffRow, Row, BATCH_SIZE};
|
||||
use crate::expr::Batch;
|
||||
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
|
||||
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
|
||||
|
||||
mod flownode_impl;
|
||||
mod parse_expr;
|
||||
@@ -245,16 +247,26 @@ impl FlowWorkerManager {
|
||||
let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
|
||||
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
|
||||
|
||||
let (is_ts_placeholder, proto_schema) =
|
||||
self.try_fetch_or_create_table(&table_name).await?;
|
||||
let (is_ts_placeholder, proto_schema) = self
|
||||
.try_fetch_existing_table(&table_name)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Table not found: {}", table_name.join(".")),
|
||||
})?;
|
||||
let schema_len = proto_schema.len();
|
||||
|
||||
let total_rows = reqs.iter().map(|r| r.len()).sum::<usize>();
|
||||
trace!(
|
||||
"Sending {} writeback requests to table {}, reqs total rows={}",
|
||||
reqs.len(),
|
||||
table_name.join("."),
|
||||
reqs.iter().map(|r| r.len()).sum::<usize>()
|
||||
);
|
||||
|
||||
METRIC_FLOW_ROWS
|
||||
.with_label_values(&["out"])
|
||||
.inc_by(total_rows as u64);
|
||||
|
||||
let now = self.tick_manager.tick();
|
||||
for req in reqs {
|
||||
match req {
|
||||
@@ -390,14 +402,12 @@ impl FlowWorkerManager {
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
/// Fetch table info or create table from flow's schema if not exist
|
||||
async fn try_fetch_or_create_table(
|
||||
/// Fetch table schema and primary key from table info source, if table not exist return None
|
||||
async fn fetch_table_pk_schema(
|
||||
&self,
|
||||
table_name: &TableName,
|
||||
) -> Result<(bool, Vec<api::v1::ColumnSchema>), Error> {
|
||||
// TODO(discord9): instead of auto build table from request schema, actually build table
|
||||
// before `create flow` to be able to assign pk and ts etc.
|
||||
let (primary_keys, schema, is_ts_placeholder) = if let Some(table_id) = self
|
||||
) -> Result<Option<(Vec<String>, Option<usize>, Vec<ColumnSchema>)>, Error> {
|
||||
if let Some(table_id) = self
|
||||
.table_info_source
|
||||
.get_table_id_from_name(table_name)
|
||||
.await?
|
||||
@@ -414,97 +424,64 @@ impl FlowWorkerManager {
|
||||
.map(|i| meta.schema.column_schemas[i].name.clone())
|
||||
.collect_vec();
|
||||
let schema = meta.schema.column_schemas;
|
||||
// check if the last column is the auto created timestamp column, hence the table is auto created from
|
||||
// flow's plan type
|
||||
let is_auto_create = {
|
||||
let correct_name = schema
|
||||
.last()
|
||||
.map(|s| s.name == AUTO_CREATED_PLACEHOLDER_TS_COL)
|
||||
.unwrap_or(false);
|
||||
let correct_time_index = meta.schema.timestamp_index == Some(schema.len() - 1);
|
||||
correct_name && correct_time_index
|
||||
};
|
||||
(primary_keys, schema, is_auto_create)
|
||||
let time_index = meta.schema.timestamp_index;
|
||||
Ok(Some((primary_keys, time_index, schema)))
|
||||
} else {
|
||||
// TODO(discord9): consider removing buggy auto create by schema
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
let node_ctx = self.node_context.read().await;
|
||||
let gid: GlobalId = node_ctx
|
||||
.table_repr
|
||||
.get_by_name(table_name)
|
||||
.map(|x| x.1)
|
||||
.unwrap();
|
||||
let schema = node_ctx
|
||||
.schema
|
||||
.get(&gid)
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: format!("Table name = {:?}", table_name),
|
||||
})?
|
||||
.clone();
|
||||
// TODO(discord9): use default key from schema
|
||||
let primary_keys = schema
|
||||
.typ()
|
||||
.keys
|
||||
.first()
|
||||
.map(|v| {
|
||||
v.column_indices
|
||||
.iter()
|
||||
.map(|i| {
|
||||
schema
|
||||
.get_name(*i)
|
||||
.clone()
|
||||
.unwrap_or_else(|| format!("col_{i}"))
|
||||
})
|
||||
.collect_vec()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let update_at = ColumnSchema::new(
|
||||
UPDATE_AT_TS_COL,
|
||||
/// return (primary keys, schema, and whether the table has a placeholder timestamp column)
|
||||
/// schema of the table comes from flow's output plan
|
||||
///
|
||||
/// adjust to add `update_at` column and ts placeholder if needed
|
||||
async fn adjust_auto_created_table_schema(
|
||||
&self,
|
||||
schema: &RelationDesc,
|
||||
) -> Result<(Vec<String>, Vec<ColumnSchema>, bool), Error> {
|
||||
// TODO(discord9): consider removing buggy auto create by schema
|
||||
|
||||
// TODO(discord9): use default key from schema
|
||||
let primary_keys = schema
|
||||
.typ()
|
||||
.keys
|
||||
.first()
|
||||
.map(|v| {
|
||||
v.column_indices
|
||||
.iter()
|
||||
.map(|i| {
|
||||
schema
|
||||
.get_name(*i)
|
||||
.clone()
|
||||
.unwrap_or_else(|| format!("col_{i}"))
|
||||
})
|
||||
.collect_vec()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let update_at = ColumnSchema::new(
|
||||
UPDATE_AT_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
);
|
||||
|
||||
let original_schema = relation_desc_to_column_schemas_with_fallback(schema);
|
||||
|
||||
let mut with_auto_added_col = original_schema.clone();
|
||||
with_auto_added_col.push(update_at);
|
||||
|
||||
// if no time index, add one as placeholder
|
||||
let no_time_index = schema.typ().time_index.is_none();
|
||||
if no_time_index {
|
||||
let ts_col = ColumnSchema::new(
|
||||
AUTO_CREATED_PLACEHOLDER_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
);
|
||||
)
|
||||
.with_time_index(true);
|
||||
with_auto_added_col.push(ts_col);
|
||||
}
|
||||
|
||||
let original_schema = schema
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(idx, typ)| {
|
||||
let name = schema
|
||||
.names
|
||||
.get(idx)
|
||||
.cloned()
|
||||
.flatten()
|
||||
.unwrap_or(format!("col_{}", idx));
|
||||
let ret = ColumnSchema::new(name, typ.scalar_type, typ.nullable);
|
||||
if schema.typ().time_index == Some(idx) {
|
||||
ret.with_time_index(true)
|
||||
} else {
|
||||
ret
|
||||
}
|
||||
})
|
||||
.collect_vec();
|
||||
|
||||
let mut with_auto_added_col = original_schema.clone();
|
||||
with_auto_added_col.push(update_at);
|
||||
|
||||
// if no time index, add one as placeholder
|
||||
let no_time_index = schema.typ().time_index.is_none();
|
||||
if no_time_index {
|
||||
let ts_col = ColumnSchema::new(
|
||||
AUTO_CREATED_PLACEHOLDER_TS_COL,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
.with_time_index(true);
|
||||
with_auto_added_col.push(ts_col);
|
||||
}
|
||||
|
||||
(primary_keys, with_auto_added_col, no_time_index)
|
||||
};
|
||||
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
|
||||
Ok((is_ts_placeholder, proto_schema))
|
||||
Ok((primary_keys, with_auto_added_col, no_time_index))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -807,7 +784,85 @@ impl FlowWorkerManager {
|
||||
let flow_plan = sql_to_flow_plan(&mut node_ctx, &self.query_engine, &sql).await?;
|
||||
|
||||
debug!("Flow {:?}'s Plan is {:?}", flow_id, flow_plan);
|
||||
node_ctx.assign_table_schema(&sink_table_name, flow_plan.schema.clone())?;
|
||||
|
||||
// check schema against actual table schema if exists
|
||||
// if not exist create sink table immediately
|
||||
if let Some((_, _, real_schema)) = self.fetch_table_pk_schema(&sink_table_name).await? {
|
||||
let auto_schema = relation_desc_to_column_schemas_with_fallback(&flow_plan.schema);
|
||||
|
||||
// for column schema, only `data_type` needs to be checked for equality
|
||||
// since one can omit the flow's column names when writing the flow query
|
||||
// print a user-friendly error message about the mismatch and how to correct it
|
||||
for (idx, zipped) in auto_schema
|
||||
.iter()
|
||||
.zip_longest(real_schema.iter())
|
||||
.enumerate()
|
||||
{
|
||||
match zipped {
|
||||
EitherOrBoth::Both(auto, real) => {
|
||||
if auto.data_type != real.data_type {
|
||||
InvalidQuerySnafu {
|
||||
reason: format!(
|
||||
"Column {}(name is '{}', flow inferred name is '{}')'s data type mismatch, expect {:?} got {:?}",
|
||||
idx,
|
||||
real.name,
|
||||
auto.name,
|
||||
real.data_type,
|
||||
auto.data_type
|
||||
),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
}
|
||||
EitherOrBoth::Right(real) if real.data_type.is_timestamp() => {
|
||||
// if the table is auto created, the last one or two columns should be timestamps (update_at and the ts placeholder)
|
||||
continue;
|
||||
}
|
||||
_ => InvalidQuerySnafu {
|
||||
reason: format!(
|
||||
"schema length mismatched, expected {} found {}",
|
||||
real_schema.len(),
|
||||
auto_schema.len()
|
||||
),
|
||||
}
|
||||
.fail()?,
|
||||
}
|
||||
}
|
||||
|
||||
let table_id = self
|
||||
.table_info_source
|
||||
.get_table_id_from_name(&sink_table_name)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Can't get table id for table name {:?}", sink_table_name),
|
||||
})?;
|
||||
let table_info_value = self
|
||||
.table_info_source
|
||||
.get_table_info_value(&table_id)
|
||||
.await?
|
||||
.context(UnexpectedSnafu {
|
||||
reason: format!("Can't get table info value for table id {:?}", table_id),
|
||||
})?;
|
||||
let real_schema = table_info_value_to_relation_desc(table_info_value)?;
|
||||
node_ctx.assign_table_schema(&sink_table_name, real_schema.clone())?;
|
||||
} else {
|
||||
// assign inferred schema to sink table
|
||||
// create sink table
|
||||
node_ctx.assign_table_schema(&sink_table_name, flow_plan.schema.clone())?;
|
||||
let did_create = self
|
||||
.create_table_from_relation(
|
||||
&format!("flow-id={flow_id}"),
|
||||
&sink_table_name,
|
||||
&flow_plan.schema,
|
||||
)
|
||||
.await?;
|
||||
if !did_create {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Failed to create table {:?}", sink_table_name),
|
||||
}
|
||||
.fail()?;
|
||||
}
|
||||
}
|
||||
|
||||
let _ = comment;
|
||||
let _ = flow_options;
|
||||
|
||||
@@ -138,7 +138,7 @@ impl Flownode for FlowWorkerManager {
|
||||
}
|
||||
|
||||
async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
|
||||
// using try_read makesure two things:
|
||||
// using try_read to ensure two things:
|
||||
// 1. flush wouldn't happen until inserts before it are inserted
|
||||
// 2. inserts happening concurrently with flush wouldn't be blocked by flush
|
||||
let _flush_lock = self.flush_lock.try_read();
|
||||
|
||||
@@ -331,12 +331,14 @@ impl FlownodeContext {
|
||||
} else {
|
||||
let global_id = self.new_global_id();
|
||||
|
||||
// table id is Some meaning db must have created the table
|
||||
if let Some(table_id) = table_id {
|
||||
let (known_table_name, schema) = srv_map.get_table_name_schema(&table_id).await?;
|
||||
table_name = table_name.or(Some(known_table_name));
|
||||
self.schema.insert(global_id, schema);
|
||||
} // if we don't have table id, it means database havn't assign one yet or we don't need it
|
||||
} // if we don't have table id, it means database haven't assign one yet or we don't need it
|
||||
|
||||
// still update the mapping with new global id
|
||||
self.table_repr.insert(table_name, table_id, global_id);
|
||||
Ok(global_id)
|
||||
}
|
||||
@@ -358,6 +360,7 @@ impl FlownodeContext {
|
||||
})?;
|
||||
|
||||
self.schema.insert(gid, schema);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -20,11 +20,12 @@ use common_meta::key::table_name::{TableNameKey, TableNameManager};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::adapter::util::table_info_value_to_relation_desc;
|
||||
use crate::adapter::TableName;
|
||||
use crate::error::{
|
||||
Error, ExternalSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::repr::{self, ColumnType, RelationDesc, RelationType};
|
||||
use crate::repr::RelationDesc;
|
||||
|
||||
/// mapping of table name <-> table id should be queried from the tableinfo manager
|
||||
pub struct TableSource {
|
||||
@@ -61,7 +62,7 @@ impl TableSource {
|
||||
.map(|id| id.table_id())
|
||||
}
|
||||
|
||||
/// If the table havn't been created in database, the tableId returned would be null
|
||||
/// If the table haven't been created in database, the tableId returned would be null
|
||||
pub async fn get_table_id_from_name(&self, name: &TableName) -> Result<Option<TableId>, Error> {
|
||||
let ret = self
|
||||
.table_name_manager
|
||||
@@ -121,38 +122,7 @@ impl TableSource {
|
||||
table_name.table_name,
|
||||
];
|
||||
|
||||
let raw_schema = table_info_value.table_info.meta.schema;
|
||||
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
|
||||
.column_schemas
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|col| {
|
||||
(
|
||||
ColumnType {
|
||||
nullable: col.is_nullable(),
|
||||
scalar_type: col.data_type,
|
||||
},
|
||||
Some(col.name),
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
|
||||
let key = table_info_value.table_info.meta.primary_key_indices;
|
||||
let keys = vec![repr::Key::from(key)];
|
||||
|
||||
let time_index = raw_schema.timestamp_index;
|
||||
Ok((
|
||||
table_name,
|
||||
RelationDesc {
|
||||
typ: RelationType {
|
||||
column_types,
|
||||
keys,
|
||||
time_index,
|
||||
// by default table schema's column are all non-auto
|
||||
auto_columns: vec![],
|
||||
},
|
||||
names: col_names,
|
||||
},
|
||||
))
|
||||
let desc = table_info_value_to_relation_desc(table_info_value)?;
|
||||
Ok((table_name, desc))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,16 +12,153 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column_def::options_from_column_schema;
|
||||
use api::v1::{ColumnDataType, ColumnDataTypeExtension, SemanticType};
|
||||
use api::v1::{ColumnDataType, ColumnDataTypeExtension, CreateTableExpr, SemanticType};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use operator::expr_factory::CreateExprFactory;
|
||||
use session::context::QueryContextBuilder;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{Error, ExternalSnafu};
|
||||
use crate::adapter::{TableName, AUTO_CREATED_PLACEHOLDER_TS_COL};
|
||||
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::repr::{ColumnType, RelationDesc, RelationType};
|
||||
use crate::FlowWorkerManager;
|
||||
|
||||
impl FlowWorkerManager {
|
||||
/// Create table from the given schema (adjusted to add auto columns if needed); return true if the table is created
|
||||
pub(crate) async fn create_table_from_relation(
|
||||
&self,
|
||||
flow_name: &str,
|
||||
table_name: &TableName,
|
||||
relation_desc: &RelationDesc,
|
||||
) -> Result<bool, Error> {
|
||||
if self.fetch_table_pk_schema(table_name).await?.is_some() {
|
||||
return Ok(false);
|
||||
}
|
||||
let (pks, tys, _) = self.adjust_auto_created_table_schema(relation_desc).await?;
|
||||
|
||||
// create sink table using pks, column types and is_ts_auto
|
||||
|
||||
let proto_schema = column_schemas_to_proto(tys.clone(), &pks)?;
|
||||
|
||||
// create sink table
|
||||
let create_expr = CreateExprFactory {}
|
||||
.create_table_expr_by_column_schemas(
|
||||
&TableReference {
|
||||
catalog: &table_name[0],
|
||||
schema: &table_name[1],
|
||||
table: &table_name[2],
|
||||
},
|
||||
&proto_schema,
|
||||
"mito",
|
||||
Some(&format!("Sink table for flow {}", flow_name)),
|
||||
)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
self.submit_create_sink_table_ddl(create_expr).await?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Try to fetch the table with adjusted schema (auto columns added if needed)
|
||||
pub(crate) async fn try_fetch_existing_table(
|
||||
&self,
|
||||
table_name: &TableName,
|
||||
) -> Result<Option<(bool, Vec<api::v1::ColumnSchema>)>, Error> {
|
||||
if let Some((primary_keys, time_index, schema)) =
|
||||
self.fetch_table_pk_schema(table_name).await?
|
||||
{
|
||||
// check if the last column is the auto created timestamp column, hence the table is auto created from
|
||||
// flow's plan type
|
||||
let is_auto_create = {
|
||||
let correct_name = schema
|
||||
.last()
|
||||
.map(|s| s.name == AUTO_CREATED_PLACEHOLDER_TS_COL)
|
||||
.unwrap_or(false);
|
||||
let correct_time_index = time_index == Some(schema.len() - 1);
|
||||
correct_name && correct_time_index
|
||||
};
|
||||
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
|
||||
Ok(Some((is_auto_create, proto_schema)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// submit a create table ddl
|
||||
pub(crate) async fn submit_create_sink_table_ddl(
|
||||
&self,
|
||||
mut create_table: CreateTableExpr,
|
||||
) -> Result<(), Error> {
|
||||
let stmt_exec = {
|
||||
self.frontend_invoker
|
||||
.read()
|
||||
.await
|
||||
.as_ref()
|
||||
.map(|f| f.statement_executor())
|
||||
}
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "Failed to get statement executor",
|
||||
})?;
|
||||
let ctx = Arc::new(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(create_table.catalog_name.clone())
|
||||
.current_schema(create_table.schema_name.clone())
|
||||
.build(),
|
||||
);
|
||||
stmt_exec
|
||||
.create_table_inner(&mut create_table, None, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_info_value_to_relation_desc(
|
||||
table_info_value: TableInfoValue,
|
||||
) -> Result<RelationDesc, Error> {
|
||||
let raw_schema = table_info_value.table_info.meta.schema;
|
||||
let (column_types, col_names): (Vec<_>, Vec<_>) = raw_schema
|
||||
.column_schemas
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|col| {
|
||||
(
|
||||
ColumnType {
|
||||
nullable: col.is_nullable(),
|
||||
scalar_type: col.data_type,
|
||||
},
|
||||
Some(col.name),
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
|
||||
let key = table_info_value.table_info.meta.primary_key_indices;
|
||||
let keys = vec![crate::repr::Key::from(key)];
|
||||
|
||||
let time_index = raw_schema.timestamp_index;
|
||||
|
||||
Ok(RelationDesc {
|
||||
typ: RelationType {
|
||||
column_types,
|
||||
keys,
|
||||
time_index,
|
||||
// by default table schema's column are all non-auto
|
||||
auto_columns: vec![],
|
||||
},
|
||||
names: col_names,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_proto_to_data_type(
|
||||
column_schema: &api::v1::ColumnSchema,
|
||||
@@ -75,3 +212,29 @@ pub fn column_schemas_to_proto(
|
||||
.collect();
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
/// Convert `RelationDesc` to `ColumnSchema` list,
|
||||
/// if the column name is not present, use `col_{idx}` as the column name
|
||||
pub fn relation_desc_to_column_schemas_with_fallback(schema: &RelationDesc) -> Vec<ColumnSchema> {
|
||||
schema
|
||||
.typ()
|
||||
.column_types
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(idx, typ)| {
|
||||
let name = schema
|
||||
.names
|
||||
.get(idx)
|
||||
.cloned()
|
||||
.flatten()
|
||||
.unwrap_or(format!("col_{}", idx));
|
||||
let ret = ColumnSchema::new(name, typ.scalar_type, typ.nullable);
|
||||
if schema.typ().time_index == Some(idx) {
|
||||
ret.with_time_index(true)
|
||||
} else {
|
||||
ret
|
||||
}
|
||||
})
|
||||
.collect_vec()
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::array::new_null_array;
|
||||
use common_telemetry::trace;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::DataType;
|
||||
@@ -398,20 +399,54 @@ fn reduce_batch_subgraph(
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: here reduce numbers of eq to minimal by keeping slicing key/val batch
|
||||
let key_data_types = output_type
|
||||
.column_types
|
||||
.iter()
|
||||
.map(|t| t.scalar_type.clone())
|
||||
.collect_vec();
|
||||
|
||||
// TODO(discord9): here reduce numbers of eq to minimal by keeping slicing key/val batch
|
||||
for key_row in distinct_keys {
|
||||
let key_scalar_value = {
|
||||
let mut key_scalar_value = Vec::with_capacity(key_row.len());
|
||||
for key in key_row.iter() {
|
||||
for (key_idx, key) in key_row.iter().enumerate() {
|
||||
let v =
|
||||
key.try_to_scalar_value(&key.data_type())
|
||||
.context(DataTypeSnafu {
|
||||
msg: "can't convert key values to datafusion value",
|
||||
})?;
|
||||
let arrow_value =
|
||||
|
||||
let key_data_type = key_data_types.get(key_idx).context(InternalSnafu {
|
||||
reason: format!(
|
||||
"Key index out of bound, expected at most {} but got {}",
|
||||
output_type.column_types.len(),
|
||||
key_idx
|
||||
),
|
||||
})?;
|
||||
|
||||
// if the incoming value's datatype is null, it needs to be handled specially, see below
|
||||
if key_data_type.as_arrow_type() != v.data_type()
|
||||
&& !v.data_type().is_null()
|
||||
{
|
||||
crate::expr::error::InternalSnafu {
|
||||
reason: format!(
|
||||
"Key data type mismatch, expected {:?} but got {:?}",
|
||||
key_data_type.as_arrow_type(),
|
||||
v.data_type()
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
|
||||
// handle single null key
|
||||
let arrow_value = if v.data_type().is_null() {
|
||||
let ret = new_null_array(&arrow::datatypes::DataType::Null, 1);
|
||||
arrow::array::Scalar::new(ret)
|
||||
} else {
|
||||
v.to_scalar().context(crate::expr::error::DatafusionSnafu {
|
||||
context: "can't convert key values to arrow value",
|
||||
})?;
|
||||
})?
|
||||
};
|
||||
key_scalar_value.push(arrow_value);
|
||||
}
|
||||
key_scalar_value
|
||||
@@ -423,7 +458,19 @@ fn reduce_batch_subgraph(
|
||||
.zip(key_batch.batch().iter())
|
||||
.map(|(key, col)| {
|
||||
// TODO(discord9): this takes half of the cpu! And this is redundant amount of `eq`!
|
||||
arrow::compute::kernels::cmp::eq(&key, &col.to_arrow_array().as_ref() as _)
|
||||
|
||||
// note that if lhs is null, we still need to get all rows that are null! But we can't use `eq` since
|
||||
// it will return null if the input has nulls, so we need to use `is_null` instead
|
||||
if arrow::array::Datum::get(&key).0.data_type().is_null() {
|
||||
arrow::compute::kernels::boolean::is_null(
|
||||
col.to_arrow_array().as_ref() as _
|
||||
)
|
||||
} else {
|
||||
arrow::compute::kernels::cmp::eq(
|
||||
&key,
|
||||
&col.to_arrow_array().as_ref() as _,
|
||||
)
|
||||
}
|
||||
})
|
||||
.try_collect::<_, Vec<_>, _>()
|
||||
.context(ArrowSnafu {
|
||||
|
||||
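The null-key special case above hinges on arrow's comparison semantics: `eq` is null-propagating, so comparing a column against a null key scalar yields nulls instead of matches, while `is_null` produces the usable mask. A minimal, self-contained sketch of that behaviour (assuming an arrow-rs version with the `Datum`-based `cmp::eq` kernel, as used in the hunk above; the variable names are illustrative only, not part of this diff):

use arrow::array::{Array, Int64Array, Scalar};
use arrow::compute::kernels::boolean::is_null;
use arrow::compute::kernels::cmp::eq;

fn main() -> Result<(), arrow::error::ArrowError> {
    // a column with a null slot in the middle
    let col = Int64Array::from(vec![Some(1), None, Some(3)]);

    // comparing against a null scalar: every output slot is null,
    // so the null row would never be selected by `eq`
    let null_key = Scalar::new(Int64Array::from(vec![None::<i64>]));
    let eq_mask = eq(&null_key, &col)?;
    assert_eq!(eq_mask.null_count(), 3);

    // `is_null` returns the mask we actually want for a null key
    let null_mask = is_null(&col)?;
    assert!(null_mask.value(1));
    Ok(())
}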
@@ -17,6 +17,7 @@ use std::collections::{BTreeMap, VecDeque};
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use hydroflow::scheduled::graph::Hydroflow;
|
||||
use hydroflow::scheduled::handoff::TeeingHandoff;
|
||||
use hydroflow::scheduled::port::RecvPort;
|
||||
@@ -25,6 +26,7 @@ use itertools::Itertools;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::expr::{Batch, EvalError, ScalarExpr};
|
||||
use crate::metrics::METRIC_FLOW_ERRORS;
|
||||
use crate::repr::DiffRow;
|
||||
use crate::utils::ArrangeHandler;
|
||||
|
||||
@@ -185,6 +187,9 @@ impl ErrCollector {
|
||||
}
|
||||
|
||||
pub fn push_err(&self, err: EvalError) {
|
||||
METRIC_FLOW_ERRORS
|
||||
.with_label_values(&[err.status_code().as_ref()])
|
||||
.inc();
|
||||
self.inner.blocking_lock().push_back(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -492,7 +492,7 @@ impl ScalarUDFImpl for TumbleExpand {
|
||||
if let Some(start_time) = opt{
|
||||
if !matches!(start_time, Utf8 | Date32 | Date64 | Timestamp(_, _)){
|
||||
return Err(DataFusionError::Plan(
|
||||
format!("Expect start_time to either be date, timestampe or string, found {:?}", start_time)
|
||||
format!("Expect start_time to either be date, timestamp or string, found {:?}", start_time)
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,12 +16,13 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::define_into_tonic_status;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::{define_into_tonic_status, from_err_code_msg_to_header};
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_telemetry::common_error::ext::ErrorExt;
|
||||
use common_telemetry::common_error::status_code::StatusCode;
|
||||
use snafu::{Location, Snafu};
|
||||
use tonic::metadata::MetadataMap;
|
||||
|
||||
use crate::adapter::FlowId;
|
||||
use crate::expr::EvalError;
|
||||
@@ -186,6 +187,20 @@ pub enum Error {
|
||||
},
|
||||
}
|
||||
|
||||
/// the outer message is the full error stack, and the inner message in the header is the last error message that can be shown directly to the user
|
||||
pub fn to_status_with_last_err(err: impl ErrorExt) -> tonic::Status {
|
||||
let msg = err.to_string();
|
||||
let last_err_msg = common_error::ext::StackError::last(&err).to_string();
|
||||
let code = err.status_code() as u32;
|
||||
let header = from_err_code_msg_to_header(code, &last_err_msg);
|
||||
|
||||
tonic::Status::with_metadata(
|
||||
tonic::Code::InvalidArgument,
|
||||
msg,
|
||||
MetadataMap::from_headers(header),
|
||||
)
|
||||
}
|
||||
|
||||
/// Result type for flow module
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -200,9 +215,8 @@ impl ErrorExt for Error {
|
||||
| Self::TableNotFoundMeta { .. }
|
||||
| Self::FlowNotFound { .. }
|
||||
| Self::ListFlows { .. } => StatusCode::TableNotFound,
|
||||
Self::InvalidQuery { .. } | Self::Plan { .. } | Self::Datatypes { .. } => {
|
||||
StatusCode::PlanQuery
|
||||
}
|
||||
Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
|
||||
Self::InvalidQuery { .. } => StatusCode::EngineExecuteQuery,
|
||||
Self::Unexpected { .. } => StatusCode::Unexpected,
|
||||
Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
|
||||
StatusCode::Unsupported
|
||||
|
||||
@@ -14,8 +14,11 @@
|
||||
|
||||
//! Error handling for expression evaluation.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use arrow_schema::ArrowError;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
@@ -126,3 +129,29 @@ pub enum EvalError {
|
||||
source: BoxedError,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for EvalError {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use EvalError::*;
|
||||
match self {
|
||||
DivisionByZero { .. }
|
||||
| TypeMismatch { .. }
|
||||
| TryFromValue { .. }
|
||||
| DataAlreadyExpired { .. }
|
||||
| InvalidArgument { .. }
|
||||
| Overflow { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
CastValue { source, .. } | DataType { source, .. } => source.status_code(),
|
||||
|
||||
Internal { .. }
|
||||
| Optimize { .. }
|
||||
| Arrow { .. }
|
||||
| Datafusion { .. }
|
||||
| External { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,4 +30,22 @@ lazy_static! {
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_RUN_INTERVAL_MS: IntGauge =
|
||||
register_int_gauge!("greptime_flow_run_interval_ms", "flow run interval in ms").unwrap();
|
||||
pub static ref METRIC_FLOW_ROWS: IntCounterVec = register_int_counter_vec!(
|
||||
"greptime_flow_processed_rows",
|
||||
"Count of rows flowing through the system",
|
||||
&["direction"]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_PROCESSING_TIME: HistogramVec = register_histogram_vec!(
|
||||
"greptime_flow_processing_time",
|
||||
"Time spent processing requests",
|
||||
&["type"]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_FLOW_ERRORS: IntCounterVec = register_int_counter_vec!(
|
||||
"greptime_flow_errors",
|
||||
"Count of errors in flow processing",
|
||||
&["code"]
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ pub const BROADCAST_CAP: usize = 1024;
|
||||
/// The maximum capacity of the send buffer, to prevent the buffer from growing too large
|
||||
pub const SEND_BUF_CAP: usize = BROADCAST_CAP * 2;
|
||||
|
||||
/// Flow worker will try to at least accumulate this many rows before processing them(if one second havn't passed)
|
||||
/// Flow worker will try to at least accumulate this many rows before processing them(if one second haven't passed)
|
||||
pub const BATCH_SIZE: usize = 32 * 16384;
|
||||
|
||||
/// Convert a value that is or can be converted to Datetime to internal timestamp
|
||||
|
||||
@@ -212,6 +212,8 @@ impl RelationType {
|
||||
for key in &mut self.keys {
|
||||
key.remove_col(time_index.unwrap_or(usize::MAX));
|
||||
}
|
||||
// remove empty keys
|
||||
self.keys.retain(|key| !key.is_empty());
|
||||
self
|
||||
}
|
||||
|
||||
|
||||
@@ -50,10 +50,11 @@ use tonic::{Request, Response, Status};
|
||||
|
||||
use crate::adapter::{CreateFlowArgs, FlowWorkerManagerRef};
|
||||
use crate::error::{
|
||||
CacheRequiredSnafu, ExternalSnafu, FlowNotFoundSnafu, ListFlowsSnafu, ParseAddrSnafu,
|
||||
ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
|
||||
to_status_with_last_err, CacheRequiredSnafu, ExternalSnafu, FlowNotFoundSnafu, ListFlowsSnafu,
|
||||
ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
|
||||
use crate::transform::register_function_to_query_engine;
|
||||
use crate::utils::{SizeReportSender, StateReportHandler};
|
||||
use crate::{Error, FlowWorkerManager, FlownodeOptions};
|
||||
@@ -77,41 +78,52 @@ impl flow_server::Flow for FlowService {
|
||||
&self,
|
||||
request: Request<FlowRequest>,
|
||||
) -> Result<Response<FlowResponse>, Status> {
|
||||
let _timer = METRIC_FLOW_PROCESSING_TIME
|
||||
.with_label_values(&["ddl"])
|
||||
.start_timer();
|
||||
|
||||
let request = request.into_inner();
|
||||
self.manager
|
||||
.handle(request)
|
||||
.await
|
||||
.map(Response::new)
|
||||
.map_err(|e| {
|
||||
let msg = format!("failed to handle request: {:?}", e);
|
||||
Status::internal(msg)
|
||||
})
|
||||
.map_err(to_status_with_last_err)
|
||||
}
|
||||
|
||||
async fn handle_mirror_request(
|
||||
&self,
|
||||
request: Request<InsertRequests>,
|
||||
) -> Result<Response<FlowResponse>, Status> {
|
||||
let _timer = METRIC_FLOW_PROCESSING_TIME
|
||||
.with_label_values(&["insert"])
|
||||
.start_timer();
|
||||
|
||||
let request = request.into_inner();
|
||||
// TODO(discord9): fix protobuf import order shenanigans to remove this duplicated define
|
||||
let mut row_count = 0;
|
||||
let request = api::v1::region::InsertRequests {
|
||||
requests: request
|
||||
.requests
|
||||
.into_iter()
|
||||
.map(|insert| api::v1::region::InsertRequest {
|
||||
region_id: insert.region_id,
|
||||
rows: insert.rows,
|
||||
.map(|insert| {
|
||||
insert.rows.as_ref().inspect(|x| row_count += x.rows.len());
|
||||
api::v1::region::InsertRequest {
|
||||
region_id: insert.region_id,
|
||||
rows: insert.rows,
|
||||
}
|
||||
})
|
||||
.collect_vec(),
|
||||
};
|
||||
|
||||
METRIC_FLOW_ROWS
|
||||
.with_label_values(&["in"])
|
||||
.inc_by(row_count as u64);
|
||||
|
||||
self.manager
|
||||
.handle_inserts(request)
|
||||
.await
|
||||
.map(Response::new)
|
||||
.map_err(|e| {
|
||||
let msg = format!("failed to handle request: {:?}", e);
|
||||
Status::internal(msg)
|
||||
})
|
||||
.map_err(to_status_with_last_err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -500,6 +512,10 @@ impl FrontendInvoker {
|
||||
requests: RowInsertRequests,
|
||||
ctx: QueryContextRef,
|
||||
) -> common_frontend::error::Result<Output> {
|
||||
let _timer = METRIC_FLOW_PROCESSING_TIME
|
||||
.with_label_values(&["output_insert"])
|
||||
.start_timer();
|
||||
|
||||
self.inserter
|
||||
.handle_row_inserts(requests, ctx, &self.statement_executor)
|
||||
.await
|
||||
@@ -512,10 +528,18 @@ impl FrontendInvoker {
|
||||
requests: RowDeleteRequests,
|
||||
ctx: QueryContextRef,
|
||||
) -> common_frontend::error::Result<Output> {
|
||||
let _timer = METRIC_FLOW_PROCESSING_TIME
|
||||
.with_label_values(&["output_delete"])
|
||||
.start_timer();
|
||||
|
||||
self.deleter
|
||||
.handle_row_deletes(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_frontend::error::ExternalSnafu)
|
||||
}
|
||||
|
||||
pub fn statement_executor(&self) -> Arc<StatementExecutor> {
|
||||
self.statement_executor.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -216,6 +216,7 @@ impl KeyValPlan {
|
||||
|
||||
/// find out the column that should be the time index in group exprs (which are all columns that should be keys)
|
||||
/// TODO(discord9): better ways to assign time index
|
||||
/// for now, it will find the first column that is a timestamp or has a tumble window floor function
|
||||
fn find_time_index_in_group_exprs(group_exprs: &[TypedExpr]) -> Option<usize> {
|
||||
group_exprs.iter().position(|expr| {
|
||||
matches!(
|
||||
@@ -224,7 +225,7 @@ fn find_time_index_in_group_exprs(group_exprs: &[TypedExpr]) -> Option<usize> {
|
||||
func: UnaryFunc::TumbleWindowFloor { .. },
|
||||
expr: _
|
||||
}
|
||||
)
|
||||
) || expr.typ.scalar_type.is_timestamp()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1482,7 +1483,7 @@ mod test {
|
||||
ColumnType::new(CDT::float64_datatype(), true),
|
||||
ColumnType::new(CDT::timestamp_millisecond_datatype(), true),
|
||||
])
|
||||
.with_key(vec![1])
|
||||
.with_time_index(Some(1))
|
||||
.into_named(vec![
|
||||
Some(
|
||||
"MAX(numbers_with_ts.number) - MIN(numbers_with_ts.number) / Float64(30)"
|
||||
@@ -1571,7 +1572,7 @@ mod test {
|
||||
ColumnType::new(ConcreteDataType::uint32_datatype(), true), // max
|
||||
ColumnType::new(ConcreteDataType::uint32_datatype(), true), // min
|
||||
])
|
||||
.with_key(vec![0])
|
||||
.with_time_index(Some(0))
|
||||
.into_unnamed(),
|
||||
),
|
||||
),
|
||||
|
||||
@@ -41,6 +41,7 @@ datafusion-expr.workspace = true
|
||||
datanode.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
lazy_static.workspace = true
|
||||
log-query.workspace = true
|
||||
log-store.workspace = true
|
||||
meta-client.workspace = true
|
||||
opentelemetry-proto.workspace = true
|
||||
|
||||
@@ -321,6 +321,12 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("In-flight write bytes exceeded the maximum limit"))]
|
||||
InFlightWriteBytesExceeded {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -392,6 +398,8 @@ impl ErrorExt for Error {
|
||||
Error::StartScriptManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::TableOperation { source, .. } => source.status_code(),
|
||||
|
||||
Error::InFlightWriteBytesExceeded { .. } => StatusCode::RateLimited,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_config::config::Configurable;
|
||||
use common_options::datanode::DatanodeClientOptions;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
@@ -46,6 +47,7 @@ pub struct FrontendOptions {
|
||||
pub user_provider: Option<String>,
|
||||
pub export_metrics: ExportMetricsOption,
|
||||
pub tracing: TracingOptions,
|
||||
pub max_in_flight_write_bytes: Option<ReadableSize>,
|
||||
}
|
||||
|
||||
impl Default for FrontendOptions {
|
||||
@@ -68,6 +70,7 @@ impl Default for FrontendOptions {
|
||||
user_provider: None,
|
||||
export_metrics: ExportMetricsOption::default(),
|
||||
tracing: TracingOptions::default(),
|
||||
max_in_flight_write_bytes: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ pub mod builder;
|
||||
mod grpc;
|
||||
mod influxdb;
|
||||
mod log_handler;
|
||||
mod logs;
|
||||
mod opentsdb;
|
||||
mod otlp;
|
||||
mod prom_store;
|
||||
@@ -64,8 +65,8 @@ use servers::prometheus_handler::PrometheusHandler;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::{
|
||||
InfluxdbLineProtocolHandler, OpenTelemetryProtocolHandler, OpentsdbProtocolHandler,
|
||||
PipelineHandler, PromStoreProtocolHandler, ScriptHandler,
|
||||
InfluxdbLineProtocolHandler, LogQueryHandler, OpenTelemetryProtocolHandler,
|
||||
OpentsdbProtocolHandler, PipelineHandler, PromStoreProtocolHandler, ScriptHandler,
|
||||
};
|
||||
use servers::server::ServerHandlers;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -86,6 +87,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::frontend::FrontendOptions;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::limiter::LimiterRef;
|
||||
use crate::script::ScriptExecutor;
|
||||
|
||||
#[async_trait]
|
||||
@@ -99,6 +101,7 @@ pub trait FrontendInstance:
|
||||
+ ScriptHandler
|
||||
+ PrometheusHandler
|
||||
+ PipelineHandler
|
||||
+ LogQueryHandler
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static
|
||||
@@ -124,6 +127,7 @@ pub struct Instance {
|
||||
export_metrics_task: Option<ExportMetricsTask>,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
stats: StatementStatistics,
|
||||
limiter: Option<LimiterRef>,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
|
||||
@@ -43,6 +43,7 @@ use crate::frontend::FrontendOptions;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::instance::region_query::FrontendRegionQueryHandler;
|
||||
use crate::instance::Instance;
|
||||
use crate::limiter::Limiter;
|
||||
use crate::script::ScriptExecutor;
|
||||
|
||||
/// The frontend [`Instance`] builder.
|
||||
@@ -196,6 +197,14 @@ impl FrontendBuilder {
|
||||
|
||||
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
|
||||
|
||||
// Create the limiter if the max_in_flight_write_bytes is set.
|
||||
let limiter = self
|
||||
.options
|
||||
.max_in_flight_write_bytes
|
||||
.map(|max_in_flight_write_bytes| {
|
||||
Arc::new(Limiter::new(max_in_flight_write_bytes.as_bytes()))
|
||||
});
|
||||
|
||||
Ok(Instance {
|
||||
options: self.options,
|
||||
catalog_manager: self.catalog_manager,
|
||||
@@ -211,6 +220,7 @@ impl FrontendBuilder {
|
||||
export_metrics_task: None,
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(kv_backend)),
|
||||
stats: self.stats,
|
||||
limiter,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,8 +29,8 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::error::{
|
||||
Error, IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, Result,
|
||||
TableOperationSnafu,
|
||||
Error, InFlightWriteBytesExceededSnafu, IncompleteGrpcRequestSnafu, NotSupportedSnafu,
|
||||
PermissionSnafu, Result, TableOperationSnafu,
|
||||
};
|
||||
use crate::instance::{attach_timer, Instance};
|
||||
use crate::metrics::{GRPC_HANDLE_PROMQL_ELAPSED, GRPC_HANDLE_SQL_ELAPSED};
|
||||
@@ -50,6 +50,16 @@ impl GrpcQueryHandler for Instance {
|
||||
.check_permission(ctx.current_user(), PermissionReq::GrpcRequest(&request))
|
||||
.context(PermissionSnafu)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_request(&request);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = match request {
|
||||
Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
|
||||
Request::RowInserts(requests) => self.handle_row_inserts(requests, ctx.clone()).await?,
|
||||
|
||||
@@ -16,7 +16,7 @@ use async_trait::async_trait;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use client::Output;
|
||||
use common_error::ext::BoxedError;
|
||||
use servers::error::{AuthSnafu, Error};
|
||||
use servers::error::{AuthSnafu, Error, InFlightWriteBytesExceededSnafu};
|
||||
use servers::influxdb::InfluxdbRequest;
|
||||
use servers::interceptor::{LineProtocolInterceptor, LineProtocolInterceptorRef};
|
||||
use servers::query_handler::InfluxdbLineProtocolHandler;
|
||||
@@ -46,6 +46,16 @@ impl InfluxdbLineProtocolHandler for Instance {
|
||||
.post_lines_conversion(requests, ctx.clone())
|
||||
.await?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_influx_row_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
|
||||
@@ -22,7 +22,8 @@ use common_error::ext::BoxedError;
|
||||
use pipeline::pipeline_operator::PipelineOperator;
|
||||
use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion};
|
||||
use servers::error::{
|
||||
AuthSnafu, Error as ServerError, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult,
|
||||
AuthSnafu, Error as ServerError, ExecuteGrpcRequestSnafu, InFlightWriteBytesExceededSnafu,
|
||||
PipelineSnafu, Result as ServerResult,
|
||||
};
|
||||
use servers::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
|
||||
use servers::query_handler::PipelineHandler;
|
||||
@@ -110,6 +111,16 @@ impl Instance {
|
||||
log: RowInsertRequests,
|
||||
ctx: QueryContextRef,
|
||||
) -> ServerResult<Output> {
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&log);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.inserter
|
||||
.handle_log_inserts(log, ctx, self.statement_executor.as_ref())
|
||||
.await
|
||||
|
||||
src/frontend/src/instance/logs.rs (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use client::Output;
|
||||
use common_error::ext::BoxedError;
|
||||
use log_query::LogQuery;
|
||||
use server_error::Result as ServerResult;
|
||||
use servers::error::{self as server_error, AuthSnafu, ExecuteQuerySnafu};
|
||||
use servers::interceptor::{LogQueryInterceptor, LogQueryInterceptorRef};
|
||||
use servers::query_handler::LogQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
use tonic::async_trait;
|
||||
|
||||
use super::Instance;
|
||||
|
||||
#[async_trait]
|
||||
impl LogQueryHandler for Instance {
|
||||
async fn query(&self, mut request: LogQuery, ctx: QueryContextRef) -> ServerResult<Output> {
|
||||
let interceptor = self
|
||||
.plugins
|
||||
.get::<LogQueryInterceptorRef<server_error::Error>>();
|
||||
|
||||
self.plugins
|
||||
.get::<PermissionCheckerRef>()
|
||||
.as_ref()
|
||||
.check_permission(ctx.current_user(), PermissionReq::LogQuery)
|
||||
.context(AuthSnafu)?;
|
||||
|
||||
interceptor.as_ref().pre_query(&request, ctx.clone())?;
|
||||
|
||||
request
|
||||
.time_filter
|
||||
.canonicalize()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteQuerySnafu)?;
|
||||
|
||||
let plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan_logs_query(request, ctx.clone())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteQuerySnafu)?;
|
||||
|
||||
let output = self
|
||||
.statement_executor
|
||||
.exec_plan(plan, ctx.clone())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteQuerySnafu)?;
|
||||
|
||||
Ok(interceptor.as_ref().post_query(output, ctx.clone())?)
|
||||
}
|
||||
}
|
||||
@@ -17,7 +17,7 @@ use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::tracing;
|
||||
use servers::error as server_error;
|
||||
use servers::error::AuthSnafu;
|
||||
use servers::error::{AuthSnafu, InFlightWriteBytesExceededSnafu};
|
||||
use servers::opentsdb::codec::DataPoint;
|
||||
use servers::opentsdb::data_point_to_grpc_row_insert_requests;
|
||||
use servers::query_handler::OpentsdbProtocolHandler;
|
||||
@@ -41,6 +41,17 @@ impl OpentsdbProtocolHandler for Instance {
|
||||
.context(AuthSnafu)?;
|
||||
|
||||
let (requests, _) = data_point_to_grpc_row_insert_requests(data_points)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = self
|
||||
.handle_row_inserts(requests, ctx)
|
||||
.await
|
||||
|
||||
@@ -21,7 +21,7 @@ use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
|
||||
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
|
||||
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
|
||||
use pipeline::PipelineWay;
|
||||
use servers::error::{self, AuthSnafu, Result as ServerResult};
|
||||
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
|
||||
use servers::interceptor::{OpenTelemetryProtocolInterceptor, OpenTelemetryProtocolInterceptorRef};
|
||||
use servers::otlp;
|
||||
use servers::query_handler::OpenTelemetryProtocolHandler;
|
||||
@@ -53,6 +53,16 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
let (requests, rows) = otlp::metrics::to_grpc_insert_requests(request)?;
|
||||
OTLP_METRICS_ROWS.inc_by(rows as u64);
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_row_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
@@ -83,6 +93,16 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
|
||||
OTLP_TRACES_ROWS.inc_by(rows as u64);
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_log_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
@@ -109,6 +129,17 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
interceptor_ref.pre_execute(ctx.clone())?;
|
||||
|
||||
let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&requests);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.handle_log_inserts(requests, ctx)
|
||||
.await
|
||||
.inspect(|_| OTLP_LOGS_ROWS.inc_by(rows as u64))
|
||||
|
||||
@@ -30,7 +30,7 @@ use common_telemetry::{debug, tracing};
|
||||
use operator::insert::InserterRef;
|
||||
use operator::statement::StatementExecutor;
|
||||
use prost::Message;
|
||||
use servers::error::{self, AuthSnafu, Result as ServerResult};
|
||||
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
|
||||
use servers::http::header::{collect_plan_metrics, CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF};
|
||||
use servers::http::prom_store::PHYSICAL_TABLE_PARAM;
|
||||
use servers::interceptor::{PromStoreProtocolInterceptor, PromStoreProtocolInterceptorRef};
|
||||
@@ -175,6 +175,16 @@ impl PromStoreProtocolHandler for Instance {
|
||||
.get::<PromStoreProtocolInterceptorRef<servers::error::Error>>();
|
||||
interceptor_ref.pre_write(&request, ctx.clone())?;
|
||||
|
||||
let _guard = if let Some(limiter) = &self.limiter {
|
||||
let result = limiter.limit_row_inserts(&request);
|
||||
if result.is_none() {
|
||||
return InFlightWriteBytesExceededSnafu.fail();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output = if with_metric_engine {
|
||||
let physical_table = ctx
|
||||
.extension(PHYSICAL_TABLE_PARAM)
|
||||
|
||||
@@ -18,6 +18,7 @@ pub mod error;
|
||||
pub mod frontend;
|
||||
pub mod heartbeat;
|
||||
pub mod instance;
|
||||
pub(crate) mod limiter;
|
||||
pub(crate) mod metrics;
|
||||
mod script;
|
||||
pub mod server;
|
||||
|
||||
src/frontend/src/limiter.rs (new file, 291 lines)
@@ -0,0 +1,291 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::column::Values;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::value::ValueData;
|
||||
use api::v1::{Decimal128, InsertRequests, IntervalMonthDayNano, RowInsertRequests};
|
||||
use common_telemetry::{debug, warn};
|
||||
|
||||
pub(crate) type LimiterRef = Arc<Limiter>;
|
||||
|
||||
/// A frontend request limiter that controls the total size of in-flight write requests.
|
||||
pub(crate) struct Limiter {
|
||||
// The maximum number of bytes that can be in flight.
|
||||
max_in_flight_write_bytes: u64,
|
||||
|
||||
// The current in-flight write bytes.
|
||||
in_flight_write_bytes: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
/// A counter for the in-flight write bytes.
|
||||
pub(crate) struct InFlightWriteBytesCounter {
|
||||
// The current in-flight write bytes.
|
||||
in_flight_write_bytes: Arc<AtomicU64>,
|
||||
|
||||
// The write bytes that are being processed.
|
||||
processing_write_bytes: u64,
|
||||
}
|
||||
|
||||
impl InFlightWriteBytesCounter {
|
||||
/// Creates a new InFlightWriteBytesCounter. It will decrease the in-flight write bytes when dropped.
|
||||
pub fn new(in_flight_write_bytes: Arc<AtomicU64>, processing_write_bytes: u64) -> Self {
|
||||
debug!(
|
||||
"processing write bytes: {}, current in-flight write bytes: {}",
|
||||
processing_write_bytes,
|
||||
in_flight_write_bytes.load(Ordering::Relaxed)
|
||||
);
|
||||
Self {
|
||||
in_flight_write_bytes,
|
||||
processing_write_bytes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for InFlightWriteBytesCounter {
|
||||
// When the request is finished, the in-flight write bytes should be decreased.
|
||||
fn drop(&mut self) {
|
||||
self.in_flight_write_bytes
|
||||
.fetch_sub(self.processing_write_bytes, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
impl Limiter {
|
||||
pub fn new(max_in_flight_write_bytes: u64) -> Self {
|
||||
Self {
|
||||
max_in_flight_write_bytes,
|
||||
in_flight_write_bytes: Arc::new(AtomicU64::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn limit_request(&self, request: &Request) -> Option<InFlightWriteBytesCounter> {
|
||||
let size = match request {
|
||||
Request::Inserts(requests) => self.insert_requests_data_size(requests),
|
||||
Request::RowInserts(requests) => self.rows_insert_requests_data_size(requests),
|
||||
_ => 0,
|
||||
};
|
||||
self.limit_in_flight_write_bytes(size as u64)
|
||||
}
|
||||
|
||||
pub fn limit_row_inserts(
|
||||
&self,
|
||||
requests: &RowInsertRequests,
|
||||
) -> Option<InFlightWriteBytesCounter> {
|
||||
let size = self.rows_insert_requests_data_size(requests);
|
||||
self.limit_in_flight_write_bytes(size as u64)
|
||||
}
|
||||
|
||||
/// Returns None if the in-flight write bytes exceed the maximum limit.
|
||||
/// Otherwise, returns Some(InFlightWriteBytesCounter) and the in-flight write bytes will be increased.
|
||||
pub fn limit_in_flight_write_bytes(&self, bytes: u64) -> Option<InFlightWriteBytesCounter> {
|
||||
let result = self.in_flight_write_bytes.fetch_update(
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
|current| {
|
||||
if current + bytes > self.max_in_flight_write_bytes {
|
||||
warn!(
|
||||
"in-flight write bytes exceed the maximum limit {}, request with {} bytes will be limited",
|
||||
self.max_in_flight_write_bytes,
|
||||
bytes
|
||||
);
|
||||
return None;
|
||||
}
|
||||
Some(current + bytes)
|
||||
},
|
||||
);
|
||||
|
||||
match result {
|
||||
// Update the in-flight write bytes successfully.
|
||||
Ok(_) => Some(InFlightWriteBytesCounter::new(
|
||||
self.in_flight_write_bytes.clone(),
|
||||
bytes,
|
||||
)),
|
||||
// It means the in-flight write bytes exceed the maximum limit.
|
||||
Err(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current in-flight write bytes.
|
||||
#[allow(dead_code)]
|
||||
pub fn in_flight_write_bytes(&self) -> u64 {
|
||||
self.in_flight_write_bytes.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn insert_requests_data_size(&self, request: &InsertRequests) -> usize {
|
||||
let mut size: usize = 0;
|
||||
for insert in &request.inserts {
|
||||
for column in &insert.columns {
|
||||
if let Some(values) = &column.values {
|
||||
size += self.size_of_column_values(values);
|
||||
}
|
||||
}
|
||||
}
|
||||
size
|
||||
}
|
||||
|
||||
fn rows_insert_requests_data_size(&self, request: &RowInsertRequests) -> usize {
|
||||
let mut size: usize = 0;
|
||||
for insert in &request.inserts {
|
||||
if let Some(rows) = &insert.rows {
|
||||
for row in &rows.rows {
|
||||
for value in &row.values {
|
||||
if let Some(value) = &value.value_data {
|
||||
size += self.size_of_value_data(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
size
|
||||
}
|
||||
|
||||
fn size_of_column_values(&self, values: &Values) -> usize {
|
||||
let mut size: usize = 0;
|
||||
size += values.i8_values.len() * size_of::<i32>();
|
||||
size += values.i16_values.len() * size_of::<i32>();
|
||||
size += values.i32_values.len() * size_of::<i32>();
|
||||
size += values.i64_values.len() * size_of::<i64>();
|
||||
size += values.u8_values.len() * size_of::<u32>();
|
||||
size += values.u16_values.len() * size_of::<u32>();
|
||||
size += values.u32_values.len() * size_of::<u32>();
|
||||
size += values.u64_values.len() * size_of::<u64>();
|
||||
size += values.f32_values.len() * size_of::<f32>();
|
||||
size += values.f64_values.len() * size_of::<f64>();
|
||||
size += values.bool_values.len() * size_of::<bool>();
|
||||
size += values
|
||||
.binary_values
|
||||
.iter()
|
||||
.map(|v| v.len() * size_of::<u8>())
|
||||
.sum::<usize>();
|
||||
size += values.string_values.iter().map(|v| v.len()).sum::<usize>();
|
||||
size += values.date_values.len() * size_of::<i32>();
|
||||
size += values.datetime_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_second_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_millisecond_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_microsecond_values.len() * size_of::<i64>();
|
||||
size += values.timestamp_nanosecond_values.len() * size_of::<i64>();
|
||||
size += values.time_second_values.len() * size_of::<i64>();
|
||||
size += values.time_millisecond_values.len() * size_of::<i64>();
|
||||
size += values.time_microsecond_values.len() * size_of::<i64>();
|
||||
size += values.time_nanosecond_values.len() * size_of::<i64>();
|
||||
size += values.interval_year_month_values.len() * size_of::<i64>();
|
||||
size += values.interval_day_time_values.len() * size_of::<i64>();
|
||||
size += values.interval_month_day_nano_values.len() * size_of::<IntervalMonthDayNano>();
|
||||
size += values.decimal128_values.len() * size_of::<Decimal128>();
|
||||
size
|
||||
}
|
||||
|
||||
fn size_of_value_data(&self, value: &ValueData) -> usize {
|
||||
match value {
|
||||
ValueData::I8Value(_) => size_of::<i32>(),
|
||||
ValueData::I16Value(_) => size_of::<i32>(),
|
||||
ValueData::I32Value(_) => size_of::<i32>(),
|
||||
ValueData::I64Value(_) => size_of::<i64>(),
|
||||
ValueData::U8Value(_) => size_of::<u32>(),
|
||||
ValueData::U16Value(_) => size_of::<u32>(),
|
||||
ValueData::U32Value(_) => size_of::<u32>(),
|
||||
ValueData::U64Value(_) => size_of::<u64>(),
|
||||
ValueData::F32Value(_) => size_of::<f32>(),
|
||||
ValueData::F64Value(_) => size_of::<f64>(),
|
||||
ValueData::BoolValue(_) => size_of::<bool>(),
|
||||
ValueData::BinaryValue(v) => v.len() * size_of::<u8>(),
|
||||
ValueData::StringValue(v) => v.len(),
|
||||
ValueData::DateValue(_) => size_of::<i32>(),
|
||||
ValueData::DatetimeValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampSecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampMillisecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampMicrosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimestampNanosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeSecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeMillisecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeMicrosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::TimeNanosecondValue(_) => size_of::<i64>(),
|
||||
ValueData::IntervalYearMonthValue(_) => size_of::<i32>(),
|
||||
ValueData::IntervalDayTimeValue(_) => size_of::<i64>(),
|
||||
ValueData::IntervalMonthDayNanoValue(_) => size_of::<IntervalMonthDayNano>(),
|
||||
ValueData::Decimal128Value(_) => size_of::<Decimal128>(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::column::Values;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::{Column, InsertRequest};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn generate_request(size: usize) -> Request {
|
||||
let i8_values = vec![0; size / 4];
|
||||
Request::Inserts(InsertRequests {
|
||||
inserts: vec![InsertRequest {
|
||||
columns: vec![Column {
|
||||
values: Some(Values {
|
||||
i8_values,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
}],
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_limiter() {
|
||||
let limiter_ref: LimiterRef = Arc::new(Limiter::new(1024));
|
||||
let tasks_count = 10;
|
||||
let request_data_size = 100;
|
||||
let mut handles = vec![];
|
||||
|
||||
// Generate multiple requests to test the limiter.
|
||||
for _ in 0..tasks_count {
|
||||
let limiter = limiter_ref.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let result = limiter.limit_request(&generate_request(request_data_size));
|
||||
assert!(result.is_some());
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all threads to complete.
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_in_flight_write_bytes() {
|
||||
let limiter_ref: LimiterRef = Arc::new(Limiter::new(1024));
|
||||
let req1 = generate_request(100);
|
||||
let result1 = limiter_ref.limit_request(&req1);
|
||||
assert!(result1.is_some());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 100);
|
||||
|
||||
let req2 = generate_request(200);
|
||||
let result2 = limiter_ref.limit_request(&req2);
|
||||
assert!(result2.is_some());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 300);
|
||||
|
||||
drop(result1.unwrap());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 200);
|
||||
|
||||
drop(result2.unwrap());
|
||||
assert_eq!(limiter_ref.in_flight_write_bytes(), 0);
|
||||
}
|
||||
}
|
||||
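The handlers above all repeat the same guard idiom: reserve the request's byte size against the limiter on entry, fail with InFlightWriteBytesExceeded when the budget is full, and rely on the guard's Drop impl to release the reservation. A minimal, self-contained sketch of that RAII admission-control pattern (the `Gate` and `Permit` names are illustrative and not part of the GreptimeDB API):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

struct Gate {
    limit: u64,
    in_flight: Arc<AtomicU64>,
}

struct Permit {
    bytes: u64,
    in_flight: Arc<AtomicU64>,
}

impl Drop for Permit {
    // releasing the reservation mirrors InFlightWriteBytesCounter::drop
    fn drop(&mut self) {
        self.in_flight.fetch_sub(self.bytes, Ordering::Relaxed);
    }
}

impl Gate {
    // compare-and-swap loop: admit the request only if the new total stays within the limit
    fn try_acquire(&self, bytes: u64) -> Option<Permit> {
        self.in_flight
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |cur| {
                (cur + bytes <= self.limit).then_some(cur + bytes)
            })
            .ok()
            .map(|_| Permit {
                bytes,
                in_flight: self.in_flight.clone(),
            })
    }
}

fn main() {
    let gate = Gate {
        limit: 1024,
        in_flight: Arc::new(AtomicU64::new(0)),
    };
    let first = gate.try_acquire(800);
    assert!(first.is_some());
    assert!(gate.try_acquire(800).is_none()); // would exceed the 1024-byte budget
    drop(first);
    assert!(gate.try_acquire(800).is_some()); // capacity is released when the permit drops
}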
@@ -87,6 +87,7 @@ where
|
||||
let ingest_interceptor = self.plugins.get::<LogIngestInterceptorRef<ServerError>>();
|
||||
builder =
|
||||
builder.with_log_ingest_handler(self.instance.clone(), validator, ingest_interceptor);
|
||||
builder = builder.with_logs_handler(self.instance.clone());
|
||||
|
||||
if let Some(user_provider) = self.plugins.get::<UserProviderRef>() {
|
||||
builder = builder.with_user_provider(user_provider);
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.